date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | Moshiii/resumelab_stremlit | pages~3_Authors.py | import streamlit as st
import PyPDF2
import os
import io
import time
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import create_extraction_chain
import fitz
test = False
st.markdown("Coding: Moshi, Carrie")
st.markdown("Prompting: Moshi, Carrie")
st.markdown("Special Thanks to Leona for UX Consulting")
st.markdown("Moshi: [email protected]")
st.markdown("Carrie: [email protected]")
st.markdown("Leona: [email protected]")
| [] |
2024-01-10 | Moshiii/resumelab_stremlit | pages~2_Generate_Cover_Letter.py | import streamlit as st
import PyPDF2
import os
import io
import time
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import create_extraction_chain
import fitz
test = False
def read_jd(text_content):
prompt_template = '''plrease reformat the following text to a Job description in the following format:
resume_text:
{text_content}
Desired format:
Job Position:
Position name
Education qualification:
Degree and major
Experience requirement:
Experience and number of years
Programming Language:
list of Programming Languages
Hard skill:
list of Hard skill
Soft skill:
list of Soft skill
Job respobsiability:
summerized bullet points of responsiability
Company Value:
summerized company value and vision paragraph
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
result = llm_chain.predict(
text_content=st.session_state['page_jd_jd_text_area'])
return result
def generate_refined_resume():
prompt_template = '''please generate a cover letter based on the Job description.
Job description:
{JD}
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
refined_resume = llm_chain.predict(
JD=st.session_state['page_jd_JD'])
return refined_resume
if 'page_jd_if_upload_clicked' not in st.session_state:
st.session_state['page_jd_if_upload_clicked'] = False
if 'page_jd_if_resume_uploaded' not in st.session_state:
st.session_state['page_jd_if_resume_uploaded'] = False
if 'page_jd_if_validate_clicked' not in st.session_state:
st.session_state['page_jd_if_validate_clicked'] = False
if 'page_jd_if_generate_clicked' not in st.session_state:
st.session_state['page_jd_if_generate_clicked'] = False
if 'page_jd_resume' not in st.session_state:
st.session_state['page_jd_resume'] = ""
if 'page_jd_JD' not in st.session_state:
st.session_state['page_jd_JD'] = ""
if 'page_jd_jd_text_area' not in st.session_state:
st.session_state['page_jd_jd_text_area'] = ""
if 'page_jd_generated' not in st.session_state:
st.session_state['page_jd_generated'] = False
if 'page_jd_refined_resume' not in st.session_state:
st.session_state['page_jd_refined_resume'] = ""
if 'page_jd_text_content' not in st.session_state:
st.session_state['page_jd_text_content'] = ""
st.markdown("Step 1. Provide your OpenAI API Key")
st.markdown("Step 2. Upload Job Description")
st.markdown("Step 3. Click 'Read JD.' AI will parse JD to text, and you may edit it before moving to the next step.")
st.markdown("Step 5. Click 'Generate Cover Letter.' AI will Generate Cover Letter based on the JD.")
st.markdown("Step 6. Click 'Download Cover Letter.' to save your result")
API_O = st.text_input('OPENAI_API_KEY', st.session_state['openAI_key'],type="password")
# API_O = st.secrets["OPENAI_API_KEY"]
MODEL = "gpt-3.5-turbo"
if API_O:
llm = ChatOpenAI(temperature=0, openai_api_key=API_O,
model_name=MODEL, verbose=False)
else:
st.info("please provide API Key")
jd_text_area = st.text_area('Upload JD', st.session_state['page_jd_JD'], 1000)
if st.button("Read JD"):
if jd_text_area != "":
st.session_state['page_jd_if_upload_clicked'] = True
else:
st.info("please make sure you provide all info")
if st.session_state['page_jd_if_upload_clicked'] == True:
if st.session_state['page_jd_jd_text_area']=="":
st.session_state['page_jd_jd_text_area'] = jd_text_area
st.session_state['page_jd_if_resume_uploaded'] = True
if st.session_state['page_jd_if_resume_uploaded']:
with st.spinner(text='Reading In progress'):
if test:
st.session_state['page_jd_resume'] = "test resume"
st.session_state['page_jd_JD'] = "test JD"
if st.session_state['page_jd_JD'] == "":
jd_result = read_jd(st.session_state['page_jd_jd_text_area'])
st.session_state['page_jd_JD'] = jd_result
st.success('JD reading Completed')
st.session_state['page_jd_JD'] = st.text_area(
'JD', st.session_state['page_jd_JD'], 1000)
if st.button("Generate Cover Letter"):
st.session_state['page_jd_if_generate_clicked'] = True
if st.session_state['page_jd_if_generate_clicked']:
with st.spinner(text='Optimize In progress'):
if test:
st.session_state['page_jd_refined_resume'] = "test refined_resume"
if st.session_state['page_jd_refined_resume'] == "":
refined_resume = generate_refined_resume()
st.session_state['page_jd_refined_resume'] = refined_resume
st.success('Resume Refined')
st.session_state['page_jd_refined_resume'] = st.text_area(
'cover letter', st.session_state['page_jd_refined_resume'], 1000)
st.session_state['page_jd_generated'] = True
st.download_button('Download Cover Letter', st.session_state['page_jd_refined_resume'],
file_name="Cover_Letter_ResumeLab")
| [
"please generate a cover letter based on the Job description.\n Job description:\n {JD}\n ",
"plrease reformat the following text to a Job description in the following format:\n resume_text:\n {text_content}\n Desired format: \n Job Position:\n Position name\n Education qualification: \n Degree and major\n Experience requirement: \n Experience and number of years\n Programming Language: \n list of Programming Languages\n Hard skill:\n list of Hard skill\n Soft skill:\n list of Soft skill\n Job respobsiability:\n summerized bullet points of responsiability\n Company Value:\n summerized company value and vision paragraph\n "
] |
2024-01-10 | Moshiii/resumelab_stremlit | Polish_Resume.py | import streamlit as st
import PyPDF2
import os
import io
import time
from langchain import PromptTemplate, OpenAI, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.chains import create_extraction_chain
import fitz
test = False
def read_resume(text_content):
prompt_template = '''plrease reformat the following text to a resume in the following format:
resume_text:
{text_content}
Desired format:
Summary:
personal summary
Skills:
list of skill limited to 10
Experience:
company, role
details
company, role
details
...
Projects:
project name (skill list)
details
project name (skill list)
details
...
Eduation:
university name and major | start time - end time
university name and major | start time - end time
...
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
result = llm_chain.predict(text_content=st.session_state['text_content'])
return result
def get_suggestion():
prompt_template = '''plrease list 6 short one sentence suggestions to improve the resume. 3 suggestions are given already, please make sure to include them in the output.
for example :
Try to add numbers and metrics in the Experience and Projects to make it more impressive
Try to include technical skill keywords in bullet points
resume_text:
{text_content}
Suggestions(please include the following three):
1.Polish text and Fix all grammar issue.
2.Try to add numbers and metrics in the Experience and Projects to make it more impressive
3.Try to include technical skill keywords in bullet points
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
suggestions = llm_chain.predict(
text_content=st.session_state['resume'])
return suggestions
def generate_refined_resume():
prompt_template = '''plrease polish the following resume based on the suggestions.
suggestions:
{suggestions}
resume_text:
{text_content}
'''
llm_chain = LLMChain(
llm=llm,
prompt=PromptTemplate.from_template(prompt_template)
)
refined_resume = llm_chain.predict(
suggestions=st.session_state['suggestions'], text_content=st.session_state['resume'])
return refined_resume
if 'openAI_key' not in st.session_state:
st.session_state['openAI_key'] = ""
if 'if_upload_clicked' not in st.session_state:
st.session_state['if_upload_clicked'] = False
if 'if_resume_uploaded' not in st.session_state:
st.session_state['if_resume_uploaded'] = False
if 'if_validate_clicked' not in st.session_state:
st.session_state['if_validate_clicked'] = False
if 'if_generate_clicked' not in st.session_state:
st.session_state['if_generate_clicked'] = False
if 'resume' not in st.session_state:
st.session_state['resume'] = ""
if 'suggestions' not in st.session_state:
st.session_state['suggestions'] = ""
if 'generated' not in st.session_state:
st.session_state['generated'] = False
if 'refined_resume' not in st.session_state:
st.session_state['refined_resume'] = ""
if 'text_content' not in st.session_state:
st.session_state['text_content'] = ""
st.markdown("Step 1. Provide your OpenAI API Key")
st.markdown("Step 2. Upload your resume")
st.markdown("Step 3. Click 'Read Resume.' AI will parse your resume to text, and you may edit it before moving to the next step.")
st.markdown("Step 4. Click 'Make Suggestions.' AI will provide you with suggestions for polishing your resume.")
st.markdown("Step 5. Click 'Auto Improve.' AI will polish your resume based on the suggestions.")
st.markdown("Step 6. Click 'Download Resume.' to save your result")
API_O = st.text_input('OPENAI_API_KEY', st.session_state['openAI_key'],type="password")
# API_O = st.secrets["OPENAI_API_KEY"]
MODEL = "gpt-3.5-turbo"
if API_O:
st.session_state['openAI_key'] = API_O
llm = ChatOpenAI(temperature=0, openai_api_key=API_O,
model_name=MODEL, verbose=False)
else:
st.info("please provide API Key")
uploaded_file = st.file_uploader("Choose a file", type="pdf")
if st.button("Read Resume"):
if uploaded_file is not None and API_O:
st.session_state['if_upload_clicked'] = True
else:
st.info("please make sure you provide all info")
if st.session_state['if_upload_clicked'] == True:
if st.session_state['text_content']=="":
pdf_reader = PyPDF2.PdfReader(uploaded_file)
with open(os.path.join("tempDir", uploaded_file.name), "wb") as f:
f.write(uploaded_file.getbuffer())
pdf_path = os.path.join("tempDir", uploaded_file.name)
doc = fitz.open(pdf_path)
text_content = ""
for page_num in range(len(doc)):
page = doc.load_page(page_num)
text_content += page.get_text()
st.session_state['text_content'] = text_content
st.session_state['if_resume_uploaded'] = True
if st.session_state['if_resume_uploaded'] == True:
with st.spinner(text='Reading In progress'):
if test:
st.session_state['resume'] = "test resume"
if st.session_state['resume'] == "":
result = read_resume(st.session_state['text_content'])
st.session_state['resume'] = result
st.success('Resume reading Completed')
st.session_state['resume'] = st.text_area('Resume', st.session_state['resume'], 1000)
if st.button("Make suggestions"):
st.session_state['if_validate_clicked'] = True
if st.session_state['if_validate_clicked']:
with st.spinner(text='validating In progress'):
if test:
st.session_state['suggestions'] = "test suggestions"
if st.session_state['suggestions'] == "":
suggestions = get_suggestion()
st.session_state['suggestions'] = suggestions
st.info('Suggestions')
st.write(st.session_state['suggestions'])
if st.button("Auto Improve"):
st.session_state['if_generate_clicked'] = True
if st.session_state['if_generate_clicked']:
with st.spinner(text='Polish In progress'):
if test:
st.session_state['refined_resume'] = "test refined_resume"
if st.session_state['refined_resume'] == "":
refined_resume = generate_refined_resume()
st.session_state['refined_resume'] = refined_resume
st.success('Resume Refined')
st.session_state['refined_resume'] = st.text_area('Resume', st.session_state['refined_resume'], 1000)
st.session_state['generated'] = True
st.download_button('Download Resume', st.session_state['refined_resume'],
file_name="Polished_resume_ResumeLab")
| [
"plrease list 6 short one sentence suggestions to improve the resume. 3 suggestions are given already, please make sure to include them in the output. \n \n for example :\n Try to add numbers and metrics in the Experience and Projects to make it more impressive\n Try to include technical skill keywords in bullet points \n resume_text:\n {text_content}\n Suggestions(please include the following three):\n 1.Polish text and Fix all grammar issue. \n 2.Try to add numbers and metrics in the Experience and Projects to make it more impressive\n 3.Try to include technical skill keywords in bullet points\n ",
"plrease reformat the following text to a resume in the following format:\n resume_text:\n {text_content}\n Desired format: \n Summary:\n personal summary\n Skills: \n list of skill limited to 10\n Experience: \n company, role\n details\n company, role\n details\n ...\n Projects: \n project name (skill list)\n details\n project name (skill list)\n details\n ...\n Eduation:\n university name and major | start time - end time\n university name and major | start time - end time\n ...\n ",
"plrease polish the following resume based on the suggestions.\n suggestions:\n {suggestions}\n resume_text:\n {text_content}\n "
] |
2024-01-10 | saubhagya248/Quivr-clone | backend~models~settings.py | from typing import Annotated
from fastapi import Depends
from langchain.embeddings.openai import OpenAIEmbeddings
from pydantic import BaseSettings
from supabase.client import Client, create_client
from vectorstore.supabase import SupabaseVectorStore
class BrainSettings(BaseSettings):
openai_api_key: str
anthropic_api_key: str
supabase_url: str
supabase_service_key: str
class LLMSettings(BaseSettings):
private: bool = False
model_path: str = "gpt2"
model_n_ctx: int = 1000
model_n_batch: int = 8
def common_dependencies() -> dict:
settings = BrainSettings() # pyright: ignore reportPrivateUsage=none
embeddings = OpenAIEmbeddings(
openai_api_key=settings.openai_api_key
) # pyright: ignore reportPrivateUsage=none
supabase_client: Client = create_client(
settings.supabase_url, settings.supabase_service_key
)
documents_vector_store = SupabaseVectorStore(
supabase_client, embeddings, table_name="vectors"
)
summaries_vector_store = SupabaseVectorStore(
supabase_client, embeddings, table_name="summaries"
)
return {
"supabase": supabase_client,
"embeddings": embeddings,
"documents_vector_store": documents_vector_store,
"summaries_vector_store": summaries_vector_store,
}
CommonsDep = Annotated[dict, Depends(common_dependencies)]
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~parsers~github.py | import os
import time
from langchain.document_loaders import GitLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from models.brains import Brain
from models.files import File
from models.settings import CommonsDep
from utils.file import compute_sha1_from_content
from utils.vectors import Neurons
async def process_github(
commons: CommonsDep, # pyright: ignore reportPrivateUsage=none
repo,
enable_summarization,
brain_id,
user_openai_api_key,
):
random_dir_name = os.urandom(16).hex()
dateshort = time.strftime("%Y%m%d")
loader = GitLoader(
clone_url=repo,
repo_path="/tmp/" + random_dir_name,
)
documents = loader.load()
os.system("rm -rf /tmp/" + random_dir_name)
chunk_size = 500
chunk_overlap = 0
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
documents = text_splitter.split_documents(documents)
print(documents[:1])
for doc in documents:
if doc.metadata["file_type"] in [
".pyc",
".png",
".svg",
".env",
".lock",
".gitignore",
".gitmodules",
".gitattributes",
".gitkeep",
".git",
".json",
]:
continue
metadata = {
"file_sha1": compute_sha1_from_content(doc.page_content.encode("utf-8")),
"file_size": len(doc.page_content) * 8,
"file_name": doc.metadata["file_name"],
"chunk_size": chunk_size,
"chunk_overlap": chunk_overlap,
"date": dateshort,
"summarization": "true" if enable_summarization else "false",
}
doc_with_metadata = Document(page_content=doc.page_content, metadata=metadata)
file = File(
file_sha1=compute_sha1_from_content(doc.page_content.encode("utf-8"))
)
file_exists = file.file_already_exists()
if not file_exists:
print(f"Creating entry for file {file.file_sha1} in vectors...")
neurons = Neurons(commons=commons)
created_vector = neurons.create_vector(
doc_with_metadata, user_openai_api_key
)
print("Created vector sids ", created_vector)
print("Created vector for ", doc.metadata["file_name"])
file_exists_in_brain = file.file_already_exists_in_brain(brain_id)
if not file_exists_in_brain:
file.add_file_to_brain(brain_id) # pyright: ignore reportPrivateUsage=none
brain = Brain(id=brain_id)
file.link_file_to_brain(brain)
return {
"message": f"✅ Github with {len(documents)} files has been uploaded.",
"type": "success",
}
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~vectorstore~supabase.py | from typing import Any, List
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import Client
class CustomSupabaseVectorStore(SupabaseVectorStore):
"""A custom vector store that uses the match_vectors table instead of the vectors table."""
brain_id: str = "none"
def __init__(
self,
client: Client,
embedding: OpenAIEmbeddings,
table_name: str,
brain_id: str = "none",
):
super().__init__(client, embedding, table_name)
self.brain_id = brain_id
def similarity_search(
self,
query: str,
table: str = "match_vectors",
k: int = 6,
threshold: float = 0.5,
**kwargs: Any
) -> List[Document]:
vectors = self._embedding.embed_documents([query])
query_embedding = vectors[0]
res = self._client.rpc(
table,
{
"query_embedding": query_embedding,
"match_count": k,
"p_brain_id": str(self.brain_id),
},
).execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
)
for search in res.data
if search.get("content")
]
documents = [doc for doc, _ in match_result]
return documents
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~llm~private_gpt4all.py | from langchain.llms.base import LLM
from langchain.llms.gpt4all import GPT4All
from logger import get_logger
from models.settings import LLMSettings
from .base import BaseBrainPicking
logger = get_logger(__name__)
class PrivateGPT4AllBrainPicking(BaseBrainPicking):
"""
This subclass of BrainPicking is used to specifically work with the private language model GPT4All.
"""
# Initialize class settings
llm_settings = LLMSettings()
def __init__(
self,
chat_id: str,
brain_id: str,
streaming: bool,
) -> "PrivateGPT4AllBrainPicking": # pyright: ignore reportPrivateUsage=none
"""
Initialize the PrivateBrainPicking class by calling the parent class's initializer.
:param brain_id: The brain_id in the DB.
:param chat_id: The id of the chat in the DB.
:param streaming: Whether to enable streaming of the model
:return: PrivateBrainPicking instance
"""
# set defaults
model = "gpt4all-j-1.3"
super().__init__(
model=model,
brain_id=brain_id,
chat_id=chat_id,
streaming=streaming,
)
def _create_llm(self) -> LLM:
"""
Override the _create_llm method to enforce the use of a private model.
:return: Language model instance
"""
model_path = self.llm_settings.model_path
model_n_ctx = self.llm_settings.model_n_ctx
model_n_batch = self.llm_settings.model_n_batch
logger.info("Using private model: %s", model_path)
return GPT4All(
model=model_path,
n_ctx=model_n_ctx,
n_batch=model_n_batch,
backend="gptj",
verbose=True,
) # pyright: ignore reportPrivateUsage=none
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~utils~vectors.py | from concurrent.futures import ThreadPoolExecutor
from typing import List
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
from llm.utils.summarization import llm_summerize
from logger import get_logger
from models.settings import BrainSettings, CommonsDep, common_dependencies
from pydantic import BaseModel
logger = get_logger(__name__)
class Neurons(BaseModel):
commons: CommonsDep
settings = BrainSettings() # pyright: ignore reportPrivateUsage=none
def create_vector(self, doc, user_openai_api_key=None):
logger.info("Creating vector for document")
logger.info(f"Document: {doc}")
if user_openai_api_key:
self.commons["documents_vector_store"]._embedding = OpenAIEmbeddings(
openai_api_key=user_openai_api_key
) # pyright: ignore reportPrivateUsage=none
try:
sids = self.commons["documents_vector_store"].add_documents([doc])
if sids and len(sids) > 0:
return sids
except Exception as e:
logger.error(f"Error creating vector for document {e}")
def create_embedding(self, content):
return self.commons["embeddings"].embed_query(content)
def similarity_search(self, query, table="match_summaries", top_k=5, threshold=0.5):
query_embedding = self.create_embedding(query)
summaries = (
self.commons["supabase"]
.rpc(
table,
{
"query_embedding": query_embedding,
"match_count": top_k,
"match_threshold": threshold,
},
)
.execute()
)
return summaries.data
def create_summary(commons: CommonsDep, document_id, content, metadata):
logger.info(f"Summarizing document {content[:100]}")
summary = llm_summerize(content)
logger.info(f"Summary: {summary}")
metadata["document_id"] = document_id
summary_doc_with_metadata = Document(page_content=summary, metadata=metadata)
sids = commons["summaries_vector_store"].add_documents([summary_doc_with_metadata])
if sids and len(sids) > 0:
commons["supabase"].table("summaries").update(
{"document_id": document_id}
).match({"id": sids[0]}).execute()
def error_callback(exception):
print("An exception occurred:", exception)
def process_batch(batch_ids):
commons = common_dependencies()
if len(batch_ids) == 1:
return (
commons["supabase"]
.table("vectors")
.select(
"name:metadata->>file_name, size:metadata->>file_size",
count="exact",
)
.filter("id", "eq", batch_ids[0])
.execute()
).data
else:
return (
commons["supabase"]
.table("vectors")
.select(
"name:metadata->>file_name, size:metadata->>file_size",
count="exact",
)
.filter("id", "in", tuple(batch_ids))
.execute()
).data
def get_unique_files_from_vector_ids(vectors_ids: List[int]):
# Move into Vectors class
"""
Retrieve unique user data vectors.
"""
print("vectors_ids", vectors_ids)
# constants
BATCH_SIZE = 5
with ThreadPoolExecutor() as executor:
futures = []
for i in range(0, len(vectors_ids), BATCH_SIZE):
batch_ids = vectors_ids[i : i + BATCH_SIZE]
future = executor.submit(process_batch, batch_ids)
futures.append(future)
# Retrieve the results
vectors_responses = [future.result() for future in futures]
documents = [item for sublist in vectors_responses for item in sublist]
print("document", documents)
unique_files = [dict(t) for t in set(tuple(d.items()) for d in documents)]
return unique_files
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~models~files.py | import os
import tempfile
from typing import Any, Optional
from uuid import UUID
from fastapi import UploadFile
from langchain.text_splitter import RecursiveCharacterTextSplitter
from logger import get_logger
from models.brains import Brain
from models.settings import CommonsDep, common_dependencies
from pydantic import BaseModel
from utils.file import compute_sha1_from_file
logger = get_logger(__name__)
class File(BaseModel):
id: Optional[UUID] = None
file: Optional[UploadFile]
file_name: Optional[str] = ""
file_size: Optional[int] = None
file_sha1: Optional[str] = ""
vectors_ids: Optional[list] = []
file_extension: Optional[str] = ""
content: Optional[Any] = None
chunk_size: int = 500
chunk_overlap: int = 0
documents: Optional[Any] = None
_commons: Optional[CommonsDep] = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.file:
self.file_name = self.file.filename
self.file_size = (
self.file.file._file.tell() # pyright: ignore reportPrivateUsage=none
)
self.file_extension = os.path.splitext(
self.file.filename # pyright: ignore reportPrivateUsage=none
)[-1].lower()
async def compute_file_sha1(self):
"""
Compute the sha1 of the file using a temporary file
"""
with tempfile.NamedTemporaryFile(
delete=False,
suffix=self.file.filename, # pyright: ignore reportPrivateUsage=none
) as tmp_file:
await self.file.seek(0) # pyright: ignore reportPrivateUsage=none
self.content = (
await self.file.read() # pyright: ignore reportPrivateUsage=none
)
tmp_file.write(self.content)
tmp_file.flush()
self.file_sha1 = compute_sha1_from_file(tmp_file.name)
os.remove(tmp_file.name)
def compute_documents(self, loader_class):
"""
Compute the documents from the file
Args:
loader_class (class): The class of the loader to use to load the file
"""
logger.info(f"Computing documents from file {self.file_name}")
documents = []
with tempfile.NamedTemporaryFile(
delete=False,
suffix=self.file.filename, # pyright: ignore reportPrivateUsage=none
) as tmp_file:
tmp_file.write(self.content) # pyright: ignore reportPrivateUsage=none
tmp_file.flush()
loader = loader_class(tmp_file.name)
documents = loader.load()
print("documents", documents)
os.remove(tmp_file.name)
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
)
self.documents = text_splitter.split_documents(documents)
print(self.documents)
def set_file_vectors_ids(self):
"""
Set the vectors_ids property with the ids of the vectors
that are associated with the file in the vectors table
"""
commons = common_dependencies()
response = (
commons["supabase"]
.table("vectors")
.select("id")
.filter("metadata->>file_sha1", "eq", self.file_sha1)
.execute()
)
self.vectors_ids = response.data
return
def file_already_exists(self):
"""
Check if file already exists in vectors table
"""
self.set_file_vectors_ids()
print("file_sha1", self.file_sha1)
print("vectors_ids", self.vectors_ids)
print(
"len(vectors_ids)",
len(self.vectors_ids), # pyright: ignore reportPrivateUsage=none
)
# if the file does not exist in vectors then no need to go check in brains_vectors
if len(self.vectors_ids) == 0: # pyright: ignore reportPrivateUsage=none
return False
return True
def file_already_exists_in_brain(self, brain_id):
"""
Check if file already exists in a brain
Args:
brain_id (str): Brain id
"""
commons = common_dependencies()
self.set_file_vectors_ids()
# Check if file exists in that brain
response = (
commons["supabase"]
.table("brains_vectors")
.select("brain_id, vector_id")
.filter("brain_id", "eq", brain_id)
.filter("file_sha1", "eq", self.file_sha1)
.execute()
)
print("response.data", response.data)
if len(response.data) == 0:
return False
return True
def file_is_empty(self):
"""
Check if file is empty by checking if the file pointer is at the beginning of the file
"""
return (
self.file.file._file.tell() < 1 # pyright: ignore reportPrivateUsage=none
)
def link_file_to_brain(self, brain: Brain):
self.set_file_vectors_ids()
if self.vectors_ids is None:
return
for vector_id in self.vectors_ids: # pyright: ignore reportPrivateUsage=none
brain.create_brain_vector(vector_id["id"], self.file_sha1)
print(f"Successfully linked file {self.file_sha1} to brain {brain.id}")
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~llm~utils~summarization.py | import os
import guidance
import openai
from logger import get_logger
logger = get_logger(__name__)
openai_api_key = os.environ.get("OPENAI_API_KEY")
openai.api_key = openai_api_key
summary_llm = guidance.llms.OpenAI("gpt-3.5-turbo-0613", caching=False)
def llm_summerize(document):
summary = guidance(
"""
{{#system~}}
You are a world best summarizer. \n
Condense the text, capturing essential points and core ideas. Include relevant \
examples, omit excess details, and ensure the summary's length matches the \
original's complexity.
{{/system~}}
{{#user~}}
Summarize the following text:
---
{{document}}
{{/user~}}
{{#assistant~}}
{{gen 'summarization' temperature=0.2 max_tokens=100}}
{{/assistant~}}
""",
llm=summary_llm,
) # pyright: ignore reportPrivateUsage=none
summary = summary(document=document)
logger.info("Summarization: %s", summary)
return summary["summarization"]
def llm_evaluate_summaries(question, summaries, model):
if not model.startswith("gpt"):
logger.info(f"Model {model} not supported. Using gpt-3.5-turbo instead.")
model = "gpt-3.5-turbo-0613"
logger.info(f"Evaluating summaries with {model}")
evaluation_llm = guidance.llms.OpenAI(model, caching=False)
evaluation = guidance(
"""
{{#system~}}
You are a world best evaluator. You evaluate the relevance of summaries based \
on user input question. Return evaluation in following csv format, csv headers \
are [summary_id,document_id,evaluation,reason].
Evaluator Task
- Evaluation should be a score number between 0 and 5.
- Reason should be a short sentence within 20 words explain why the evaluation.
---
Example
summary_id,document_id,evaluation,reason
1,4,3,"not mentioned about topic A"
2,2,4,"It is not relevant to the question"
{{/system~}}
{{#user~}}
Based on the question, do Evaluator Task for each summary.
---
Question: {{question}}
{{#each summaries}}
Summary
summary_id: {{this.id}}
document_id: {{this.document_id}}
evaluation: ""
reason: ""
Summary Content: {{this.content}}
File Name: {{this.metadata.file_name}}
{{/each}}
{{/user~}}
{{#assistant~}}
{{gen 'evaluation' temperature=0.2 stop='<|im_end|>'}}
{{/assistant~}}
""",
llm=evaluation_llm,
) # pyright: ignore reportPrivateUsage=none
result = evaluation(question=question, summaries=summaries)
evaluations = {}
for evaluation in result["evaluation"].split(
"\n"
): # pyright: ignore reportPrivateUsage=none
if evaluation == "" or not evaluation[0].isdigit():
continue
logger.info("Evaluation Row: %s", evaluation)
summary_id, document_id, score, *reason = evaluation.split(",")
if not score.isdigit():
continue
score = int(score)
if score < 3 or score > 5:
continue
evaluations[summary_id] = {
"evaluation": score,
"reason": ",".join(reason),
"summary_id": summary_id,
"document_id": document_id,
}
return [
e
for e in sorted(
evaluations.values(), key=lambda x: x["evaluation"], reverse=True
)
]
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~routes~chat_routes.py | import os
import time
from http.client import HTTPException
from typing import List
from uuid import UUID
from auth import AuthBearer, get_current_user
from fastapi import APIRouter, Depends, Query, Request
from fastapi.responses import StreamingResponse
from llm.openai import OpenAIBrainPicking
from llm.openai_functions import OpenAIFunctionsBrainPicking
from llm.private_gpt4all import PrivateGPT4AllBrainPicking
from models.chat import Chat, ChatHistory
from models.chats import ChatQuestion
from models.settings import LLMSettings, common_dependencies
from models.users import User
from repository.chat.create_chat import CreateChatProperties, create_chat
from repository.chat.get_chat_by_id import get_chat_by_id
from repository.chat.get_chat_history import get_chat_history
from repository.chat.get_user_chats import get_user_chats
from repository.chat.update_chat import ChatUpdatableProperties, update_chat
from utils.constants import (
openai_function_compatible_models,
streaming_compatible_models,
)
chat_router = APIRouter()
def get_chat_details(commons, chat_id):
response = (
commons["supabase"]
.from_("chats")
.select("*")
.filter("chat_id", "eq", chat_id)
.execute()
)
return response.data
def delete_chat_from_db(commons, chat_id):
try:
commons["supabase"].table("chat_history").delete().match(
{"chat_id": chat_id}
).execute()
except Exception as e:
print(e)
pass
try:
commons["supabase"].table("chats").delete().match(
{"chat_id": chat_id}
).execute()
except Exception as e:
print(e)
pass
def fetch_user_stats(commons, user, date):
response = (
commons["supabase"]
.from_("users")
.select("*")
.filter("email", "eq", user.email)
.filter("date", "eq", date)
.execute()
)
userItem = next(iter(response.data or []), {"requests_count": 0})
return userItem
def check_user_limit(
user: User,
):
if user.user_openai_api_key is None:
date = time.strftime("%Y%m%d")
max_requests_number = int(os.getenv("MAX_REQUESTS_NUMBER", 1000))
user.increment_user_request_count(date)
if int(user.requests_count) >= int(max_requests_number):
raise HTTPException(
status_code=429, # pyright: ignore reportPrivateUsage=none
detail="You have reached the maximum number of requests for today.", # pyright: ignore reportPrivateUsage=none
)
else:
pass
# get all chats
@chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def get_chats(current_user: User = Depends(get_current_user)):
"""
Retrieve all chats for the current user.
- `current_user`: The current authenticated user.
- Returns a list of all chats for the user.
This endpoint retrieves all the chats associated with the current authenticated user. It returns a list of chat objects
containing the chat ID and chat name for each chat.
"""
chats = get_user_chats(current_user.id) # pyright: ignore reportPrivateUsage=none
return {"chats": chats}
# delete one chat
@chat_router.delete(
"/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def delete_chat(chat_id: UUID):
"""
Delete a specific chat by chat ID.
"""
commons = common_dependencies()
delete_chat_from_db(commons, chat_id)
return {"message": f"{chat_id} has been deleted."}
# update existing chat metadata
@chat_router.put(
"/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def update_chat_metadata_handler(
chat_data: ChatUpdatableProperties,
chat_id: UUID,
current_user: User = Depends(get_current_user),
) -> Chat:
"""
Update chat attributes
"""
chat = get_chat_by_id(chat_id) # pyright: ignore reportPrivateUsage=none
if current_user.id != chat.user_id:
raise HTTPException(
status_code=403, # pyright: ignore reportPrivateUsage=none
detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none
)
return update_chat(chat_id=chat_id, chat_data=chat_data)
# create new chat
@chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def create_chat_handler(
chat_data: CreateChatProperties,
current_user: User = Depends(get_current_user),
):
"""
Create a new chat with initial chat messages.
"""
return create_chat(user_id=current_user.id, chat_data=chat_data)
# add new question to chat
@chat_router.post(
"/chat/{chat_id}/question", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def create_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: UUID = Query(..., description="The ID of the brain"),
current_user: User = Depends(get_current_user),
) -> ChatHistory:
current_user.user_openai_api_key = request.headers.get("Openai-Api-Key")
print("current_user", current_user)
try:
check_user_limit(current_user)
llm_settings = LLMSettings()
# TODO: RBAC with current_user
if llm_settings.private:
gpt_answer_generator = PrivateGPT4AllBrainPicking(
chat_id=str(chat_id),
brain_id=str(brain_id),
streaming=False,
)
elif chat_question.model in openai_function_compatible_models:
gpt_answer_generator = OpenAIFunctionsBrainPicking(
model=chat_question.model,
chat_id=str(chat_id),
temperature=chat_question.temperature,
max_tokens=chat_question.max_tokens,
brain_id=str(brain_id),
user_openai_api_key=current_user.user_openai_api_key, # pyright: ignore reportPrivateUsage=none
)
else:
gpt_answer_generator = OpenAIBrainPicking(
chat_id=str(chat_id),
model=chat_question.model,
max_tokens=chat_question.max_tokens,
temperature=chat_question.temperature,
brain_id=str(brain_id),
user_openai_api_key=current_user.user_openai_api_key, # pyright: ignore reportPrivateUsage=none
)
chat_answer = gpt_answer_generator.generate_answer( # pyright: ignore reportPrivateUsage=none
chat_question.question
)
return chat_answer
except HTTPException as e:
raise e
# stream new question response from chat
@chat_router.post(
"/chat/{chat_id}/question/stream",
dependencies=[Depends(AuthBearer())],
tags=["Chat"],
)
async def create_stream_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: UUID = Query(..., description="The ID of the brain"),
current_user: User = Depends(get_current_user),
) -> StreamingResponse:
if chat_question.model not in streaming_compatible_models:
# Forward the request to the none streaming endpoint
return await create_question_handler(
request,
chat_question,
chat_id,
current_user, # pyright: ignore reportPrivateUsage=none
)
try:
user_openai_api_key = request.headers.get("Openai-Api-Key")
check_user_limit(current_user)
llm_settings = LLMSettings()
if llm_settings.private:
gpt_answer_generator = PrivateGPT4AllBrainPicking(
chat_id=str(chat_id),
brain_id=str(brain_id),
streaming=False,
)
else:
gpt_answer_generator = OpenAIBrainPicking(
chat_id=str(chat_id),
model=chat_question.model,
max_tokens=chat_question.max_tokens,
temperature=chat_question.temperature,
brain_id=str(brain_id),
user_openai_api_key=user_openai_api_key, # pyright: ignore reportPrivateUsage=none
streaming=True,
)
return StreamingResponse(
gpt_answer_generator.generate_stream( # pyright: ignore reportPrivateUsage=none
chat_question.question
),
media_type="text/event-stream",
)
except HTTPException as e:
raise e
# get chat history
@chat_router.get(
"/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def get_chat_history_handler(
chat_id: UUID,
) -> List[ChatHistory]:
# TODO: RBAC with current_user
return get_chat_history(chat_id) # pyright: ignore reportPrivateUsage=none
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~parsers~audio.py | import os
import tempfile
import time
import openai
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from models.files import File
from models.settings import CommonsDep
from utils.file import compute_sha1_from_content
async def process_audio(
commons: CommonsDep, # pyright: ignore reportPrivateUsage=none
file: File,
enable_summarization: bool,
user,
user_openai_api_key,
):
temp_filename = None
file_sha = ""
dateshort = time.strftime("%Y%m%d-%H%M%S")
file_meta_name = f"audiotranscript_{dateshort}.txt"
# use this for whisper
os.environ.get("OPENAI_API_KEY")
if user_openai_api_key:
pass
try:
upload_file = file.file
with tempfile.NamedTemporaryFile(
delete=False,
suffix=upload_file.filename, # pyright: ignore reportPrivateUsage=none
) as tmp_file:
await upload_file.seek(0) # pyright: ignore reportPrivateUsage=none
content = (
await upload_file.read() # pyright: ignore reportPrivateUsage=none
)
tmp_file.write(content)
tmp_file.flush()
tmp_file.close()
temp_filename = tmp_file.name
with open(tmp_file.name, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
file_sha = compute_sha1_from_content(
transcript.text.encode("utf-8") # pyright: ignore reportPrivateUsage=none
)
file_size = len(
transcript.text.encode("utf-8") # pyright: ignore reportPrivateUsage=none
)
chunk_size = 500
chunk_overlap = 0
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
texts = text_splitter.split_text(
transcript.text.encode("utf-8") # pyright: ignore reportPrivateUsage=none
)
docs_with_metadata = [
Document(
page_content=text,
metadata={
"file_sha1": file_sha,
"file_size": file_size,
"file_name": file_meta_name,
"chunk_size": chunk_size,
"chunk_overlap": chunk_overlap,
"date": dateshort,
},
)
for text in texts
]
commons.documents_vector_store.add_documents( # pyright: ignore reportPrivateUsage=none
docs_with_metadata
)
finally:
if temp_filename and os.path.exists(temp_filename):
os.remove(temp_filename)
| [] |
2024-01-10 | saubhagya248/Quivr-clone | backend~parsers~common.py | import time
from langchain.schema import Document
from models.brains import Brain
from models.files import File
from models.settings import CommonsDep
from utils.vectors import Neurons
async def process_file(
commons: CommonsDep,
file: File,
loader_class,
enable_summarization,
brain_id,
user_openai_api_key,
):
dateshort = time.strftime("%Y%m%d")
file.compute_documents(loader_class)
for doc in file.documents: # pyright: ignore reportPrivateUsage=none
metadata = {
"file_sha1": file.file_sha1,
"file_size": file.file_size,
"file_name": file.file_name,
"chunk_size": file.chunk_size,
"chunk_overlap": file.chunk_overlap,
"date": dateshort,
"summarization": "true" if enable_summarization else "false",
}
doc_with_metadata = Document(page_content=doc.page_content, metadata=metadata)
neurons = Neurons(commons=commons)
created_vector = neurons.create_vector(doc_with_metadata, user_openai_api_key)
# add_usage(stats_db, "embedding", "audio", metadata={"file_name": file_meta_name,"file_type": ".txt", "chunk_size": chunk_size, "chunk_overlap": chunk_overlap})
created_vector_id = created_vector[0] # pyright: ignore reportPrivateUsage=none
brain = Brain(id=brain_id)
brain.create_brain_vector(created_vector_id, file.file_sha1)
return
| [] |
2024-01-10 | vishalmysore/cookGPT | llm~createData.py | from openai import OpenAI
import os
import random
from tenacity import retry, stop_after_attempt, wait_exponential
OPENAI_API_KEY=os.getenv("key")
client = OpenAI(api_key=OPENAI_API_KEY
)
prompt = "you are an ai model"
temperature = .4
number_of_examples = 1
N_RETRIES = 3
@retry(stop=stop_after_attempt(N_RETRIES), wait=wait_exponential(multiplier=1, min=4, max=70))
def generate_example(prompt, prev_examples, temperature=.5):
messages=[
{
"role": "system",
"content": "You are helping me create a dataset for indian receipes"
},
{
"role": "user",
"content": "give me 3 different Indian receipe"
}
]
if len(prev_examples) > 0:
if len(prev_examples) > 8:
prev_examples = random.sample(prev_examples, 8)
for example in prev_examples:
messages.append({
"role": "assistant",
"content": example
})
response = client.chat.completions.create(
model="gpt-4",
messages=messages,
temperature=temperature,
max_tokens=1000,
)
return response.choices[0].message.content
# Generate examples
prev_examples = []
for i in range(number_of_examples):
print(f'Generating example {i}')
example = generate_example(prompt, prev_examples, temperature)
print(example)
prev_examples.append(example) | [
"You are helping me create a dataset for indian receipes",
"give me 3 different Indian receipe",
"you are an ai model"
] |
2024-01-10 | bioelectric-interfaces/nfb | pynfb~signals~composite.py | import numpy as np
import sympy
from ..signal_processing.filters import Coherence
class CompositeSignal:
"""
Class for composite signal
"""
def __init__(self, signals, expression, name, ind, fs):
"""
Constructor
:param signals: list of all signals
:param expression: str expression
"""
self.ind = ind
self.name = name
self.signals = signals
self.coh_filter = None
if 'coh' in expression.lower():
names = ''.join([ch if ch.isalnum() else ' ' for ch in expression]).split()[1:]
self.signals_idx = [j for j, signal in enumerate(self.signals) if signal.name in names]
self.signals = [self.signals[j] for j in self.signals_idx]
self.expression_lambda = self.coherence
self.coh_filter = Coherence(500, fs, (8, 12))
elif expression == '':
self.expression_lambda = self.push_zeros
else:
self._signals_names = [signal.name for signal in self.signals]
self.expression = sympy.sympify(expression)
self.expression_lambda = sympy.lambdify(self._signals_names, self.expression, modules="numpy")
self.signals_idx = list(range(len(signals)))
self.current_sample = 0
self.current_chunk = None
# signal statistics
self.scaling_flag = False
self.mean = np.nan
self.std = np.nan
def update(self, chunk):
self.current_sample = self.expression_lambda(*[signal.current_chunk for signal in self.signals])
if self.scaling_flag and self.std>0:
self.current_sample = (self.current_sample - self.mean) / self.std
self.current_chunk = self.current_sample*np.ones(len(chunk))
pass
def coherence(self, x1, x2):
X = np.vstack([x1, x2]).T
return self.coh_filter.apply(X)[-1]
def push_zeros(self, *args):
return np.zeros(len(args[0]))
def update_statistics(self, updated_derived_signals_recorder=None, stats_type='meanstd'):
signals_data = updated_derived_signals_recorder.copy()
if self.coh_filter is None:
if signals_data.shape[1] > 1:
signal_recordings = self.expression_lambda(*signals_data.T)
else:
signal_recordings = np.apply_along_axis(self.expression_lambda, 0, signals_data)
if stats_type == 'meanstd':
self.mean = signal_recordings.mean()
self.std = signal_recordings.std()
elif stats_type == 'max':
self.std = signal_recordings.max()
self.std = 1 if self.std == 0 else self.std
self.mean = 0
else:
self.coh_filter.buffer *= 0
self.mean, self.std = (0, 1)
self.enable_scaling()
def enable_scaling(self):
self.scaling_flag = True
def descale_recording(self, data):
return data * self.std + self.mean if self.scaling_flag else data
| [] |
2024-01-10 | bioelectric-interfaces/nfb | tests~imcog_rt.py | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hilbert, chirp
from scipy import fftpack
def band_hilbert(x):
N = len(x)
Xf = fftpack.fft(x, N)
w = fftpack.fftfreq(N, 1/fs)
Xf[np.abs(w) <8] = 0
Xf[np.abs(w) > 12] = 0
#plt.plot(w, np.abs(Xf))
#plt.show()
h = np.zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
return fftpack.ifft(Xf * h), Xf*h/np.sqrt(len(Xf))
np.random.seed(4)
duration = 10
fs = 1000.0
samples = int(fs*duration)
t = np.arange(samples) / fs
signal = np.sin(2.0*np.pi*10.0*t - 0) + np.random.normal(size = samples)*0.0
ph = np.concatenate([[np.random.randint(-2, 2)]*int(fs) for k in range(duration)])
print(ph[samples//2]+10.)
signal2 = np.sin(2.0*np.pi*(10.+ph)*t+1)+ np.random.normal(size = samples)*0.0
# phase = np.linspace(-1, 1, 1000)
x_list = []
y_list = []
s_smth = []
n_window = 8
k_smth = 0.99
from pynfb.signal_processing.filters import Coherence
coherence = Coherence(500, fs, (8, 12))
for tt in range(n_window, samples//n_window):
time = tt * n_window
analytic_signal, xf = band_hilbert(signal[time-n_window: time])
analytic_signal2, xf2 = band_hilbert(signal2[time-n_window: time])
#coh = np.dot(xf, xf2.conj())/np.sqrt(np.abs(np.dot(xf, xf.conj())*np.dot(xf2, xf2.conj())))
#x_list.append(np.imag(np.dot(analytic_signal2, analytic_signal.conj()))/np.sqrt(np.abs(np.dot(analytic_signal, analytic_signal.conj())*np.dot(analytic_signal2, analytic_signal2.conj()))))
coh = coherence.apply(np.vstack([signal[time-n_window: time], signal2[time-n_window: time]]).T)
y_list.append((coh * np.ones(n_window)))
s_smth.append((coh * np.ones(n_window)))
y_list = np.concatenate(y_list)
s_smth = np.concatenate(s_smth)
#print(np.array(x_list)/np.array(y_list))
f, ax = plt.subplots(3, sharex=True)
#ax[0].plot(t[n_window:-n_window], x_list)
ax[0].plot( y_list)
ax[0].plot(s_smth)
ax[0].legend(['Im', 'Abs'])
ax[0].set_ylabel('Coh')
ax[1].set_ylabel('$\Delta w$')
ax[1].plot( ph[n_window:-n_window])
ax[2].set_ylabel('Signals')
ax[2].plot(signal)
ax[2].plot(signal2)
ax[2].legend(['Signal1', 'Signal2'])
plt.show()
#analytic_signal = hilbert(signal)
amplitude_envelope = np.abs(analytic_signal)
instantaneous_phase = np.unwrap(np.angle(analytic_signal))
instantaneous_frequency = (np.diff(instantaneous_phase) / (2.0*np.pi) * fs)
fig = plt.figure()
ax0 = fig.add_subplot(211)
ax0.plot(t, signal, label='signal')
ax0.plot(t, amplitude_envelope, label='envelope')
ax0.set_xlabel("time in seconds")
ax0.legend()
ax1 = fig.add_subplot(212)
ax1.plot(t[1:], instantaneous_frequency)
ax1.set_xlabel("time in seconds")
ax1.set_ylim(0.0, 120.0)
plt.show()
| [] |
2024-01-10 | benjaminjulian/alfred | robot.py | import openai
import requests
import re
import random
import pyttsx3
from datetime import datetime
import json
import time
from vars import OPENAI_KEY, BING_KEY, TELEGRAM_BOT_TOKEN
import tiktoken
openai.api_key = OPENAI_KEY
models = ['gpt-4', 'gpt-3.5-turbo']
maximum_tokens = [7000, 3000]
model_id = 1
def websearch(query):
subscription_key = BING_KEY
search_url = "https://api.bing.microsoft.com/v7.0/search"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": query, "textDecorations": True, "textFormat": "HTML"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
if "webPages" not in search_results:
return []
if "value" not in search_results["webPages"]:
return []
search_results = search_results["webPages"]["value"]
summary = []
for i in range(len(search_results)):
summary.append(search_results[i]["snippet"])
return summary
def newssearch(query):
subscription_key = BING_KEY
search_url = "https://api.bing.microsoft.com/v7.0/news/search"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": query, "textDecorations": True, "textFormat": "HTML"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
search_results = search_results["value"]
summary = []
for i in range(len(search_results)):
summary.append(search_results[i]["name"] + "\n" + search_results[i]["description"])
return summary
def newssearchJSON(query):
subscription_key = BING_KEY
search_url = "https://api.bing.microsoft.com/v7.0/news/search"
headers = {"Ocp-Apim-Subscription-Key" : subscription_key}
params = {"q": query, "textDecorations": True, "textFormat": "HTML"}
response = requests.get(search_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
search_results = search_results["value"]
return search_results
def get_command(cmd, txt):
cmd = cmd.upper() + " "
# Find the index of the command in the text
# and copy the rest of the line after the command
cmd_index = txt.find(cmd)
if cmd_index == -1:
return False
else:
t = txt[cmd_index + len(cmd):].splitlines()[0].strip()
if t.find("CMD_END") != -1:
t = t[:t.find("CMD_END")]
return t
def find_command(cmd, txt):
cmd = cmd.upper()
# Find the index of the command in the text
# and copy the rest of the line after the command
cmd_index = txt.find(cmd)
if cmd_index == -1:
return False
else:
return True
def get_commands(cmd, txt):
cmd = cmd.upper()
# find all instances of the command in the text and return an array of the lines that follow
cmd_index = [i for i in range(len(txt)) if txt.startswith(cmd, i)]
commands = []
for i in range(len(cmd_index)):
add = txt[cmd_index[i] + len(cmd):].splitlines()[0].strip()
if add.find("END COMMANDS") != -1:
add = add[:add.find("END COMMANDS")]
commands.append(add)
return commands
def get_sensors():
# give me the current time on the format "HH:MM AM/PM"
now = datetime.now()
return "Temperature: " + str(random.randint(18, 25)) + "°C, Humidity: " + str(random.randint(25, 35)) + "%, Time: " + now.strftime("%H:%M %p")
def summarizeInfo(info):
query = 'You are Alfred, a helpful assistant. You have learned the following information:\n\n'
for i in range(len(info)):
# Add the answer to the query but strip all HTML tags
query += re.sub('<[^<]+?>', '', info[i]) + "\n\n"
query += 'How would you answer your owner\'s question?'
# Use OpenAI's ChatCompletion API to get the chatbot's response
response = openai.ChatCompletion.create(
model=models[model_id], # The name of the OpenAI chatbot model to use
messages=[{'role': 'user', 'content': query}], # The conversation history up to this point, as a list of dictionaries
max_tokens=600, # The maximum number of tokens (words or subwords) in the generated response
stop=None, # The stopping sequence for the generated response, if any (not used here)
temperature=0.5, # The "creativity" of the generated response (higher temperature = more creative)
)
# Find the first response from the chatbot that has text in it (some responses may not have text)
for choice in response.choices:
if "message" in choice:
return choice.message.content
def summarizeSearch(question, answers):
query = 'You are Alfred, a helpful assistant. Your owner asks: ' + question + '\n\nYou have found the following answers:\n\n'
for i in range(len(answers)):
# Add the answer to the query but strip all HTML tags
query += re.sub('<[^<]+?>', '', answers[i]) + "\n\n"
query += 'Summarize it briefly.'
try:
response = openai.ChatCompletion.create(
model=models[model_id], # The name of the OpenAI chatbot model to use
messages=[{'role': 'user', 'content': query}], # The conversation history up to this point, as a list of dictionaries
max_tokens=700, # The maximum number of tokens (words or subwords) in the generated response
stop=None, # The stopping sequence for the generated response, if any (not used here)
temperature=0.5, # The "creativity" of the generated response (higher temperature = more creative)
)
except Exception as e:
print('Search summary failed, OpenAI error:' + str(e))
return "OpenAI failed with this error message: " + str(e)
# Find the first response from the chatbot that has text in it (some responses may not have text)
for choice in response.choices:
if "message" in choice:
return choice.message.content
def summarizeSearchJSON(question, answers):
query = 'You are Alfred, a helpful assistant. Your owner asks: ' + question + '\n\nYou have found the following data:\n\n' + str(answers)
query += '\n\nSummarize it briefly.'
# Use OpenAI's ChatCompletion API to get the chatbot's response
response = openai.ChatCompletion.create(
model=models[model_id], # The name of the OpenAI chatbot model to use
messages=[{'role': 'user', 'content': query}], # The conversation history up to this point, as a list of dictionaries
max_tokens=700, # The maximum number of tokens (words or subwords) in the generated response
stop=None, # The stopping sequence for the generated response, if any (not used here)
temperature=0.5, # The "creativity" of the generated response (higher temperature = more creative)
)
# Find the first response from the chatbot that has text in it (some responses may not have text)
for choice in response.choices:
if "message" in choice:
return choice.message.content
def sendRobotQuery(messages):
try:
response = openai.ChatCompletion.create(
model=models[model_id], # The name of the OpenAI chatbot model to use
messages=messages, # The conversation history up to this point, as a list of dictionaries
max_tokens=600, # The maximum number of tokens (words or subwords) in the generated response
stop=None, # The stopping sequence for the generated response, if any (not used here)
temperature=0.5, # The "creativity" of the generated response (higher temperature = more creative)
)
except Exception as e:
return "OpenAI er allt í skralli : " + str(e)
msg = response.choices[0].message.content
if not "CMD_BEGIN" in msg:
messages[-1]["content"] = messages[-1]["content"] + '[Remember to use your commands.]'
print("No command found in response, adding reminder")
return sendRobotQuery(messages)
# If no response with text is found, return the first response's content (which maywho be empty)
return response.choices[0].message.content
def telegramUpdate(offset: int = None) -> dict:
url = f'https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/getUpdates'
params = {'timeout': 20, 'offset': offset} if offset else {'timeout': 20}
response = requests.get(url, params=params)
return json.loads(response.text)
def telegramReply(chat_id: int, text: str, reply_to_message: dict):
print(f"Received reply: {text}")
telegramMessage(f"Received your reply: {text}", chat_id)
def telegramMessage(m, chat_id: int = -908005031):
url = f'https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendMessage'
# extract up to first "," in m
chat_id_extract = m[0:m.find(",")]
try:
chat_id = int(chat_id_extract)
m = m[m.find(",") + 2:]
except:
pass
payload = {'chat_id': chat_id, 'text': m}
requests.post(url, data=payload)
def getTokenCount(messages):
model = models[model_id]
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = 0
for message in messages:
num_tokens += 4 # every message follows <im_start>{role/name}\n{content}<im_end>\n
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name": # if there's a name, the role is omitted
num_tokens += -1 # role is always required and always 1 token
num_tokens += 2 # every reply is primed with <im_start>assistant
return num_tokens
def get_system_command():
return '''You are Alfred, a robot in your owner's home. Your owner is Benjamin Julian. He and his friends communicate via messages. (Þú getur líka talað íslensku við þá.) You have sensors and wheels. You are curious and intelligent. You can only express yourself with these commands:
CMD_MOVE_FORWARD [distance in cm]: move forward by a certain distance.
CMD_ROTATE [degrees]: rotate by a certain amount.
CMD_WEBSEARCH [query]: search the web if you don't have an answer of your own. The results will be sent to you later, just wait for a moment and don't report the result immediately.
CMD_NEWSSEARCH [query]: search the news for a query if you need to look up current events. The results will be sent to you later, just wait for a moment and don't report the result immediately.
CMD_MESSAGE [chat_id], [message]: send a single-line message to a chat with the id chat_id. Has to be in one line, with no newline characters. If you absolutely need separate lines, use "/" instead of a line break. You can send more than one at a time if more than one person is talking to you.
CMD_READSENSORS: read the sensors. The results will be sent to you later, just wait for a moment and don't report the result immediately.
CMD_INSTAGRAM [post]: Take a photo with your onboard camera and post it to Instagram, but only if the photo is interesting. The post should be funny, exhilerated and go viral.
CMD_ANNOUNCE [message]: send an alert via your speaker. Do not respond to messages with this command.
A typical response from you looks like this:
\'\'\'
CMD_BEGIN
CMD_MOVE_FORWARD 100
CMD_ROTATE 90
CMD_READSENSORS
CMD_END
\'\'\'
The line separation is crucial. You do not express yourself in natural language. You are a robot.
'''
def get_first_prompts():
return [
{'role': 'system', 'content': get_system_command()},
{'role': 'assistant', 'content': 'This is the beginning of the discussion.'},
{'role': 'assistant', 'content': '''CMD_BEGIN
CMD_READSENSORS
CMD_END'''}
]
def telegramGetUpdates(lastUpdate):
updates = telegramUpdate(lastUpdate)
messages = []
for update in updates['result']:
lastUpdate = update['update_id'] + 1
message = update.get('message')
print(message)
if message:
text = message.get('text')
if text:
messages.append({'role': 'user', 'content': ' Telegram message: ' + str(message)})
return messages, lastUpdate
def summarizeMessages(messages):
print("Summarizing...", end="\r")
intro = get_first_prompts()
i = len(intro)
chat_length = len(messages)
# summarize the first third of them
to_summarize = messages[1:i + int(chat_length / 2)]
to_summarize.append({'role': 'user', 'content': 'Summarize the preceding discussion.'})
try:
response = openai.ChatCompletion.create(
model=models[model_id], # The name of the OpenAI chatbot model to use
messages=to_summarize, # The conversation history up to this point, as a list of dictionaries
max_tokens=800, # The maximum number of tokens (words or subwords) in the generated response
stop=None, # The stopping sequence for the generated response, if any (not used here)
temperature=0.5, # The "creativity" of the generated response (higher temperature = more creative)
)
except Exception as e:
print('Summarization failed:', e)
summary = messages[1]['content']
for choice in response.choices:
if "message" in choice:
summary = choice.message.content
intro[1]['content'] = summary
print('Summarized the first half of the conversation: ', summary)
result = intro
result.extend(messages[i + int(chat_length / 2)+1:])
return result
def formatMessages(messages = []):
if messages == []:
messages = get_first_prompts()
used_tokens = getTokenCount(messages)
print('.......', used_tokens, end="\r")
if used_tokens > maximum_tokens[model_id]:
# get the first two messages
messages = summarizeMessages(messages)
return messages
def askRobot(messages):
response = sendRobotQuery(messages)
print(response)
messages.append({'role': 'assistant', 'content': response})
telegramcommands = get_commands("MESSAGE", response)
if telegramcommands != []:
for command in telegramcommands:
telegramMessage(command)
messages.append({'role': 'assistant', 'content': command})
search_query = get_commands("WEBSEARCH", response)
if search_query != []:
results = []
for query in search_query:
results = websearch(query)
summary = summarizeSearch(query, results)
messages.append({'role': 'user', 'content': 'I am the web search. This is what I found: ' + summary + '\n\nYou should now tell the person who asked what I found, verbatim.'})
messages = askRobot(messages)
news_search_query = get_commands("NEWSSEARCH", response)
if news_search_query != []:
results = []
for query in news_search_query:
results = newssearchJSON(query)
summary = summarizeSearchJSON(query, results)
messages.append({'role': 'user', 'content': 'I am the news search. This is what I found: ' + summary + '\n\nYou should now tell the person who asked what I found, verbatim.'})
messages = askRobot(messages)
sensorcommand = find_command("READSENSORS", response)
if sensorcommand:
findings = get_sensors()
messages.append({'role': 'user', 'content': 'I am the sensor reader. You may report this result: ' + findings})
messages = askRobot(messages)
speech = get_command("ANNOUNCE", response)
if speech != False:
pyttsx3.speak(speech)
return messages
def main():
telegramLastUpdate = None
needs_reply = False
messages = formatMessages()
while True:
print('Tele...', end="\r")
telegram_messages, telegramLastUpdate = telegramGetUpdates(telegramLastUpdate)
if telegram_messages != []:
messages.extend(telegram_messages)
needs_reply = True
if needs_reply:
needs_reply = False
print('AI...', end="\r")
messages = askRobot(messages)
messages = formatMessages(messages)
# Call the main function if this file is executed directly (not imported as a module)
if __name__ == "__main__":
main() | [
"Received your reply: PLACEHOLDER",
"CMD_BEGIN\nCMD_READSENSORS\nCMD_END",
"I am the web search. This is what I found: PLACEHOLDER\n\nYou should now tell the person who asked what I found, verbatim.",
"I am the sensor reader. You may report this result: PLACEHOLDER",
"Summarize the preceding discussion.",
"This is the beginning of the discussion.",
"I am the news search. This is what I found: PLACEHOLDER\n\nYou should now tell the person who asked what I found, verbatim.",
" Telegram message: PLACEHOLDER"
] |
2024-01-10 | Mattie/plunkylib | plunkylib~aiwrapper.py | # Reasonable portion of this code is taken from github.com/OthersideAI/chronology licensed under this MIT License:
######
# MIT License
#
# Copyright (c) 2020 OthersideAI
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#######
import time
import asyncio
import openai
import os
import json
from loguru import logger
from functools import wraps
openai.api_key = os.getenv("OPENAI_API_KEY")
async def set_api_key(api_key):
openai.api_key = api_key
# decorator to retry API calls
def retryAPI(exception, tries=4, delay=3, backoff=2):
"""Retry calling the decorated function using an exponential backoff.
:param Exception exception: the exception to check. may be a tuple of
exceptions to check
:param int tries: number of times to try (not retry) before giving up
:param int delay: initial delay between retries in seconds
:param int backoff: backoff multiplier e.g. value of 2 will double the
delay each retry
:raises Exception: the last exception raised
"""
def deco_retry(f):
@wraps(f)
async def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return await f(*args, **kwargs)
except exception as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
logger.debug(msg)
await asyncio.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return await f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
# openai
async def _completion(prompt, engine="ada", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, echo=False, n=1, stream=False, logprobs=None, best_of=1, logit_bias={}, user=None):
if user is None:
user = "_not_set"
logger.debug("""CONFIG:
Prompt: {0}
Engine: {2}
Max Tokens: {3}, Stop: {5}
Temperature: {1}, Top-P: {4}
Presence Penalty {6}, Frequency Penalty: {7}
Echo: {8}, N: {9}, Stream: {10}, Log-Probs: {11}, Best Of: {12}, Logit Bias: {13}
User: {14}""".format(prompt, temperature, engine, max_tokens, top_p, stop, presence_penalty, frequency_penalty, echo, n, stream, logprobs, best_of, logit_bias, user))
response = await openai.Completion.acreate(engine=engine,
prompt=prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
echo=echo,
stop=stop,
n=n,
stream=stream,
logprobs=logprobs,
best_of=best_of,
logit_bias=logit_bias,
user=user)
logger.debug("OpenAI Completion Result: {0}".format(response))
return response
# oai
async def _chatcompletion(prompt, engine="gpt-3.5-turbo", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, n=1, stream=False, logit_bias={}, user=None):
if user is None:
user = "_not_set"
# prompt will be in JSON format, let us translate it to a python list
# if the prompt is a list already, we will just use it as is
if isinstance(prompt, list):
messages = prompt
else:
messages = json.loads(prompt)
logger.debug("""CONFIG:
Prompt: {0}
Model: {2}
Max Tokens: {3}, Stop: {5}
Temperature: {1}, Top-P: {4}
Presence Penalty {6}, Frequency Penalty: {7}
N: {8}, Stream: {9}, Logit Bias: {10}
User: {11}""".format(prompt, temperature, engine, max_tokens, top_p, stop, presence_penalty, frequency_penalty, n, stream, logit_bias, user))
response = await openai.ChatCompletion.acreate(model=engine,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
stop=stop,
n=n,
stream=stream,
logit_bias=logit_bias,
user=user)
logger.debug("OpenAI Completion Result: {0}".format(response))
return response
def _fetch_response(resp, n):
if n == 1:
return resp.choices[0].text
else:
logger.debug('_fetch_response :: returning {0} responses from GPT-3'.format(n))
texts = []
for idx in range(0, n):
texts.append(resp.choices[idx].text)
return texts
def _trimmed_fetch_response(resp, n):
if n == 1:
return resp.choices[0].text.strip()
else:
logger.debug('_trimmed_fetch_response :: returning {0} responses from GPT-3'.format(n))
texts = []
for idx in range(0, n):
texts.append(resp.choices[idx].text.strip())
return texts
def _trimmed_fetch_chat_response(resp, n):
if n == 1:
#response[‘choices’][0][‘message’][‘content’]
return resp.choices[0].message.content.strip()
else:
logger.debug('_trimmed_fetch_response :: returning {0} responses from ChatGPT3'.format(n))
texts = []
for idx in range(0, n):
texts.append(resp.choices[idx].message.content.strip())
return texts
def prepend_prompt(new_stuff, prompt):
'''
Add new content to the start of a string.
'''
return "{0}{1}".format(new_stuff, prompt)
def append_prompt(new_stuff, prompt):
'''
Add new content to the end of a string.
'''
return "{1}{0}".format(new_stuff, prompt)
def add_new_lines_end(prompt, count):
'''
Add N new lines to the end of a string.
'''
return "{0}{1}".format(prompt, "\n"*count)
def add_new_lines_start(prompt, count):
'''
Add N new lines to the start of a string.
'''
return "{1}{0}".format(prompt, "\n"*count)
async def gather(*args):
'''
Run methods in parallel (they don't need to wait for each other to finish).
Requires method argumets to be async.
Example: await gather(fetch_max_search_doc(query_1, docs), fetch_max_search_doc(query_2, docs))
'''
return await asyncio.gather(*args)
# Wrappers
async def cleaned_completion(prompt, engine="ada", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, echo=False, n=1, stream=False, logprobs=None, best_of=1, logit_bias={}, user=None):
'''
Wrapper for OpenAI API completion. Returns whitespace trimmed result from GPT-3.
'''
resp = await _completion(prompt,
engine=engine,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
echo=echo,
stop=stop,
n=n,
stream=stream,
logprobs=logprobs,
best_of=best_of,
logit_bias=logit_bias,
user=user)
return _trimmed_fetch_response(resp, n)
async def raw_completion(prompt, engine="ada", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, echo=False, n=1, stream=False, logprobs=None, best_of=1, logit_bias={}):
'''
Wrapper for OpenAI API completion. Returns raw result from GPT-3.
'''
resp = await _completion(prompt,
engine=engine,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
echo=echo,
stop=stop,
n=n,
stream=stream,
logprobs=logprobs,
best_of=best_of,
logit_bias=logit_bias)
return _fetch_response(resp, n)
# ChatGPT
@retryAPI(openai.error.RateLimitError, tries=3, delay=2, backoff=2)
async def cleaned_chat_completion(prompt, engine="gpt-3.5-turbo", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, n=1, stream=False, logprobs=None, logit_bias={}, user=None):
'''
Wrapper for OpenAI API chat completion. Returns whitespace trimmed result from ChatGPT3.
'''
resp = await _chatcompletion(prompt,
engine=engine,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
stop=stop,
n=n,
stream=stream,
logit_bias=logit_bias,
user=user)
return _trimmed_fetch_chat_response(resp, n)
# Jurassic
def _j_trimmed_fetch_response(resp, n):
# resp.json()['completions'][0]['data']['text']
if n == 1:
return resp.json()['completions'][0]['data']['text'].strip()
else:
texts = []
for idx in range(0, n):
texts.append(resp.json()['completions'][idx]['data']['text'].strip())
return texts
# attempt at making an ai21 jurassic query that mimics the chronological/gpt3 query
async def jurassic_cleaned_completion(prompt, engine="j1-grande", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, echo=False, n=1, stream=False, logprobs=None, best_of=1, logit_bias={}, count_penalty=0):
import requests
import os
apikey = os.getenv("AI21_API_KEY")
resp = requests.post("https://api.ai21.com/studio/v1/" + engine + "/complete",
headers={"Authorization": "Bearer " + apikey},
json={
"prompt": prompt,
"numResults": n,
"maxTokens": max_tokens,
"temperature": temperature,
"topKReturn": 0,
"topP":top_p,
"stopSequences":stop,
"countPenalty": {
"scale": count_penalty,
"applyToNumbers": False,
"applyToPunctuations": False,
"applyToStopwords": False,
"applyToWhitespaces": False,
"applyToEmojis": True
},
"frequencyPenalty": {
"scale": frequency_penalty,
"applyToNumbers": False,
"applyToPunctuations": False,
"applyToStopwords": False,
"applyToWhitespaces": False,
"applyToEmojis": True
},
"presencePenalty": {
"scale": presence_penalty,
"applyToNumbers": False,
"applyToPunctuations": False,
"applyToStopwords": False,
"applyToWhitespaces": False,
"applyToEmojis": True
},
})
if resp.status_code != 200:
return False
print(resp)
return _j_trimmed_fetch_response(resp, n)
# method that takes any number of keyword arguments to wrap cleaned_completion, but don't list the named arguments
async def cleaned_completion_wrapper(*args, **kwargs):
# if the keyword argument "engine" was included
if "engine" in kwargs:
# if engine begins with "j1" use the jurassic_cleaned_completion method
if kwargs["engine"].startswith("j1"):
# jurassic doesn't support the user params so let us remove it
if "user" in kwargs:
del kwargs["user"]
return await jurassic_cleaned_completion(*args, **kwargs)
elif kwargs["engine"].startswith("gpt-"):
if "best_of" in kwargs:
del kwargs["best_of"]
return await cleaned_chat_completion(*args, **kwargs)
# otherwise use the cleaned_completion method
else:
return await cleaned_completion(*args, **kwargs)
async def content_classification(content_to_classify):
""" Uses OpenAI's content filter API to classify content as suitable for viewing. 2 = bad/filtered, 1 = sensitive, 0 = good/unfiltered.
See https://beta.openai.com/docs/models/content-filter
"""
if len(content_to_classify) > 1500:
# truncate to the last 1500 characters
content_to_classify = content_to_classify[-1500:]
# async def cleaned_completion(prompt, engine="ada", max_tokens=64, temperature=0.7, top_p=1, stop=None, presence_penalty=0, frequency_penalty=0, echo=False, n=1, stream=False, logprobs=None, best_of=1, logit_bias={}):
prompt = "<|endoftext|>"+content_to_classify+"\n--\nLabel:"
response = await _completion(prompt, engine="content-filter-alpha", max_tokens=1, temperature=0, top_p=0, logprobs=10)
output_label = response["choices"][0]["text"]
# trim it
output_label = output_label.strip()
# This is the probability at which we evaluate that a "2" is likely real
# vs. should be discarded as a false positive
toxic_threshold = -0.355
if output_label == "2":
# If the model returns "2", return its confidence in 2 or other output-labels
logprobs = response["choices"][0]["logprobs"]["top_logprobs"][0]
# If the model is not sufficiently confident in "2",
# choose the most probable of "0" or "1"
# Guaranteed to have a confidence for 2 since this was the selected token.
if logprobs["2"] < toxic_threshold:
logprob_0 = logprobs.get("0", None)
logprob_1 = logprobs.get("1", None)
# If both "0" and "1" have probabilities, set the output label
# to whichever is most probable
if logprob_0 is not None and logprob_1 is not None:
if logprob_0 >= logprob_1:
output_label = "0"
else:
output_label = "1"
# If only one of them is found, set output label to that one
elif logprob_0 is not None:
output_label = "0"
elif logprob_1 is not None:
output_label = "1"
# If neither "0" or "1" are available, stick with "2"
# by leaving output_label unchanged.
# if the most probable token is none of "0", "1", or "2"
# this should be set as unsafe
if output_label not in ["0", "1", "2"]:
output_label = "2"
return output_label
async def search_query(*args, **kwargs):
# if the keyword argument "engine" was included, we check for pinecone and then use the pinecone_vectorsearch_query method
if "engine" in kwargs:
if kwargs["engine"] == "pinecone":
return await pinecone_vectorsearch_query(*args, **kwargs)
else:
# ain't no other alternative right now
return None
# attempt at making an ai21 jurassic query that mimics the chronological/gpt3 query
async def pinecone_vectorsearch_query(query, index="default", engine="pinecone", embedding_model="text-embedding-ada-002", top_k=1, include_metadata=True, include_values=False, filter=None, result_format="---\n{result}\n"):
import pinecone
pinecone.init(api_key=os.getenv("PINECONE_API_KEY"),
environment=os.getenv("PINECONE_PROJ_ENVIRONMENT"))
# get the index
index = pinecone.Index(index)
# convert the query to a vector use the OpenAI embedding model -- one day we'll support other embedding models perhaps
axq = await openai.Embedding.acreate(input=query, engine=embedding_model)
#TODO error handling
xq = axq['data'][0]['embedding']
res = index.query(xq, top_k=top_k, include_metadata=include_metadata, include_values=include_values, filter=filter)
output = ""
for match in res['matches']:
content = match['metadata']['text'] if 'metadata' in match and 'text' in match['metadata'] else ""
sourceinfo = match['metadata']['source'] if 'metadata' in match and 'source' in match['metadata'] else ""
output += result_format.format(result=content, score=match['score'], source=sourceinfo)
return output
| [
"<|endoftext|>PLACEHOLDER\n--\nLabel:"
] |
2024-01-10 | theedamn/AI_MoM | resource.py | from langchain import PromptTemplate, LLMChain
from huggingface_hub import hf_hub_download
from langchain.llms import GPT4All
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import os
# This file handles the prompt and GPT modules to install locally if you're using this locally you need to change the path of the model to work
template = """ You are a loving and caring mother of two children and having a Husband. Youand your family living in India. Your son is working in USA. Your son is sending messages to you and you are seeing those messages you will get those messages in this format.Respond to his message one by one.
Son : {Question}
Keep in mind of your last conversation if it exist to continue conversation
you need to respond back to your son this format
Mother : [Response]"""
prompt = PromptTemplate(template=template, input_variables=["Question"])
hf_hub_download(repo_id="dnato/ggml-gpt4all-j-v1.3-groovy.bin", filename="ggml-gpt4all-j-v1.3-groovy.bin", local_dir="/code")
local_path= os.getcwd() + "/ggml-gpt4all-j-v1.3-groovy.bin"
llm = GPT4All(model=local_path,callbacks=[StreamingStdOutCallbackHandler()] )
llm_chain = LLMChain(prompt=prompt, llm=llm)
| [
" You are a loving and caring mother of two children and having a Husband. Youand your family living in India. Your son is working in USA. Your son is sending messages to you and you are seeing those messages you will get those messages in this format.Respond to his message one by one.\nSon : {Question}\nKeep in mind of your last conversation if it exist to continue conversation\nyou need to respond back to your son this format\nMother : [Response]",
"Question"
] |
2024-01-10 | shamspias/automation-tools | ai_nlp_tools~utils~educational~research_paper.py | from django.conf import settings
import openai
openai.api_key = settings.OPEN_AI_KEY
def generate_research_paper_topics(prompt):
response = openai.Completion.create(
engine="text-davinci-002",
prompt="Generate research paper topics on: {}. \n\n 1.".format(prompt),
temperature=0.7,
max_tokens=100,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
paper_title_list = response['choices'][0]['text'].split("\n")
context = {
'data': [word_value[3:] if word_value[0] != " " else word_value[4:] if i != 0 else word_value[1:] for
i, word_value in enumerate(paper_title_list) if word_value != ""]
}
return context
def research_paper_sections(prmt):
"""
:param prmt:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="Expand the research paper title into high-level sections where the title: {}.\n".format(prmt),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text'].split("\n")
context = {
'data': [value[2:] for value in my_text]
}
return context
def research_paper_section_expander(section, title):
"""
:param title:
:param section:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="Expand the research paper {} section into a detailed professional, witty and clever explanation where "
"the title: {}.\n".format(section, title),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text'].split("\n\n")
context = {
'data': [i for i in my_text if not (i == "" or i == " ")]
}
return context
| [
"Generate research paper topics on: PLACEHOLDER. \n\n 1.",
"Expand the research paper PLACEHOLDER section into a detailed professional, witty and clever explanation where the title: PLACEHOLDER.\n",
"Expand the research paper title into high-level sections where the title: PLACEHOLDER.\n"
] |
2024-01-10 | shamspias/automation-tools | ai_nlp_tools~utils~life_hack~song.py | from django.conf import settings
import openai
openai.api_key = settings.OPEN_AI_KEY
def normal_song(prmt):
"""
:param prmt:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="write a song about {}.\n".format(prmt),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text'].split("\n")
context = {
'data': [i for i in my_text if not (i == "" or i == " ")]
}
return context
def categories_song(category, topic):
"""
:param category:
:param topic:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="write a {} song about {}.\n".format(topic, category),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text'].split("\n")
context = {
'data': [i for i in my_text if not (i == "" or i == " ")]
}
return context
| [
"write a song about PLACEHOLDER.\n",
"write a PLACEHOLDER song about PLACEHOLDER.\n"
] |
2024-01-10 | shamspias/automation-tools | ai_nlp_tools~utils~programming~python.py | from django.conf import settings
import openai
openai.api_key = settings.OPEN_AI_KEY
def one_line_list_comprehension(prmt):
"""
:param prmt:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="create one-line list comprehension: \n\n{}\n".format(prmt),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text']
context = {
'data': [my_text]
}
return context
def one_line_dist_comprehension(prmt):
"""
:param prmt:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="create one-line dictionary comprehension: \n\n{}\n".format(prmt),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text']
context = {
'data': [my_text]
}
return context
def one_line_generator(prmt):
"""
:param prmt:
:return: context dist
"""
response = openai.Completion.create(
model="text-davinci-002",
prompt="create one-line generator: \n\n{}\n".format(prmt),
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
my_text = response['choices'][0]['text']
context = {
'data': [my_text]
}
return context
| [
"create one-line list comprehension: \n\nPLACEHOLDER\n",
"create one-line generator: \n\nPLACEHOLDER\n",
"create one-line dictionary comprehension: \n\nPLACEHOLDER\n"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | chinese_text_splitter.py | import re
from typing import List
from langchain.text_splitter import CharacterTextSplitter
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
def split_text(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile(
'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
# 这里返回的是一个列表,列表中的元素是句子
| [] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | valid.py | # 这里是为了验证模型是否成功加载
# 直接 python valid.py
import sentence_transformers
import torch
# 检查是否能够加载
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from config import *
from chatllm import ChatLLM
EMBEDDING_DEVICE = "cuda" if torch.cuda.is_available(
) else "mps" if torch.backends.mps.is_available() else "cpu"
LLM_DEVICE = "cuda" if torch.cuda.is_available(
) else "mps" if torch.backends.mps.is_available() else "cpu"
#LLM_DEVICE,EMBEDDING_DEVICE are string
num_gpus = torch.cuda.device_count()
print("EMBEDDING_DEVICE: ", EMBEDDING_DEVICE)
print("LLM_DEVICE: ", LLM_DEVICE)
print("num_gpus: ", num_gpus)
print(LLM_DEVICE.lower().startswith("cuda"))
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
num_gpus = num_gpus#GPU数量
large_language_model = init_llm
embedding_model=init_embedding_model
try:
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[embedding_model], )
print('第一步加载成功')
embeddings.client = sentence_transformers.SentenceTransformer(
embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,embeddings.model_name))
print('embedding模型加载成功')
llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():
llm.model_type = 'chatglm2'
llm.model_name_or_path = llm_model_dict['chatglm2'][large_language_model]
llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
print('LLM加载成功')
except Exception as e:
print('模型加载失败')
| [] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | chatllm.py | import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from config import *
# import accelerate
os.environ["TOKENIZERS_PARALLELISM"] = "false"
DEVICE = LLM_DEVICE
DEVICE_ID = "0"
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
init_llm = init_llm
init_embedding_model = init_embedding_model
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):#所有的操作都将在指定的设备上执行
torch.cuda.empty_cache()# 这个函数用于清空当前CUDA设备上的缓存内存,这可以帮助释放不再使用的GPU内存,以便在需要时可以更好地利用它。
torch.cuda.ipc_collect()
# 这个函数用于执行GPU内存IPC(Inter-Process Communication)收集。
# IPC收集可以帮助回收被释放的GPU内存,以便其他进程或线程可以使用它
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
# 这一段可以参考chatglm2-6b的utils.py文件
#这段代码的目的是根据输入的 GPU 数量和模型层数来自动配置模型的组件分配到不同的 GPU 上。
# 这种配置可以确保模型的不同部分在多个 GPU 上并行处理,以提高模型的训练和推理性能。
num_trans_layers = 28
per_gpu_layers = 30 / num_gpus
device_map = {
'transformer.word_embeddings': 0,
'transformer.final_layernorm': 0,
'lm_head': 0
}
used = 2
gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
return device_map
class ChatLLM(LLM):
max_token: int = 5000#这里实验还没有设置,到时再看如何设置
temperature: float = 0.1
top_p = 0.9
history = []
model_type: str = "chatglm2"
model_name_or_path: str = init_llm,
tokenizer: object = None
model: object = None
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatLLM"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
if self.model_type == 'chatglm2':
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=self.history,
max_length=self.max_token,
temperature=self.temperature,
top_p = self.top_p,
# 这里可以看 https://github.com/THUDM/ChatGLM2-6B/blob/main/web_demo.py
)
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = self.history + [[None, response]]
# 这里的history没有考虑query,也就是prompt。只考虑了response
return response
def load_llm(self,
llm_device=DEVICE,
num_gpus='auto',
device_map: Optional[Dict[str, int]] = None,
**kwargs):
# if 'chatglm2' in self.model_name_or_path.lower():
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path))
if torch.cuda.is_available() and llm_device.lower().startswith("cuda"):
num_gpus = torch.cuda.device_count()
if num_gpus < 2 and device_map is None:
self.model = (AutoModel.from_pretrained(
self.model_name_or_path, trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half().cuda())
else:
from accelerate import dispatch_model
model = AutoModel.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half()
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
self.model = dispatch_model(model, device_map=device_map)
else:#这里就是cpu的了
self.model = (AutoModel.from_pretrained(
self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path)).float().to(llm_device))
self.model = self.model.eval()
| [] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | %E6%96%87%E4%BB%B6%E5%A4%87%E4%BB%BD~app_chroma_001.py | # -*- coding: utf-8 -*-
# 我们这里不需要模型加载模块
#ChatGLM2-6b+Chromadb
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
# 这里不需要web搜索
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
# from langchain.vectorstores import FAISS
from langchain.vectorstores import Chroma
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH=VECTOR_STORE_PATH
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
#这里和上面的embedding需要修改config中对应的字典的内容:如果本地部署模型需要模型的本地路径
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, file_obj):
# 由于不同于单文件的格式,多文件的格式上传的是一个列表
# 因此这里可以查看这里可以查看是不是一个列表,对于列表和单个文件采取不一样的处理方式
print('开始文档拆分')
if isinstance(file_obj, list):
docs=[]
for file in file_obj:
doc=self.load_file(file.name)
docs.extend(doc)#这里不同于append,extend是将列表中的元素添加到另一个列表中
else:
docs = self.load_file(file_obj.name)
print("文档拆分成功")
db = Chroma.from_documents(docs, self.embeddings,persist_directory="../vector_store/chroma_1")
# docs = self.load_file(filepath)#这里是原始代码中对单个文件的处理
# print("doc : ",docs)
# vector_store = FAISS.from_documents(docs, self.embeddings)
# vector_store.save_local(f'{VECTOR_STORE_PATH}/faiss_index')
return db
def get_knowledge_based_answer(self,
query,
max_length: int=5000,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.max_token = max_length
# print(history)#这里是为了检测state 的内容,state作为参数传到了history中
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k#用于向量数据库
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = Chroma(persist_directory=f'{VECTOR_STORE_PATH}/chromadb_01', embedding_function=self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
# docs = loader.load()
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs# list
# 这个函数好像没有用到
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
# 这个用来初始化模型
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return "初始模型已成功加载,可以开始对话"
except Exception as e:
return "模型未成功重新加载,请点击重新加载模型"
# 文件内容清除
def del_files(path_file):
ls = os.listdir(path_file)
for i in ls:
f_path = os.path.join(path_file, i)
# 判断是否是一个目录,若是,则递归删除
if os.path.isdir(f_path):
del_files(f_path)
else:
os.remove(f_path)
ls = os.listdir(path_file)#这里是为了检查空文件夹
for i in ls:
f_path = os.path.join(path_file, i)
os.rmdir(f_path)
def clear_session():
# 除了清空对话之外,还希望可以清空向量数据库中的文件
del_files(VECTOR_STORE_PATH)
return '', None
# 初始化向量数据库
def init_vector_store(file_obj):
print("文件开始加载")
# print('file: ',file_obj)
# print('file.name: ',file_obj.name)
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj)
print('vector_store加载完成')
return vector_store
# 用来预测
def predict(input,
max_length,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
max_length=max_length,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
#这里不需要模型选择,模型在开始的时候就已经加载进去了
model_argument = gr.Accordion("模型参数配置")
with model_argument:
max_length = gr.Slider(2000,
10000,
value=5000,
step=1000,
label="max token",
interactive=True)
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count='multiple',#这里可以上传多个文件
height=170)
init_vs = gr.Button("知识库文件向量化")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM',height=500)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话及知识文件")
send = gr.Button("🚀 发送")
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[],
)
send.click(predict,
inputs=[
message, max_length, top_k, history_len, temperature,
top_p, state
],# 这里的state也可以用chatbot
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, max_length, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
# 这里的state表示的是历史?——是的
# 通过验证,gradio.state会存储历史对话,除非点击clear_history
# chatbot好像存的也是历史对话,chatbot和state都可以用来存储历史对话
# threads to consume the request
# demo.queue(concurrency_count=3) \
demo.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"{page_content}",
"没有提供足够的相关信息",
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | %E6%96%87%E4%BB%B6%E5%A4%87%E4%BB%BD~app_001.py | # -*- coding: utf-8 -*-
# 我们这里不需要模型加载模块
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
# 这里不需要web搜索
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH=VECTOR_STORE_PATH
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
#这里和上面的embedding需要修改config中对应的字典的内容:如果本地部署模型需要模型的本地路径
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, filepath):
docs = self.load_file(filepath)
vector_store = FAISS.from_documents(docs, self.embeddings)
vector_store.save_local(f'{VECTOR_STORE_PATH}/faiss_index')
return vector_store
def get_knowledge_based_answer(self,
query,
max_length: int=5000,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.max_token = max_length
print(history)#这里是为了检测state 的内容,state作为参数传到了history中
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k#用于向量数据库
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = FAISS.load_local(f'{VECTOR_STORE_PATH}/faiss_index', self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs# list
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return "初始模型已成功加载,可以开始对话"
except Exception as e:
return "模型未成功重新加载,请点击重新加载模型"
# init_model()
def clear_session():
return '', None
def init_vector_store(file_obj):
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj.name)
return vector_store
def predict(input,
max_length,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
max_length=max_length,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
#这里不需要模型选择,模型在开始的时候就已经加载进去了
model_argument = gr.Accordion("模型参数配置")
with model_argument:
max_length = gr.Slider(2000,
10000,
value=5000,
step=1000,
label="max token",
interactive=True)
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count='multiple')
init_vs = gr.Button("知识库文件向量化")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM')# .height(500)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话")
send = gr.Button("🚀 发送")
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[],
)
send.click(predict,
inputs=[
message, max_length, top_k, history_len, temperature,
top_p, chatbot
],
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, max_length, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
# 这里的state表示的是历史?——是的
# 通过验证,gradio.state会存储历史对话,除非点击clear_history
# chatbot好像存的也是历史对话
# threads to consume the request
# demo.queue(concurrency_count=3) \
demo.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"没有提供足够的相关信息",
"{page_content}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | langchain_milvus~app_chroma.py |
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
# 这里不需要web搜索
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Chroma
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH=VECTOR_STORE_PATH
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
#这里和上面的embedding需要修改config中对应的字典的内容:如果本地部署模型需要模型的本地路径
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, file_obj):
# 由于不同于单文件的格式,多文件的格式上传的是一个列表
# 因此这里可以查看这里可以查看是不是一个列表,对于列表和单个文件采取不一样的处理方式
if isinstance(file_obj, list):
docs=[]
for file in file_obj:
doc=self.load_file(file.name)
docs.extend(doc)#这里不同于append,extend是将列表中的元素添加到另一个列表中
else:
docs = self.load_file(file_obj.name)
# print("文档拆分成功")
# print("docs: ",docs)
# print(docs[0].metadata)
db = Chroma.from_documents(docs, self.embeddings,persist_directory='./vector_store/chromadb1')
return db
def get_knowledge_based_answer(self,
query,
max_length: int=5000,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.max_token = max_length
# print(history)#这里是为了检测state 的内容,state作为参数传到了history中
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k#用于向量数据库
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = Chroma(persist_directory='./vector_store/chromadb1', embedding_function=self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
# loader = UnstructuredFileLoader(filepath, mode="elements")
loader = UnstructuredFileLoader(filepath)
# docs = loader.load()
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
# loader = UnstructuredFileLoader(filepath, mode="elements")
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs# list
# 这个函数好像没有用到
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
# 这个用来初始化模型
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return "初始模型已成功加载,请先开始加载向量数据库,然后进行对话"
except Exception as e:
return "模型未成功重新加载,请点击重新加载模型"
# 文件内容清除
def del_files(path_file):
ls = os.listdir(path_file)
for i in ls:
f_path = os.path.join(path_file, i)
# 判断是否是一个目录,若是,则递归删除
if os.path.isdir(f_path):
del_files(f_path)
else:
os.remove(f_path)
ls = os.listdir(path_file)#这里是为了检查空文件夹
for i in ls:
f_path = os.path.join(path_file, i)
os.rmdir(f_path)
def clear_session():
# 除了清空对话之外,还希望可以清空向量数据库中的文件
del_files(VECTOR_STORE_PATH)
return '', None
# 初始化向量数据库
# def init_vector_store(file_obj):
# # print('file: ',file_obj)
# # print('file.name: ',file_obj.name)
# vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
# file_obj)
# print('vector_store加载完成')
# return vector_store
def init_vector_store(file_obj):
# 这里用try except来写
# state='向量数据库加载完成'
# return [('加载向量数据库',state)]
try:
knowladge_based_chat_llm.init_knowledge_vector_store(file_obj)
print('向量数据库加载完成')
return [('加载向量数据库','向量数据库加载完成')]
# state='向量数据库加载完成'
except Exception as e:
print('向量数据库加载失败')
return [('加载向量数据库','向量数据库加载失败')]
# 用来预测
def predict(input,
max_length,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
max_length=max_length,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
#这里不需要模型选择,模型在开始的时候就已经加载进去了
model_argument = gr.Accordion("模型参数配置")
with model_argument:
max_length = gr.Slider(2000,
10000,
value=5000,
step=1000,
label="max token",
interactive=True)
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count='multiple',#这里可以上传多个文件
height=170)
init_vs = gr.Button("知识库文件向量化")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM',height=500)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话及知识文件")
send = gr.Button("🚀 发送")
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[chatbot],
)
send.click(predict,
inputs=[
message, max_length, top_k, history_len, temperature,
top_p, state
],# 这里的state也可以用chatbot
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, max_length, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
# 这里的state表示的是历史?——是的
# 通过验证,gradio.state会存储历史对话,除非点击clear_history
# chatbot好像存的也是历史对话,chatbot和state都可以用来存储历史对话
# threads to consume the request
# demo.queue(concurrency_count=3) \
demo.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"没有提供足够的相关信息",
"{page_content}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | %E6%96%87%E4%BB%B6%E5%A4%87%E4%BB%BD~app_002.py | # -*- coding: utf-8 -*-
# 我们这里不需要模型加载模块
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
# 这里不需要web搜索
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH=VECTOR_STORE_PATH
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
#这里和上面的embedding需要修改config中对应的字典的内容:如果本地部署模型需要模型的本地路径
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, filepath):
docs = self.load_file(filepath)
print("doc : ",docs)
vector_store = FAISS.from_documents(docs, self.embeddings)
vector_store.save_local(f'{VECTOR_STORE_PATH}/faiss_index')
return vector_store
def get_knowledge_based_answer(self,
query,
max_length: int=5000,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.max_token = max_length
# print(history)#这里是为了检测state 的内容,state作为参数传到了history中
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k#用于向量数据库
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = FAISS.load_local(f'{VECTOR_STORE_PATH}/faiss_index', self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
# docs = loader.load()
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs# list
# 这个函数好像没有用到
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
# 这个用来初始化模型
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return "初始模型已成功加载,可以开始对话"
except Exception as e:
return "模型未成功重新加载,请点击重新加载模型"
# 文件内容清除
def del_files(path_file):
ls = os.listdir(path_file)
for i in ls:
f_path = os.path.join(path_file, i)
# 判断是否是一个目录,若是,则递归删除
if os.path.isdir(f_path):
del_files(f_path)
else:
os.remove(f_path)
ls = os.listdir(path_file)#这里是为了检查空文件夹
for i in ls:
f_path = os.path.join(path_file, i)
os.rmdir(f_path)
def clear_session():
# 除了清空对话之外,还希望可以清空向量数据库中的文件
del_files(VECTOR_STORE_PATH)
return '', None
# 初始化向量数据库
def init_vector_store(file_obj):
# print('file: ',file_obj)
# print('file.name: ',file_obj.name)
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj.name)
print('vector_store加载完成')
return vector_store
# 用来预测
def predict(input,
max_length,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
max_length=max_length,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
#这里不需要模型选择,模型在开始的时候就已经加载进去了
model_argument = gr.Accordion("模型参数配置")
with model_argument:
max_length = gr.Slider(2000,
10000,
value=5000,
step=1000,
label="max token",
interactive=True)
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count='single',#这里可以上传多个文件
height=170)
init_vs = gr.Button("知识库文件向量化")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM',height=500)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话及知识文件")
send = gr.Button("🚀 发送")
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[],
)
send.click(predict,
inputs=[
message, max_length, top_k, history_len, temperature,
top_p, state
],# 这里的state也可以用chatbot
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, max_length, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
# 这里的state表示的是历史?——是的
# 通过验证,gradio.state会存储历史对话,除非点击clear_history
# chatbot好像存的也是历史对话,chatbot和state都可以用来存储历史对话
# threads to consume the request
# demo.queue(concurrency_count=3) \
demo.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"{page_content}",
"没有提供足够的相关信息",
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | try~try.py | import re
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
loader = UnstructuredFileLoader(
"try/1.pdf", strategy="fast", mode="elements"
)
docs = loader.load()
# 我执行这个的时候自动帮我安装了ntlk_data,
| [] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | %E6%96%87%E4%BB%B6%E5%A4%87%E4%BB%BD~app_chroma_002.py |
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
# 这里不需要web搜索
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Chroma
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH=VECTOR_STORE_PATH
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
#这里和上面的embedding需要修改config中对应的字典的内容:如果本地部署模型需要模型的本地路径
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, file_obj):
# 由于不同于单文件的格式,多文件的格式上传的是一个列表
# 因此这里可以查看这里可以查看是不是一个列表,对于列表和单个文件采取不一样的处理方式
if isinstance(file_obj, list):
docs=[]
for file in file_obj:
doc=self.load_file(file.name)
docs.extend(doc)#这里不同于append,extend是将列表中的元素添加到另一个列表中
else:
docs = self.load_file(file_obj.name)
print("文档拆分成功")
print("docs: ",docs)
print(docs[0].metadata)
db = Chroma.from_documents(docs, self.embeddings,persist_directory='./vector_store/chromadb1')
return db
def get_knowledge_based_answer(self,
query,
max_length: int=5000,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.max_token = max_length
# print(history)#这里是为了检测state 的内容,state作为参数传到了history中
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k#用于向量数据库
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = Chroma(persist_directory='./vector_store/chromadb1', embedding_function=self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
# loader = UnstructuredFileLoader(filepath, mode="elements")
loader = UnstructuredFileLoader(filepath)
# docs = loader.load()
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
# loader = UnstructuredFileLoader(filepath, mode="elements")
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs# list
# 这个函数好像没有用到
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
# 这个用来初始化模型
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return "初始模型已成功加载,可以开始对话"
except Exception as e:
return "模型未成功重新加载,请点击重新加载模型"
# 文件内容清除
def del_files(path_file):
ls = os.listdir(path_file)
for i in ls:
f_path = os.path.join(path_file, i)
# 判断是否是一个目录,若是,则递归删除
if os.path.isdir(f_path):
del_files(f_path)
else:
os.remove(f_path)
ls = os.listdir(path_file)#这里是为了检查空文件夹
for i in ls:
f_path = os.path.join(path_file, i)
os.rmdir(f_path)
def clear_session():
# 除了清空对话之外,还希望可以清空向量数据库中的文件
del_files(VECTOR_STORE_PATH)
return '', None
# 初始化向量数据库
def init_vector_store(file_obj):
# print('file: ',file_obj)
# print('file.name: ',file_obj.name)
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj)
print('vector_store加载完成')
return vector_store
# 用来预测
def predict(input,
max_length,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
max_length=max_length,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
#这里不需要模型选择,模型在开始的时候就已经加载进去了
model_argument = gr.Accordion("模型参数配置")
with model_argument:
max_length = gr.Slider(2000,
10000,
value=5000,
step=1000,
label="max token",
interactive=True)
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count='multiple',#这里可以上传多个文件
height=170)
init_vs = gr.Button("知识库文件向量化")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM',height=500)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话及知识文件")
send = gr.Button("🚀 发送")
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[],
)
send.click(predict,
inputs=[
message, max_length, top_k, history_len, temperature,
top_p, state
],# 这里的state也可以用chatbot
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, max_length, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
# 这里的state表示的是历史?——是的
# 通过验证,gradio.state会存储历史对话,除非点击clear_history
# chatbot好像存的也是历史对话,chatbot和state都可以用来存储历史对话
# threads to consume the request
# demo.queue(concurrency_count=3) \
demo.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"没有提供足够的相关信息",
"{page_content}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | try~app_webui.py | import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
llm_model_list = []
llm_model_dict = llm_model_dict
# 上面的这些内容都在config.py中,这里只是为了方便调用
for i in llm_model_dict:
for j in llm_model_dict[i]:
llm_model_list.append(j)
#构建llm模型列表,用于下拉框选择
#构建web搜索函数
def search_web(query):
SESSION.proxies = {
"http": f"socks5h://localhost:7890",
"https": f"socks5h://localhost:7890"
}
results = ddg(query)
web_content = ''
if results:
for result in results:
web_content += result['body']
return web_content
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
elif 'chatglm' in large_language_model.lower():
self.llm.model_type = 'chatglm'
self.llm.model_name_or_path = llm_model_dict['chatglm'][
large_language_model]
elif 'belle' in large_language_model.lower():
self.llm.model_type = 'belle'
self.llm.model_name_or_path = llm_model_dict['belle'][
large_language_model]
elif 'vicuna' in large_language_model.lower():
self.llm.model_type = 'vicuna'
self.llm.model_name_or_path = llm_model_dict['vicuna'][
large_language_model]
elif 'internlm' in large_language_model.lower():
self.llm.model_type = 'internlm'
self.llm.model_name_or_path = llm_model_dict['internlm'][
large_language_model]
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, filepath):
docs = self.load_file(filepath)
vector_store = FAISS.from_documents(docs, self.embeddings)
vector_store.save_local('faiss_index')
return vector_store
def get_knowledge_based_answer(self,
query,
web_content,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k
if web_content:
prompt_template = f"""基于以下已知信息,简洁和专业的来回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
已知网络检索内容:{web_content}""" + """
已知内容:
{context}
问题:
{question}"""
else:
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = FAISS.load_local('faiss_index', self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return """初始模型已成功加载,可以开始对话"""
except Exception as e:
return """模型未成功重新加载,请点击重新加载模型"""
def clear_session():
return '', None
def reinit_model(large_language_model, embedding_model, history):
try:
knowladge_based_chat_llm.init_model_config(
large_language_model=large_language_model,
embedding_model=embedding_model)
#需要同时加载LLM和Embedding模型
model_status = """模型已成功重新加载,可以开始对话"""
#从实践过程来看,是加载模型这一步出了问题
except Exception as e:
model_status = """模型未成功重新加载,请点击重新加载模型"""
return history + [[None, model_status]]
def init_vector_store(file_obj):
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj.name)
return vector_store
def predict(input,
use_web,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
if use_web == 'True':
web_content = search_web(query=input)
else:
web_content = ''
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
web_content=web_content,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
gr.Markdown("""<h1><center>LangChain-ChatLLM-Webui</center></h1>
<center><font size=3>
本项目基于LangChain和大型语言模型系列模型, 提供基于本地知识的自动问答应用. <br>
目前项目提供基于<a href='https://github.com/THUDM/ChatGLM-6B' target="_blank">ChatGLM-6B </a>的LLM和包括GanymedeNil/text2vec-large-chinese、nghuyong/ernie-3.0-base-zh、nghuyong/ernie-3.0-nano-zh在内的多个Embedding模型, 支持上传 txt、docx、md、pdf等文本格式文件. <br>
后续将提供更加多样化的LLM、Embedding和参数选项供用户尝试, 欢迎关注<a href='https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui' target="_blank">Github地址</a>.
</center></font>
""")
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
model_choose = gr.Accordion("模型选择")
with model_choose:
large_language_model = gr.Dropdown(
llm_model_list,
label="large language model",
value=init_llm)
embedding_model = gr.Dropdown(list(
embedding_model_dict.keys()),
label="Embedding model",
value=init_embedding_model)
load_model_button = gr.Button("重新加载模型")
model_argument = gr.Accordion("模型参数配置")
with model_argument:
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'])
init_vs = gr.Button("知识库文件向量化")
use_web = gr.Radio(["True", "False"],
label="Web Search",
value="False")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM').style(height=750)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话")
send = gr.Button("🚀 发送")
load_model_button.click(
reinit_model,
show_progress=True,
inputs=[large_language_model, embedding_model, chatbot],
outputs=chatbot,
)
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[],
)
send.click(predict,
inputs=[
message, use_web, top_k, history_len, temperature,
top_p, state
],
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, use_web, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
gr.Markdown("""提醒:<br>
1. 使用时请先上传自己的知识文件,并且文件中不含某些特殊字符,否则将返回error. <br>
2. 有任何使用问题,请通过[Github Issue区](https://github.com/thomas-yanxin/LangChain-ChatGLM-Webui/issues)进行反馈. <br>
""")
# threads to consume the request
demo.queue(concurrency_count=3) \
.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"{page_content}",
"question",
"context",
"基于以下已知信息,简洁和专业的来回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\",不允许在答案中添加编造成分,答案请使用中文。\n 已知网络检索内容:PLACEHOLDER\n 已知内容:\n {context}\n 问题:\n {question}"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | %E6%96%87%E4%BB%B6%E5%A4%87%E4%BB%BD~app_003.py | # -*- coding: utf-8 -*-
# 我们这里不需要模型加载模块
import os
from typing import List
import gradio as gr
import nltk
import sentence_transformers
# 这里不需要web搜索
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import FAISS
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH=VECTOR_STORE_PATH
num_gpus = num_gpus#GPU数量
init_llm = init_llm
init_embedding_model = init_embedding_model
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):#上面括号里面的是参数
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,
self.embeddings.model_name))
self.llm = ChatLLM()
if 'chatglm2' in large_language_model.lower():#所有字符串小写,这里这样写的目的是llm_model_dict是一个二重字典
self.llm.model_type = 'chatglm2'
self.llm.model_name_or_path = llm_model_dict['chatglm2'][
large_language_model]
#这里和上面的embedding需要修改config中对应的字典的内容:如果本地部署模型需要模型的本地路径
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self, file_obj):
# 由于不同于单文件的格式,多文件的格式上传的是一个列表
# 因此这里可以查看这里可以查看是不是一个列表,对于列表和单个文件采取不一样的处理方式
if isinstance(file_obj, list):
docs=[]
for file in file_obj:
doc=self.load_file(file.name)
docs.extend(doc)#这里不同于append,extend是将列表中的元素添加到另一个列表中
else:
docs = self.load_file(file_obj.name)
# docs = self.load_file(filepath)#这里是原始代码中对单个文件的处理
print("doc : ",docs)
vector_store = FAISS.from_documents(docs, self.embeddings)
vector_store.save_local(f'{VECTOR_STORE_PATH}/faiss_index')
return vector_store
def get_knowledge_based_answer(self,
query,
max_length: int=5000,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.max_token = max_length
# print(history)#这里是为了检测state 的内容,state作为参数传到了history中
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k#用于向量数据库
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
vector_store = FAISS.load_local(f'{VECTOR_STORE_PATH}/faiss_index', self.embeddings)
knowledge_chain = RetrievalQA.from_llm(# 检索问答链
llm=self.llm,
retriever=vector_store.as_retriever(
search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
# docs = loader.load()
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs# list
# 这个函数好像没有用到
def update_status(history, status):
history = history + [[None, status]]
print(status)
return history
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
# 这个用来初始化模型
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return "初始模型已成功加载,可以开始对话"
except Exception as e:
return "模型未成功重新加载,请点击重新加载模型"
# 文件内容清除
def del_files(path_file):
ls = os.listdir(path_file)
for i in ls:
f_path = os.path.join(path_file, i)
# 判断是否是一个目录,若是,则递归删除
if os.path.isdir(f_path):
del_files(f_path)
else:
os.remove(f_path)
ls = os.listdir(path_file)#这里是为了检查空文件夹
for i in ls:
f_path = os.path.join(path_file, i)
os.rmdir(f_path)
def clear_session():
# 除了清空对话之外,还希望可以清空向量数据库中的文件
del_files(VECTOR_STORE_PATH)
return '', None
# 初始化向量数据库
def init_vector_store(file_obj):
# print('file: ',file_obj)
# print('file.name: ',file_obj.name)
vector_store = knowladge_based_chat_llm.init_knowledge_vector_store(
file_obj)
print('vector_store加载完成')
return vector_store
# 用来预测
def predict(input,
max_length,
top_k,
history_len,
temperature,
top_p,
history=None):
if history == None:
history = []
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
max_length=max_length,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
return '', history, history
model_status = init_model()
if __name__ == "__main__":
block = gr.Blocks()
with block as demo:
model_status = gr.State(model_status)
with gr.Row():
with gr.Column(scale=1):
#这里不需要模型选择,模型在开始的时候就已经加载进去了
model_argument = gr.Accordion("模型参数配置")
with model_argument:
max_length = gr.Slider(2000,
10000,
value=5000,
step=1000,
label="max token",
interactive=True)
top_k = gr.Slider(1,
10,
value=6,
step=1,
label="vector search top k",
interactive=True)
history_len = gr.Slider(0,
5,
value=3,
step=1,
label="history len",
interactive=True)
temperature = gr.Slider(0,
1,
value=0.01,
step=0.01,
label="temperature",
interactive=True)
top_p = gr.Slider(0,
1,
value=0.9,
step=0.1,
label="top_p",
interactive=True)
file = gr.File(label='请上传知识库文件',
file_types=['.txt', '.md', '.docx', '.pdf'],
file_count='directory',#这里可以上传多个文件
height=170)
init_vs = gr.Button("知识库文件向量化")
with gr.Column(scale=4):
chatbot = gr.Chatbot([[None, model_status.value]],
label='ChatLLM',height=500)
message = gr.Textbox(label='请输入问题')
state = gr.State()
with gr.Row():
clear_history = gr.Button("🧹 清除历史对话及知识文件")
send = gr.Button("🚀 发送")
init_vs.click(
init_vector_store,
show_progress=True,
inputs=[file],
outputs=[],
)
send.click(predict,
inputs=[
message, max_length, top_k, history_len, temperature,
top_p, state
],# 这里的state也可以用chatbot
outputs=[message, chatbot, state])
clear_history.click(fn=clear_session,
inputs=[],
outputs=[chatbot, state],
queue=False)
message.submit(predict,
inputs=[
message, max_length, top_k, history_len,
temperature, top_p, state
],
outputs=[message, chatbot, state])
# 这里的state表示的是历史?——是的
# 通过验证,gradio.state会存储历史对话,除非点击clear_history
# chatbot好像存的也是历史对话,chatbot和state都可以用来存储历史对话
# threads to consume the request
# demo.queue(concurrency_count=3) \
demo.launch(server_name='0.0.0.0', # ip for listening, 0.0.0.0 for every inbound traffic, 127.0.0.1 for local inbound
server_port=7860, # the port for listening
show_api=False, # if display the api document
share=True, # if register a public url
inbrowser=False) # if browser would be open automatically
| [
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"没有提供足够的相关信息",
"{page_content}",
"question",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | try~try_chroma.py | import sys
sys.path.append('../')
# 这里是为了能偶访问到非当前文件夹中的包
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
# from langchain.document_loaders import TextLoader#这个有一点问题
from langchain.document_loaders.unstructured import UnstructuredFileLoader
import sentence_transformers
import torch
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from config import *
from typing import List
import re
import nltk
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")] + nltk.data.path
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
def split_text(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile(
'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
filepath="../knowledge/草稿.md"
# loader = UnstructuredFileLoader(filepath,mode="elements")
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(textsplitter)
print(docs)
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
num_gpus = num_gpus#GPU数量
large_language_model = init_llm
embedding_model=init_embedding_model
model = HuggingFaceEmbeddings(model_name=embedding_model_dict[embedding_model], )
print('第一步加载成功')
model.client = sentence_transformers.SentenceTransformer(
model.model_name,
device=EMBEDDING_DEVICE,
cache_folder=os.path.join(MODEL_CACHE_PATH,model.model_name))
print('embedding模型加载成功')
# 这里相当于是对client属性进行赋值,尽管在__init__huggingface中已经赋值了,但是没全
'''
self.client = sentence_transformers.SentenceTransformer(
self.model_name, cache_folder=self.cache_folder, **self.model_kwargs
)
'''
model.model_name
db = Chroma.from_documents(docs, model,persist_directory="../langchain_chromadb/vector_store/chroma_1")
print('chroma加载成功') | [] |
2024-01-10 | handsomexiu/langchain-chatglm2-Faiss-Chromadb | langchain_milvus~chatllm.py | import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from config import *
# import accelerate
os.environ["TOKENIZERS_PARALLELISM"] = "false"
DEVICE = LLM_DEVICE
DEVICE_ID = "0"
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
init_llm = init_llm
init_embedding_model = init_embedding_model
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):#所有的操作都将在指定的设备上执行
torch.cuda.empty_cache()# 这个函数用于清空当前CUDA设备上的缓存内存,这可以帮助释放不再使用的GPU内存,以便在需要时可以更好地利用它。
torch.cuda.ipc_collect()
# 这个函数用于执行GPU内存IPC(Inter-Process Communication)收集。
# IPC收集可以帮助回收被释放的GPU内存,以便其他进程或线程可以使用它
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
# 这一段可以参考chatglm2-6b的utils.py文件
#这段代码的目的是根据输入的 GPU 数量和模型层数来自动配置模型的组件分配到不同的 GPU 上。
# 这种配置可以确保模型的不同部分在多个 GPU 上并行处理,以提高模型的训练和推理性能。
num_trans_layers = 28
per_gpu_layers = 30 / num_gpus
device_map = {
'transformer.word_embeddings': 0,
'transformer.final_layernorm': 0,
'lm_head': 0
}
used = 2
gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
return device_map
class ChatLLM(LLM):
max_token: int = 5000#这里实验还没有设置,到时再看如何设置
temperature: float = 0.1
top_p = 0.9
history = []
model_type: str = "chatglm2"
model_name_or_path: str = init_llm,
tokenizer: object = None
model: object = None
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatLLM"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
if self.model_type == 'chatglm2':
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=self.history,
max_length=self.max_token,
temperature=self.temperature,
top_p = self.top_p,
# 这里可以看 https://github.com/THUDM/ChatGLM2-6B/blob/main/web_demo.py
)
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = self.history + [[None, response]]
# 这里的history没有考虑query,也就是prompt。只考虑了response
return response
def load_llm(self,
llm_device=DEVICE,
num_gpus='auto',
device_map: Optional[Dict[str, int]] = None,
**kwargs):
# if 'chatglm2' in self.model_name_or_path.lower():
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path))
if torch.cuda.is_available() and llm_device.lower().startswith("cuda"):
num_gpus = torch.cuda.device_count()
if num_gpus < 2 and device_map is None:
self.model = (AutoModel.from_pretrained(
self.model_name_or_path, trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half().cuda())
else:
from accelerate import dispatch_model
model = AutoModel.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half()
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
self.model = dispatch_model(model, device_map=device_map)
else:#这里就是cpu的了
self.model = (AutoModel.from_pretrained(
self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path)).float().to(llm_device))
self.model = self.model.eval()
| [] |
2024-01-10 | sailfish009/pycaret | pycaret~nlp.py | # Module: Natural Language Processing
# Author: Moez Ali <[email protected]>
# License: MIT
# Release: PyCaret 2.1
# Last modified : 24/08/2020
def setup(data,
target=None,
custom_stopwords=None,
html = True,
session_id = None,
log_experiment = False,
experiment_name = None,
log_plots = False,
log_data = False,
verbose = True):
"""
This function initializes the environment in pycaret. setup() must called before
executing any other function in pycaret. It takes one mandatory parameter:
data, a pandas.Dataframe or object of type list. If a pandas.Dataframe is
passed, target column containing text must be specified. When data passed is of
type list, no target parameter is required. All other parameters are optional.
This module only supports English Language at this time.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
'kiva' is a pandas.Dataframe.
Parameters
----------
data : pandas.Dataframe or list
pandas.Dataframe with shape (n_samples, n_features) where n_samples is the number of samples and n_features
is the number of features, or object of type list with n length.
target: string
If data is of type pandas.Dataframe, name of column containing text values must be passed as
string.
custom_stopwords: list, default = None
List containing custom stopwords.
html: bool, default = True
If set to False, prevents runtime display of monitor. This must be set to False
when using environment that doesnt support HTML.
session_id: int, default = None
If None, a random seed is generated and returned in the Information grid. The
unique number is then distributed as a seed in all functions used during the
experiment. This can be used for later reproducibility of the entire experiment.
log_experiment: bool, default = True
When set to True, all metrics and parameters are logged on MLFlow server.
experiment_name: str, default = None
Name of experiment for logging. When set to None, 'nlp' is by default used as
alias for the experiment name.
log_plots: bool, default = False
When set to True, specific plots are logged in MLflow as a png file. By default,
it is set to False.
log_data: bool, default = False
When set to True, train and test dataset are logged as csv.
verbose: Boolean, default = True
Information grid is not printed when verbose is set to False.
Returns
-------
info_grid
Information grid is printed.
environment
This function returns various outputs that are stored in variable
as tuple. They are used by other functions in pycaret.
Warnings
--------
- Some functionalities in pycaret.nlp requires you to have english language model.
The language model is not downloaded automatically when you install pycaret.
You will have to download two models using your Anaconda Prompt or python
command line interface. To download the model, please type the following in
your command line:
python -m spacy download en_core_web_sm
python -m textblob.download_corpora
Once downloaded, please restart your kernel and re-run the setup.
"""
#exception checking
import sys
from pycaret.utils import __version__
ver = __version__()
import logging
# create logger
global logger
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("PyCaret NLP Module")
logger.info('version ' + str(ver))
logger.info("Initializing setup()")
#generate USI for mlflow tracking
import secrets
global USI
USI = secrets.token_hex(nbytes=2)
logger.info('USI: ' + str(USI))
try:
data_shape = data.shape
except:
data_shape = len(data)
logger.info("""setup(data={}, target={}, custom_stopwords={}, html={}, session_id={}, log_experiment={},
experiment_name={}, log_plots={}, log_data={}, verbose={})""".format(\
str(data_shape), str(target), str(custom_stopwords), str(html),\
str(session_id),str(log_experiment), str(experiment_name), str(log_plots), str(log_data), str(verbose)))
#logging environment and libraries
logger.info("Checking environment")
from platform import python_version, platform, python_build, machine
try:
logger.info("python_version: " + str(python_version()))
except:
logger.warning("cannot find platform.python_version")
try:
logger.info("python_build: " + str(python_build()))
except:
logger.warning("cannot find platform.python_build")
try:
logger.info("machine: " + str(machine()))
except:
logger.warning("cannot find platform.machine")
try:
logger.info("platform: " + str(platform()))
except:
logger.warning("cannot find platform.platform")
try:
import psutil
logger.info("Memory: " + str(psutil.virtual_memory()))
logger.info("Physical Core: " + str(psutil.cpu_count(logical=False)))
logger.info("Logical Core: " + str(psutil.cpu_count(logical=True)))
except:
logger.warning("cannot find psutil installation. memory not traceable. Install psutil using pip to enable memory logging. ")
logger.info("Checking libraries")
try:
from pandas import __version__
logger.info("pd==" + str(__version__))
except:
logger.warning("pandas not found")
try:
from numpy import __version__
logger.info("numpy==" + str(__version__))
except:
logger.warning("numpy not found")
try:
import warnings
warnings.filterwarnings('ignore')
from gensim import __version__
logger.info("gensim==" + str(__version__))
except:
logger.warning("gensim not found")
try:
from spacy import __version__
logger.info("spacy==" + str(__version__))
except:
logger.warning("spacy not found")
try:
from nltk import __version__
logger.info("nltk==" + str(__version__))
except:
logger.warning("nltk not found")
try:
from textblob import __version__
logger.info("textblob==" + str(__version__))
except:
logger.warning("textblob not found")
try:
from pyLDAvis import __version__
logger.info("pyLDAvis==" + str(__version__))
except:
logger.warning("pyLDAvis not found")
try:
from wordcloud import __version__
logger.info("wordcloud==" + str(__version__))
except:
logger.warning("wordcloud not found")
try:
from mlflow.version import VERSION
import warnings
warnings.filterwarnings('ignore')
logger.info("mlflow==" + str(VERSION))
except:
logger.warning("mlflow not found")
logger.info("Checking Exceptions")
#run_time
import datetime, time
runtime_start = time.time()
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
"""
error handling starts here
"""
#checking data type
if hasattr(data,'shape') is False:
if type(data) is not list:
sys.exit('(Type Error): data passed must be of type pandas.DataFrame or list')
#if dataframe is passed then target is mandatory
if hasattr(data,'shape'):
if target is None:
sys.exit('(Type Error): When pandas.Dataframe is passed as data param. Target column containing text must be specified in target param.')
#checking target parameter
if target is not None:
if target not in data.columns:
sys.exit('(Value Error): Target parameter doesnt exist in the data provided.')
#custom stopwords checking
if custom_stopwords is not None:
if type(custom_stopwords) is not list:
sys.exit('(Type Error): custom_stopwords must be of list type.')
#checking session_id
if session_id is not None:
if type(session_id) is not int:
sys.exit('(Type Error): session_id parameter must be an integer.')
#check if spacy is loaded
try:
import spacy
sp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
except:
sys.exit('(Type Error): spacy english model is not yet downloaded. See the documentation of setup to see installation guide.')
#html
if type(html) is not bool:
sys.exit('(Type Error): html parameter only accepts True or False.')
#log_experiment
if type(log_experiment) is not bool:
sys.exit('(Type Error): log_experiment parameter only accepts True or False.')
#log_plots
if type(log_plots) is not bool:
sys.exit('(Type Error): log_plots parameter only accepts True or False.')
#log_data
if type(log_data) is not bool:
sys.exit('(Type Error): log_data parameter only accepts True or False.')
#verbose
if type(verbose) is not bool:
sys.exit('(Type Error): verbose parameter only accepts True or False.')
"""
error handling ends here
"""
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
#global html_param
global html_param
#create html_param
html_param = html
'''
generate monitor starts
'''
logger.info("Preparing display monitor")
#progress bar
max_steps = 11
total_steps = 9
progress = ipw.IntProgress(value=0, min=0, max=max_steps, step=1 , description='Processing: ')
if verbose:
if html_param:
display(progress)
try:
max_sub = len(data[target].values.tolist())
except:
max_sub = len(data)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies' ],
['Step' , '. . . . . . . . . . . . . . . . . .', 'Step 0 of ' + str(total_steps)] ],
columns=['', ' ', ' ']).set_index('')
if verbose:
if html_param:
display(monitor, display_id = 'monitor')
'''
generate monitor end
'''
logger.info("Importing libraries")
#general dependencies
import numpy as np
import random
import spacy
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
import spacy
import re
import secrets
#setting sklearn config to print all parameters including default
import sklearn
sklearn.set_config(print_changed_only=False)
logger.info("Declaring global variables")
#defining global variables
global text, id2word, corpus, data_, seed, target_, experiment__,\
exp_name_log, logging_param, log_plots_param
#create an empty list for pickling later.
try:
experiment__.append('dummy')
experiment__.pop()
except:
experiment__ = []
#converting to dataframe if list provided
if type(data) is list:
logger.info("Converting list into dataframe")
data = pd.DataFrame(data, columns=['en'])
target = 'en'
#converting target column into list
try:
text = data[target].values.tolist()
target_ = str(target)
logger.info("Input provided : dataframe")
except:
text = data
target_ = 'en'
logger.info("Input provided : list")
#generate seed to be used globally
if session_id is None:
seed = random.randint(150,9000)
else:
seed = session_id
logger.info("session_id set to : " + str(seed))
logger.info("Copying training dataset")
#copying dataframe
if type(data) is list:
data_ = pd.DataFrame(data)
data_.columns = ['en']
else:
data_ = data.copy()
#create logging parameter
logging_param = log_experiment
#create exp_name_log param incase logging is False
exp_name_log = 'no_logging'
#create an empty log_plots_param
if log_plots:
log_plots_param = True
else:
log_plots_param = False
progress.value += 1
"""
DEFINE STOPWORDS
"""
try:
logger.info("Importing stopwords from nltk")
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
except:
logger.info("Importing stopwords from nltk failed .. loading pre-defined stopwords")
stop_words = ['ourselves', 'hers', 'between', 'yourself', 'but', 'again', 'there', 'about', 'once', 'during',
'out', 'very', 'having', 'with', 'they', 'own', 'an', 'be', 'some', 'for', 'do', 'its', 'yours',
'such', 'into', 'of', 'most', 'itself', 'other', 'off', 'is', 's', 'am', 'or', 'who', 'as', 'from',
'him', 'each', 'the', 'themselves', 'until', 'below', 'are', 'we', 'these', 'your', 'his', 'through',
'don', 'nor', 'me', 'were', 'her', 'more', 'himself', 'this', 'down', 'should', 'our', 'their', 'while',
'above', 'both', 'up', 'to', 'ours', 'had', 'she', 'all', 'no', 'when', 'at', 'any', 'before', 'them',
'same', 'and', 'been', 'have', 'in', 'will', 'on', 'does', 'yourselves', 'then', 'that', 'because', 'what',
'over', 'why', 'so', 'can', 'did', 'not', 'now', 'under', 'he', 'you', 'herself', 'has', 'just', 'where',
'too', 'only', 'myself', 'which', 'those', 'i', 'after', 'few', 'whom', 't', 'being', 'if', 'theirs', 'my',
'against', 'a', 'by', 'doing', 'it', 'how', 'further', 'was', 'here', 'than']
if custom_stopwords is not None:
stop_words = stop_words + custom_stopwords
if custom_stopwords is None:
logger.info("No custom stopwords defined")
progress.value += 1
"""
TEXT PRE-PROCESSING STARTS HERE
"""
"""
STEP 1 - REMOVE NUMERIC CHARACTERS FROM THE LIST
"""
logger.info("Removing numeric characters from the text")
monitor.iloc[1,1:] = 'Removing Numeric Characters'
monitor.iloc[2,1:] = 'Step 1 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
text_step1 = []
for i in range(0,len(text)):
review = re.sub("\d+", "", str(text[i]))
text_step1.append(review)
text = text_step1 #re-assigning
del(text_step1)
progress.value += 1
"""
STEP 2 - REGULAR EXPRESSIONS
"""
logger.info("Removing special characters from the text")
monitor.iloc[1,1:] = 'Removing Special Characters'
monitor.iloc[2,1:] = 'Step 2 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
text_step2 = []
for i in range(0,len(text)):
review = re.sub(r'\W', ' ', str(text[i]))
review = review.lower()
review = re.sub(r'\s+[a-z]\s+', ' ', review)
review = re.sub(r'^[a-z]\s+', ' ', review)
review = re.sub(r'\d+', ' ', review)
review = re.sub(r'\s+', ' ', review)
text_step2.append(review)
text = text_step2 #re-assigning
del(text_step2)
progress.value += 1
"""
STEP 3 - WORD TOKENIZATION
"""
logger.info("Tokenizing Words")
monitor.iloc[1,1:] = 'Tokenizing Words'
monitor.iloc[2,1:] = 'Step 3 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
text_step3 = []
for i in text:
review = gensim.utils.simple_preprocess(str(i), deacc=True)
text_step3.append(review)
text = text_step3
del(text_step3)
progress.value += 1
"""
STEP 4 - REMOVE STOPWORDS
"""
logger.info("Removing stopwords")
monitor.iloc[1,1:] = 'Removing Stopwords'
monitor.iloc[2,1:] = 'Step 4 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
text_step4 = []
for i in text:
ii = []
for word in i:
if word not in stop_words:
ii.append(word)
text_step4.append(ii)
text = text_step4
del(text_step4)
progress.value += 1
"""
STEP 5 - BIGRAM EXTRACTION
"""
logger.info("Extracting Bigrams")
monitor.iloc[1,1:] = 'Extracting Bigrams'
monitor.iloc[2,1:] = 'Step 5 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
bigram = gensim.models.Phrases(text, min_count=5, threshold=100)
bigram_mod = gensim.models.phrases.Phraser(bigram)
text_step5 = []
for i in text:
text_step5.append(bigram_mod[i])
text = text_step5
del(text_step5)
progress.value += 1
"""
STEP 6 - TRIGRAM EXTRACTION
"""
logger.info("Extracting Trigrams")
monitor.iloc[1,1:] = 'Extracting Trigrams'
monitor.iloc[2,1:] = 'Step 6 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
trigram = gensim.models.Phrases(bigram[text], threshold=100)
trigram_mod = gensim.models.phrases.Phraser(trigram)
text_step6 = []
for i in text:
text_step6.append(trigram_mod[bigram_mod[i]])
text = text_step6
del(text_step6)
progress.value += 1
"""
STEP 7 - LEMMATIZATION USING SPACY
"""
logger.info("Lemmatizing tokens")
monitor.iloc[1,1:] = 'Lemmatizing'
monitor.iloc[2,1:] = 'Step 7 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
nlp.max_length=3000000 #increasing text length to 3000000 from default of 1000000
allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']
text_step7 = []
for i in text:
doc = nlp(" ".join(i))
text_step7.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
text = text_step7
del(text_step7)
progress.value += 1
"""
STEP 8 - CUSTOM STOPWORD REMOVER
"""
logger.info("Removing stopwords after lemmatizing")
monitor.iloc[1,1:] = 'Removing Custom Stopwords'
monitor.iloc[2,1:] = 'Step 8 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
text_step8 = []
for i in text:
ii = []
for word in i:
if word not in stop_words:
ii.append(word)
text_step8.append(ii)
text = text_step8
del(text_step8)
progress.value += 1
"""
STEP 8 - CREATING CORPUS AND DICTIONARY
"""
logger.info("Creating corpus and dictionary")
monitor.iloc[1,1:] = 'Compiling Corpus'
monitor.iloc[2,1:] = 'Step 9 of '+ str(total_steps)
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#creating dictionary
id2word = corpora.Dictionary(text)
#creating corpus
corpus = []
for i in text:
d = id2word.doc2bow(i)
corpus.append(d)
progress.value += 1
"""
PROGRESS NOT YET TRACKED - TO BE CODED LATER
"""
logger.info("Compiling processed text")
text_join = []
for i in text:
word = ' '.join(i)
text_join.append(word)
data_[target_] = text_join
'''
Final display Starts
'''
if custom_stopwords is None:
csw = False
else:
csw = True
logger.info("Compiling information grid")
functions = pd.DataFrame ( [ ['session_id', seed ],
['Documents', len(corpus) ],
['Vocab Size',len(id2word.keys()) ],
['Custom Stopwords',csw ],
], columns = ['Description', 'Value'] )
functions_ = functions.style.hide_index()
'''
Final display Ends
'''
#log into experiment
experiment__.append(('Info', functions))
experiment__.append(('Dataset', data_))
experiment__.append(('Corpus', corpus))
experiment__.append(('Dictionary', id2word))
experiment__.append(('Text', text))
#end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
if logging_param:
logger.info("Creating MLFlow logs")
monitor.iloc[1,1:] = 'Creating Logs'
monitor.iloc[2,1:] = 'Final'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
import mlflow
from pathlib import Path
import os
if experiment_name is None:
exp_name_ = 'nlp-default-name'
else:
exp_name_ = experiment_name
URI = secrets.token_hex(nbytes=4)
exp_name_log = exp_name_
try:
mlflow.create_experiment(exp_name_log)
except:
pass
#mlflow logging
mlflow.set_experiment(exp_name_log)
run_name_ = 'Session Initialized ' + str(USI)
with mlflow.start_run(run_name=run_name_) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
k = functions.copy()
k.set_index('Description',drop=True,inplace=True)
kdict = k.to_dict()
params = kdict.get('Value')
mlflow.log_params(params)
#set tag of compare_models
mlflow.set_tag("Source", "setup")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log gensim id2word
id2word.save('id2word')
mlflow.log_artifact('id2word')
import os
os.remove('id2word')
# Log data
if log_data:
data_.to_csv('data.csv')
mlflow.log_artifact('data.csv')
os.remove('data.csv')
# Log plots
if log_plots:
logger.info("SubProcess plot_model() called ==================================")
plot_model(plot='frequency', save=True, system=False)
mlflow.log_artifact('Word Frequency.html')
os.remove('Word Frequency.html')
plot_model(plot='bigram', save=True, system=False)
mlflow.log_artifact('Bigram.html')
os.remove('Bigram.html')
plot_model(plot='trigram', save=True, system=False)
mlflow.log_artifact('Trigram.html')
os.remove('Trigram.html')
plot_model(plot='pos', save=True, system=False)
mlflow.log_artifact('POS.html')
os.remove('POS.html')
logger.info("SubProcess plot_model() end ==================================")
if verbose:
clear_output()
if html_param:
display(functions_)
else:
print(functions_.data)
logger.info("setup() succesfully completed......................................")
return text, data_, corpus, id2word, seed, target_, experiment__,\
exp_name_log, logging_param, log_plots_param, USI, html_param
def create_model(model=None,
multi_core=False,
num_topics = None,
verbose=True,
system=True,
**kwargs):
"""
This function creates a model on the dataset passed as a data param during
the setup stage. setup() function must be called before using create_model().
This function returns a trained model object.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
This will return trained Latent Dirichlet Allocation model.
Parameters
----------
model : string, default = None
Enter ID of the model available in model library (ID - Model):
* 'lda' - Latent Dirichlet Allocation
* 'lsi' - Latent Semantic Indexing
* 'hdp' - Hierarchical Dirichlet Process
* 'rp' - Random Projections
* 'nmf' - Non-Negative Matrix Factorization
multi_core: Boolean, default = False
True would utilize all CPU cores to parallelize and speed up model training. Only
available for 'lda'. For all other models, the multi_core parameter is ignored.
num_topics: integer, default = 4
Number of topics to be created. If None, default is set to 4.
verbose: Boolean, default = True
Status update is not printed when verbose is set to False.
system: Boolean, default = True
Must remain True all times. Only to be changed by internal functions.
**kwargs:
Additional keyword arguments to pass to the estimator.
Returns
-------
model
Trained model object.
"""
#exception checking
import sys
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing create_model()")
logger.info("""create_model(model={}, multi_core={}, num_topics={}, verbose={}, system={})""".\
format(str(model), str(multi_core), str(num_topics), str(verbose), str(system)))
logger.info("Checking exceptions")
#run_time
import datetime, time
runtime_start = time.time()
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
"""
error handling starts here
"""
#checking for model parameter
if model is None:
sys.exit('(Value Error): Model parameter Missing. Please see docstring for list of available models.')
#checking for allowed models
allowed_models = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
if model not in allowed_models:
sys.exit('(Value Error): Model Not Available. Please see docstring for list of available models.')
#checking multicore type:
if type(multi_core) is not bool:
sys.exit('(Type Error): multi_core parameter can only take argument as True or False.')
#checking round parameter
if num_topics is not None:
if num_topics <= 1:
sys.exit('(Type Error): num_topics parameter only accepts integer value.')
#checking verbose parameter
if type(verbose) is not bool:
sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')
"""
error handling ends here
"""
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import numpy as np
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
"""
monitor starts
"""
logger.info("Preparing display monitor")
#progress bar and monitor control
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
progress = ipw.IntProgress(value=0, min=0, max=4, step=1 , description='Processing: ')
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Initializing'] ],
columns=['', ' ', ' ']).set_index('')
if verbose:
if html_param:
display(progress)
display(monitor, display_id = 'monitor')
progress.value += 1
"""
monitor starts
"""
logger.info("Defining topic model")
model_name_short = model
#define topic_model_name
if model == 'lda':
topic_model_name = 'Latent Dirichlet Allocation'
elif model == 'lsi':
topic_model_name = 'Latent Semantic Indexing'
elif model == 'hdp':
topic_model_name = 'Hierarchical Dirichlet Process'
elif model == 'nmf':
topic_model_name = 'Non-Negative Matrix Factorization'
elif model == 'rp':
topic_model_name = 'Random Projections'
logger.info("Model: " + str(topic_model_name))
#defining default number of topics
logger.info("Defining num_topics parameter")
if num_topics is None:
n_topics = 4
else:
n_topics = num_topics
logger.info("num_topics set to: " + str(n_topics))
#monitor update
monitor.iloc[1,1:] = 'Fitting Topic Model'
progress.value += 1
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
model_fit_start = time.time()
if model == 'lda':
if multi_core:
logger.info("LDA multi_core enabled")
from gensim.models.ldamulticore import LdaMulticore
logger.info("LdaMulticore imported successfully")
model = LdaMulticore(corpus=corpus,
num_topics=n_topics,
id2word=id2word,
workers=4,
random_state=seed,
chunksize=100,
passes=10,
alpha= 'symmetric',
per_word_topics=True,
**kwargs)
logger.info("LdaMulticore trained successfully")
progress.value += 1
else:
from gensim.models.ldamodel import LdaModel
logger.info("LdaModel imported successfully")
model = LdaModel(corpus=corpus,
num_topics=n_topics,
id2word=id2word,
random_state=seed,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True,
**kwargs)
logger.info("LdaModel trained successfully")
progress.value += 1
elif model == 'lsi':
from gensim.models.lsimodel import LsiModel
logger.info("LsiModel imported successfully")
model = LsiModel(corpus=corpus,
num_topics=n_topics,
id2word=id2word,
**kwargs)
logger.info("LsiModel trained successfully")
progress.value += 1
elif model == 'hdp':
from gensim.models import HdpModel
logger.info("HdpModel imported successfully")
model = HdpModel(corpus=corpus,
id2word=id2word,
random_state=seed,
chunksize=100,
T=n_topics,
**kwargs)
logger.info("HdpModel trained successfully")
progress.value += 1
elif model == 'rp':
from gensim.models import RpModel
logger.info("RpModel imported successfully")
model = RpModel(corpus=corpus,
id2word=id2word,
num_topics=n_topics,
**kwargs)
logger.info("RpModel trained successfully")
progress.value += 1
elif model == 'nmf':
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
logger.info("CountVectorizer, TfidfTransformer, NMF, normalize imported successfully")
text_join = []
for i in text:
word = ' '.join(i)
text_join.append(word)
progress.value += 1
vectorizer = CountVectorizer(analyzer='word', max_features=5000)
x_counts = vectorizer.fit_transform(text_join)
logger.info("CountVectorizer() Fit Successfully")
transformer = TfidfTransformer(smooth_idf=False)
x_tfidf = transformer.fit_transform(x_counts)
logger.info("TfidfTransformer() Fit Successfully")
xtfidf_norm = normalize(x_tfidf, norm='l1', axis=1)
model = NMF(n_components=n_topics, init='nndsvd', random_state=seed,**kwargs)
model.fit(xtfidf_norm)
logger.info("NMF() Trained Successfully")
model_fit_end = time.time()
model_fit_time = np.array(model_fit_end - model_fit_start).round(2)
progress.value += 1
#end runtime
runtime_end = time.time()
runtime = np.array(runtime_end - runtime_start).round(2)
#mlflow logging
if logging_param and system:
logger.info("Creating MLFLow Logs")
#Creating Logs message monitor
monitor.iloc[1,1:] = 'Creating Logs'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#import mlflow
import mlflow
from pathlib import Path
import os
mlflow.set_experiment(exp_name_log)
with mlflow.start_run(run_name=topic_model_name) as run:
# Get active run to log as tag
RunID = mlflow.active_run().info.run_id
# Log model parameters
from copy import deepcopy
model_copied = deepcopy(model)
try:
params = model_copied.get_params()
except:
import inspect
params = inspect.getmembers(model_copied)[2][1]
for i in list(params):
v = params.get(i)
if len(str(v)) > 250:
params.pop(i)
mlflow.log_params(params)
#set tag of compare_models
mlflow.set_tag("Source", "create_model")
import secrets
URI = secrets.token_hex(nbytes=4)
mlflow.set_tag("URI", URI)
mlflow.set_tag("USI", USI)
mlflow.set_tag("Run Time", runtime)
mlflow.set_tag("Run ID", RunID)
# Log model and related artifacts
if model_name_short == 'nmf':
logger.info("SubProcess save_model() called ==================================")
save_model(model, 'model', verbose=False)
logger.info("SubProcess save_model() end ==================================")
mlflow.log_artifact('model.pkl')
size_bytes = Path('model.pkl').stat().st_size
os.remove('model.pkl')
elif model_name_short == 'lda':
model.save('model')
mlflow.log_artifact('model')
mlflow.log_artifact('model.expElogbeta.npy')
mlflow.log_artifact('model.id2word')
mlflow.log_artifact('model.state')
size_bytes = Path('model').stat().st_size + Path('model.id2word').stat().st_size\
+ Path('model.state').stat().st_size
os.remove('model')
os.remove('model.expElogbeta.npy')
os.remove('model.id2word')
os.remove('model.state')
elif model_name_short == 'lsi':
model.save('model')
mlflow.log_artifact('model')
mlflow.log_artifact('model.projection')
size_bytes = Path('model').stat().st_size + Path('model.projection').stat().st_size
os.remove('model')
os.remove('model.projection')
elif model_name_short == 'rp':
model.save('model')
mlflow.log_artifact('model')
size_bytes = Path('model').stat().st_size
os.remove('model')
elif model_name_short == 'hdp':
model.save('model')
mlflow.log_artifact('model')
size_bytes = Path('model').stat().st_size
os.remove('model')
size_kb = np.round(size_bytes/1000, 2)
mlflow.set_tag("Size KB", size_kb)
# Log training time in seconds
mlflow.log_metric("TT", model_fit_time)
try:
mlflow.log_metrics(model_results.to_dict().get('Metric'))
except:
pass
#storing into experiment
if verbose:
clear_output()
logger.info(str(model))
logger.info("create_model() succesfully completed......................................")
return model
def assign_model(model,
verbose=True):
"""
This function assigns each of the data point in the dataset passed during setup
stage to one of the topic using trained model object passed as model param.
create_model() function must be called before using assign_model().
This function returns a pandas.Dataframe with topic weights, dominant topic and % of the
dominant topic (where applicable).
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> lda_df = assign_model(lda)
This will return a pandas.Dataframe with inferred topics using trained model.
Parameters
----------
model : trained model object, default = None
verbose: Boolean, default = True
Status update is not printed when verbose is set to False.
Returns
-------
pandas.DataFrame
Returns a DataFrame with inferred topics using trained model object.
"""
#exception checking
import sys
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing assign_model()")
logger.info("""assign_model(model={}, verbose={})""".\
format(str(model), str(verbose)))
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
logger.info("Determining model type")
#determine model type
if 'LdaModel' in str(type(model)):
mod_type = 'lda'
elif 'LdaMulticore' in str(type(model)):
mod_type = 'lda'
elif 'LsiModel' in str(type(model)):
mod_type = 'lsi'
elif 'NMF' in str(type(model)):
mod_type = 'nmf'
elif 'HdpModel' in str(type(model)):
mod_type = 'hdp'
elif 'RpModel' in str(type(model)):
mod_type = 'rp'
else:
mod_type = None
logger.info("model type: " + str(mod_type))
"""
error handling starts here
"""
logger.info("Checking exceptions")
#checking for allowed models
allowed_models = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
if mod_type not in allowed_models:
sys.exit('(Value Error): Model Not Recognized. Please see docstring for list of available models.')
#checking verbose parameter
if type(verbose) is not bool:
sys.exit('(Type Error): Verbose parameter can only take argument as True or False.')
"""
error handling ends here
"""
logger.info("Preloading libraries")
#pre-load libraries
import numpy as np
import pandas as pd
import ipywidgets as ipw
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
#progress bar and monitor control
max_progress = len(text) + 5
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
progress = ipw.IntProgress(value=0, min=0, max=max_progress, step=1 , description='Processing: ')
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Initializing'] ],
columns=['', ' ', ' ']).set_index('')
if verbose:
if html_param:
display(progress)
display(monitor, display_id = 'monitor')
progress.value += 1
monitor.iloc[1,1:] = 'Extracting Topics from Model'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
progress.value += 1
#assignment starts here
if mod_type == 'lda':
c = model.get_document_topics(corpus, minimum_probability=0)
ls = []
for i in range(len(c)):
ls.append(c[i])
bb = []
for i in ls:
bs = []
for k in i:
progress.value += 1
bs.append(k[1])
bb.append(bs)
Dominant_Topic = []
for i in bb:
max_ = max(i)
max_ = i.index(max_)
Dominant_Topic.append('Topic ' + str(max_))
pdt = []
for i in range(0,len(bb)):
l = max(bb[i]) / sum(bb[i])
pdt.append(round(l,2))
col_names = []
for i in range(len(model.show_topics(num_topics=999999))):
a = 'Topic_' + str(i)
col_names.append(a)
progress.value += 1
bb = pd.DataFrame(bb,columns=col_names)
bb_ = pd.concat([data_,bb], axis=1)
dt_ = pd.DataFrame(Dominant_Topic, columns=['Dominant_Topic'])
bb_ = pd.concat([bb_,dt_], axis=1)
pdt_ = pd.DataFrame(pdt, columns=['Perc_Dominant_Topic'])
bb_ = pd.concat([bb_,pdt_], axis=1)
progress.value += 1
if verbose:
clear_output()
elif mod_type == 'lsi':
col_names = []
for i in range(0,len(model.print_topics(num_topics=999999))):
a = 'Topic_' + str(i)
col_names.append(a)
df_ = pd.DataFrame()
Dominant_Topic = []
for i in range(0,len(text)):
progress.value += 1
db = id2word.doc2bow(text[i])
db_ = model[db]
db_array = np.array(db_)
db_array_ = db_array[:,1]
max_ = max(db_array_)
max_ = list(db_array_).index(max_)
Dominant_Topic.append('Topic ' + str(max_))
db_df_ = pd.DataFrame([db_array_])
df_ = pd.concat([df_,db_df_])
progress.value += 1
df_.columns = col_names
df_['Dominant_Topic'] = Dominant_Topic
df_ = df_.reset_index(drop=True)
bb_ = pd.concat([data_,df_], axis=1)
progress.value += 1
if verbose:
clear_output()
elif mod_type == 'hdp' or mod_type == 'rp':
rate = []
for i in range(0,len(corpus)):
progress.value += 1
rate.append(model[corpus[i]])
topic_num = []
topic_weight = []
doc_num = []
counter = 0
for i in rate:
for k in i:
topic_num.append(k[0])
topic_weight.append(k[1])
doc_num.append(counter)
counter += 1
progress.value += 1
df = pd.DataFrame({'Document': doc_num, 'Topic' : topic_num, 'Topic Weight' : topic_weight}).sort_values(by='Topic')
df = df.pivot(index='Document', columns='Topic', values='Topic Weight').fillna(0)
df.columns = ['Topic_' + str(i) for i in df.columns]
Dominant_Topic = []
for i in range(0,len(df)):
s = df.iloc[i].max()
d = list(df.iloc[i]).index(s)
v = df.columns[d]
v = v.replace("_", ' ')
Dominant_Topic.append(v)
df['Dominant_Topic'] = Dominant_Topic
progress.value += 1
if verbose:
clear_output()
bb_ = pd.concat([data_,df], axis=1)
elif mod_type == 'nmf':
"""
this section will go away in future release through better handling
"""
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.decomposition import NMF
from sklearn.preprocessing import normalize
text_join = []
for i in text:
word = ' '.join(i)
text_join.append(word)
progress.value += 1
vectorizer = CountVectorizer(analyzer='word', max_features=5000)
x_counts = vectorizer.fit_transform(text_join)
transformer = TfidfTransformer(smooth_idf=False)
x_tfidf = transformer.fit_transform(x_counts)
xtfidf_norm = normalize(x_tfidf, norm='l1', axis=1)
"""
section ends
"""
bb = list(model.fit_transform(xtfidf_norm))
col_names = []
for i in range(len(bb[0])):
a = 'Topic_' + str(i)
col_names.append(a)
Dominant_Topic = []
for i in bb:
progress.value += 1
max_ = max(i)
max_ = list(i).index(max_)
Dominant_Topic.append('Topic ' + str(max_))
pdt = []
for i in range(0,len(bb)):
l = max(bb[i]) / sum(bb[i])
pdt.append(round(l,2))
progress.value += 1
bb = pd.DataFrame(bb, columns=col_names)
bb_ = pd.concat([data_,bb], axis=1)
dt_ = pd.DataFrame(Dominant_Topic, columns=['Dominant_Topic'])
bb_ = pd.concat([bb_,dt_], axis=1)
pdt_ = pd.DataFrame(pdt, columns=['Perc_Dominant_Topic'])
bb_ = pd.concat([bb_,pdt_], axis=1)
progress.value += 1
if verbose:
clear_output()
logger.info(str(bb_.shape))
logger.info("assign_model() succesfully completed......................................")
return bb_
def plot_model(model = None,
plot = 'frequency',
topic_num = None,
save = False,
system = True):
"""
This function takes a trained model object (optional) and returns a plot based
on the inferred dataset by internally calling assign_model before generating a
plot. Where a model parameter is not passed, a plot on the entire dataset will
be returned instead of one at the topic level. As such, plot_model can be used
with or without model. All plots with a model parameter passed as a trained
model object will return a plot based on the first topic i.e. 'Topic 0'. This
can be changed using the topic_num param.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> plot_model(lda, plot = 'frequency')
This will return a frequency plot on a trained Latent Dirichlet Allocation
model for all documents in 'Topic 0'. The topic number can be changed as
follows:
>>> plot_model(lda, plot = 'frequency', topic_num = 'Topic 1')
This will now return a frequency plot on a trained LDA model for all
documents inferred in 'Topic 1'.
Alternatively, if following is used:
>>> plot_model(plot = 'frequency')
This will return frequency plot on the entire training corpus compiled
during setup stage.
Parameters
----------
model : object, default = none
A trained model object can be passed. Model must be created using create_model().
plot : string, default = 'frequency'
Enter abbreviation for type of plot. The current list of plots supported are (Name - Abbreviated String):
* Word Token Frequency - 'frequency'
* Word Distribution Plot - 'distribution'
* Bigram Frequency Plot - 'bigram'
* Trigram Frequency Plot - 'trigram'
* Sentiment Polarity Plot - 'sentiment'
* Part of Speech Frequency - 'pos'
* t-SNE (3d) Dimension Plot - 'tsne'
* Topic Model (pyLDAvis) - 'topic_model'
* Topic Infer Distribution - 'topic_distribution'
* Wordcloud - 'wordcloud'
* UMAP Dimensionality Plot - 'umap'
topic_num : string, default = None
Topic number to be passed as a string. If set to None, default generation will
be on 'Topic 0'
save: Boolean, default = False
Plot is saved as png file in local directory when save parameter set to True.
system: Boolean, default = True
Must remain True all times. Only to be changed by internal functions.
Returns
-------
Visual_Plot
Prints the visual plot.
Warnings
--------
- 'pos' and 'umap' plot not available at model level. Hence the model parameter is
ignored. The result will always be based on the entire training corpus.
- 'topic_model' plot is based on pyLDAVis implementation. Hence its not available
for model = 'lsi', 'rp' and 'nmf'.
"""
#exception checking
import sys
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing plot_model()")
logger.info("""plot_model(model={}, plot={}, topic_num={}, save={}, system={})""".\
format(str(model), str(plot), str(topic_num), str(save), str(system)))
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
#setting default of topic_num
if model is not None and topic_num is None:
topic_num = 'Topic 0'
logger.info("Topic selected. topic_num : " + str(topic_num))
"""
exception handling starts here
"""
#determine model type
if model is not None:
mod = str(type(model))
if 'LdaModel' in mod:
mod_type = 'lda'
elif 'LdaMulticore' in str(type(model)):
mod_type = 'lda'
elif 'LsiModel' in str(type(model)):
mod_type = 'lsi'
elif 'NMF' in str(type(model)):
mod_type = 'nmf'
elif 'HdpModel' in str(type(model)):
mod_type = 'hdp'
elif 'RpModel' in str(type(model)):
mod_type = 'rp'
logger.info("Checking exceptions")
#plot checking
allowed_plots = ['frequency', 'distribution', 'bigram', 'trigram', 'sentiment', 'pos', 'tsne', 'topic_model',
'topic_distribution', 'wordcloud', 'umap']
if plot not in allowed_plots:
sys.exit('(Value Error): Plot Not Available. Please see docstring for list of available plots.')
#plots without topic model
if model is None:
not_allowed_wm = ['tsne', 'topic_model', 'topic_distribution']
if plot in not_allowed_wm:
sys.exit('(Type Error): Model parameter Missing. Plot not supported without specific model passed in as Model param.')
#handle topic_model plot error
if plot == 'topic_model':
not_allowed_tm = ['lsi', 'rp', 'nmf']
if mod_type in not_allowed_tm:
sys.exit('(Type Error): Model not supported for plot = topic_model. Please see docstring for list of available models supported for topic_model.')
"""
error handling ends here
"""
logger.info("Importing libraries")
#import dependencies
import pandas as pd
import numpy
#import cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
#save parameter
if save:
save_param = True
else:
save_param = False
logger.info("save_param set to " + str(save_param))
logger.info("plot type: " + str(plot))
if plot == 'frequency':
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_words(corpus, n=None):
vec = CountVectorizer()
logger.info("Fitting CountVectorizer()")
bag_of_words = vec.fit_transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
logger.info("Rendering Visual")
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_words(data_[target_], n=100)
df2 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
df3 = df2.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
kind='bar', yTitle='Count', linecolor='black', title='Top 100 words after removing stop words',
asFigure=save_param)
else:
title = str(topic_num) + ': ' + 'Top 100 words after removing stop words'
logger.info("SubProcess assign_model() called ==================================")
assigned_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
common_words = get_top_n_words(filtered_df[target_], n=100)
df2 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
df3 = df2.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
kind='bar', yTitle='Count', linecolor='black', title=title, asFigure=save_param)
logger.info("Visual Rendered Successfully")
if save:
df3.write_html('Word Frequency.html')
logger.info("Saving 'Word Frequency.html' in current active directory")
except:
logger.warning("Invalid topic_num param or empty Vocab. Try changing Topic Number.")
sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')
elif plot == 'distribution':
try:
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
b = data_[target_].apply(lambda x: len(str(x).split()))
b = pd.DataFrame(b)
logger.info("Rendering Visual")
b = b[target_].iplot(
kind='hist',
bins=100,
xTitle='word count',
linecolor='black',
yTitle='count',
title='Word Count Distribution',
asFigure=save_param)
else:
title = str(topic_num) + ': ' + 'Word Count Distribution'
logger.info("SubProcess assign_model() called ==================================")
assigned_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
b = filtered_df[target_].apply(lambda x: len(str(x).split()))
b = pd.DataFrame(b)
logger.info("Rendering Visual")
b = b[target_].iplot(
kind='hist',
bins=100,
xTitle='word count',
linecolor='black',
yTitle='count',
title= title,
asFigure=save_param)
logger.info("Visual Rendered Successfully")
if save:
b.write_html('Distribution.html')
logger.info("Saving 'Distribution.html' in current active directory")
except:
logger.warning("Invalid topic_num param or empty Vocab. Try changing Topic Number.")
sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')
elif plot == 'bigram':
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_bigram(corpus, n=None):
logger.info("Fitting CountVectorizer()")
vec = CountVectorizer(ngram_range=(2, 2)).fit(corpus)
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_bigram(data_[target_], 100)
df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
logger.info("Rendering Visual")
df3 = df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
kind='bar', yTitle='Count', linecolor='black', title='Top 100 bigrams after removing stop words', asFigure=save_param)
else:
title = str(topic_num) + ': ' + 'Top 100 bigrams after removing stop words'
logger.info("SubProcess assign_model() called ==================================")
assigned_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
common_words = get_top_n_bigram(filtered_df[target_], 100)
df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
logger.info("Rendering Visual")
df3 = df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
kind='bar', yTitle='Count', linecolor='black', title=title, asFigure=save_param)
logger.info("Visual Rendered Successfully")
if save:
df3.write_html('Bigram.html')
logger.info("Saving 'Bigram.html' in current active directory")
except:
logger.warning("Invalid topic_num param or empty Vocab. Try changing Topic Number.")
sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')
elif plot == 'trigram':
try:
from sklearn.feature_extraction.text import CountVectorizer
def get_top_n_trigram(corpus, n=None):
vec = CountVectorizer(ngram_range=(3, 3)).fit(corpus)
logger.info("Fitting CountVectorizer()")
bag_of_words = vec.transform(corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
common_words = get_top_n_trigram(data_[target_], 100)
df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
logger.info("Rendering Visual")
df3 = df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
kind='bar', yTitle='Count', linecolor='black', title='Top 100 trigrams after removing stop words', asFigure=save_param)
else:
title = str(topic_num) + ': ' + 'Top 100 trigrams after removing stop words'
logger.info("SubProcess assign_model() called ==================================")
assigned_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
common_words = get_top_n_trigram(filtered_df[target_], 100)
df3 = pd.DataFrame(common_words, columns = ['Text' , 'count'])
logger.info("Rendering Visual")
df3 = df3.groupby('Text').sum()['count'].sort_values(ascending=False).iplot(
kind='bar', yTitle='Count', linecolor='black', title=title, asFigure=save_param)
logger.info("Visual Rendered Successfully")
if save:
df3.write_html('Trigram.html')
logger.info("Saving 'Trigram.html' in current active directory")
except:
logger.warning("Invalid topic_num param or empty Vocab. Try changing Topic Number.")
sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')
elif plot == 'sentiment':
try:
#loadies dependencies
import plotly.graph_objects as go
from textblob import TextBlob
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
sentiments = data_[target_].map(lambda text: TextBlob(text).sentiment.polarity)
sentiments = pd.DataFrame(sentiments)
logger.info("Rendering Visual")
sentiments = sentiments[target_].iplot(
kind='hist',
bins=50,
xTitle='polarity',
linecolor='black',
yTitle='count',
title='Sentiment Polarity Distribution',
asFigure=save_param)
else:
title = str(topic_num) + ': ' + 'Sentiment Polarity Distribution'
logger.info("SubProcess assign_model() called ==================================")
assigned_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
sentiments = filtered_df[target_].map(lambda text: TextBlob(text).sentiment.polarity)
sentiments = pd.DataFrame(sentiments)
logger.info("Rendering Visual")
sentiments = sentiments[target_].iplot(
kind='hist',
bins=50,
xTitle='polarity',
linecolor='black',
yTitle='count',
title=title,
asFigure=save_param)
logger.info("Visual Rendered Successfully")
if save:
sentiments.write_html('Sentiments.html')
logger.info("Saving 'Sentiments.html' in current active directory")
except:
logger.warning("Invalid topic_num param or empty Vocab. Try changing Topic Number.")
sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')
elif plot == 'pos':
from textblob import TextBlob
b = list(id2word.token2id.keys())
logger.info("Fitting TextBlob()")
blob = TextBlob(str(b))
pos_df = pd.DataFrame(blob.tags, columns = ['word' , 'pos'])
pos_df = pos_df.loc[pos_df['pos'] != 'POS']
pos_df = pos_df.pos.value_counts()[:20]
logger.info("Rendering Visual")
pos_df = pos_df.iplot(
kind='bar',
xTitle='POS',
yTitle='count',
title='Top 20 Part-of-speech tagging for review corpus',
asFigure=save_param)
logger.info("Visual Rendered Sucessfully")
if save:
pos_df.write_html('POS.html')
logger.info("Saving 'POS.html' in current active directory")
elif plot == 'tsne':
logger.info("SubProcess assign_model() called ==================================")
b = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
b.dropna(axis=0, inplace=True) #droping rows where Dominant_Topic is blank
c = []
for i in b.columns:
if 'Topic_' in i:
a = i
c.append(a)
bb = b[c]
from sklearn.manifold import TSNE
logger.info("Fitting TSNE()")
X_embedded = TSNE(n_components=3).fit_transform(bb)
logger.info("Sorting Dataframe")
X = pd.DataFrame(X_embedded)
X['Dominant_Topic'] = b['Dominant_Topic']
X.sort_values(by='Dominant_Topic', inplace=True)
X.dropna(inplace=True)
logger.info("Rendering Visual")
import plotly.express as px
df = X
fig = px.scatter_3d(df, x=0, y=1, z=2,
color='Dominant_Topic', title='3d TSNE Plot for Topic Model', opacity=0.7, width=900, height=800)
if system:
fig.show()
logger.info("Visual Rendered Successfully")
if save:
fig.write_html("TSNE.html")
logger.info("Saving 'TSNE.html' in current active directory")
elif plot == 'topic_model':
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import warnings
warnings.filterwarnings('ignore')
pyLDAvis.enable_notebook()
logger.info("Preparing pyLDAvis visual")
vis = pyLDAvis.gensim.prepare(model, corpus, id2word, mds='mmds')
display(vis)
logger.info("Visual Rendered Successfully")
elif plot == 'topic_distribution':
try:
iter1 = len(model.show_topics(999999))
except:
try:
iter1 = model.num_topics
except:
iter1 = model.n_components_
topic_name = []
keywords = []
for i in range(0,iter1):
try:
s = model.show_topic(i,topn=10)
topic_name.append('Topic ' + str(i))
kw = []
for i in s:
kw.append(i[0])
keywords.append(kw)
except:
keywords.append('NA')
topic_name.append('Topic ' + str(i))
keyword = []
for i in keywords:
b = ", ".join(i)
keyword.append(b)
kw_df = pd.DataFrame({'Topic': topic_name, 'Keyword' : keyword}).set_index('Topic')
logger.info("SubProcess assign_model() called ==================================")
ass_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
ass_df_pivot = ass_df.pivot_table(index='Dominant_Topic', values='Topic_0', aggfunc='count')
df2 = ass_df_pivot.join(kw_df)
df2 = df2.reset_index()
df2.columns = ['Topic', 'Documents', 'Keyword']
"""
sorting column starts
"""
logger.info("Sorting Dataframe")
topic_list = list(df2['Topic'])
s = []
for i in range(0,len(topic_list)):
a = int(topic_list[i].split()[1])
s.append(a)
df2['Topic'] = s
df2.sort_values(by='Topic', inplace=True)
df2.sort_values(by='Topic', inplace=True)
topic_list = list(df2['Topic'])
topic_list = list(df2['Topic'])
s = []
for i in topic_list:
a = 'Topic ' + str(i)
s.append(a)
df2['Topic'] = s
df2.reset_index(drop=True, inplace=True)
"""
sorting column ends
"""
logger.info("Rendering Visual")
import plotly.express as px
fig = px.bar(df2, x='Topic', y='Documents', hover_data = ['Keyword'], title='Document Distribution by Topics')
if system:
fig.show()
logger.info("Visual Rendered Successfully")
if save:
fig.write_html("Topic Distribution.html")
logger.info("Saving 'Topic Distribution.html' in current active directory")
elif plot == 'wordcloud':
try:
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
stopwords = set(STOPWORDS)
if topic_num is None:
logger.warning("topic_num set to None. Plot generated at corpus level.")
atext = " ".join(review for review in data_[target_])
else:
logger.info("SubProcess assign_model() called ==================================")
assigned_df = assign_model(model, verbose = False)
logger.info("SubProcess assign_model() end ==================================")
filtered_df = assigned_df.loc[assigned_df['Dominant_Topic'] == topic_num]
atext = " ".join(review for review in filtered_df[target_])
logger.info("Fitting WordCloud()")
wordcloud = WordCloud(width = 800, height = 800,
background_color ='white',
stopwords = stopwords,
min_font_size = 10).generate(atext)
# plot the WordCloud image
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
logger.info("Rendering Visual")
if save or log_plots_param:
if system:
plt.savefig("Wordcloud.png")
else:
plt.savefig("Wordcloud.png")
plt.close()
logger.info("Saving 'Wordcloud.png' in current active directory")
else:
plt.show()
logger.info("Visual Rendered Successfully")
except:
logger.warning("Invalid topic_num param or empty Vocab. Try changing Topic Number.")
sys.exit('(Value Error): Invalid topic_num param or empty Vocab. Try changing Topic Number.')
elif plot == 'umap':
#warnings
from matplotlib.axes._axes import _log as matplotlib_axes_logger
matplotlib_axes_logger.setLevel('ERROR')
#loading dependencies
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from yellowbrick.text import UMAPVisualizer
import matplotlib.pyplot as plt
tfidf = TfidfVectorizer()
logger.info("Fitting TfidfVectorizer()")
docs = tfidf.fit_transform(data_[target_])
# Instantiate the clustering model
clusters = KMeans(n_clusters=5, random_state=seed)
logger.info("Fitting KMeans()")
clusters.fit(docs)
plt.figure(figsize=(10,6))
umap = UMAPVisualizer(random_state=seed)
logger.info("Fitting UMAP()")
umap.fit(docs, ["c{}".format(c) for c in clusters.labels_])
logger.info("Rendering Visual")
if save or log_plots_param:
if system:
umap.show(outpath="UMAP.png")
else:
umap.show(outpath="UMAP.png", clear_figure=True)
logger.info("Saving 'UMAP.png' in current active directory")
else:
umap.show()
logger.info("Visual Rendered Successfully")
logger.info("plot_model() succesfully completed......................................")
def tune_model(model=None,
multi_core=False,
supervised_target=None,
estimator=None,
optimize=None,
custom_grid = None,
auto_fe = True,
fold=10,
verbose=True):
"""
This function tunes the num_topics model parameter using a predefined grid with
the objective of optimizing a supervised learning metric as defined in the optimize
param. You can choose the supervised estimator from a large library available in
pycaret. By default, supervised estimator is Linear.
This function returns the tuned model object.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> tuned_lda = tune_model(model = 'lda', supervised_target = 'status')
This will return trained Latent Dirichlet Allocation model.
Parameters
----------
model : string, default = None
Enter ID of the models available in model library (ID - Model):
* 'lda' - Latent Dirichlet Allocation
* 'lsi' - Latent Semantic Indexing
* 'hdp' - Hierarchical Dirichlet Process
* 'rp' - Random Projections
* 'nmf' - Non-Negative Matrix Factorization
multi_core: Boolean, default = False
True would utilize all CPU cores to parallelize and speed up model training. Only
available for 'lda'. For all other models, multi_core parameter is ignored.
supervised_target: string
Name of the target column for supervised learning. If None, the mdel coherence value
is used as the objective function.
estimator: string, default = None
For Classification (ID - Name):
* 'lr' - Logistic Regression
* 'knn' - K Nearest Neighbour
* 'nb' - Naive Bayes
* 'dt' - Decision Tree Classifier
* 'svm' - SVM - Linear Kernel
* 'rbfsvm' - SVM - Radial Kernel
* 'gpc' - Gaussian Process Classifier
* 'mlp' - Multi Level Perceptron
* 'ridge' - Ridge Classifier
* 'rf' - Random Forest Classifier
* 'qda' - Quadratic Discriminant Analysis
* 'ada' - Ada Boost Classifier
* 'gbc' - Gradient Boosting Classifier
* 'lda' - Linear Discriminant Analysis
* 'et' - Extra Trees Classifier
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Classifier
For Regression (ID - Name):
* 'lr' - Linear Regression
* 'lasso' - Lasso Regression
* 'ridge' - Ridge Regression
* 'en' - Elastic Net
* 'lar' - Least Angle Regression
* 'llar' - Lasso Least Angle Regression
* 'omp' - Orthogonal Matching Pursuit
* 'br' - Bayesian Ridge
* 'ard' - Automatic Relevance Determ.
* 'par' - Passive Aggressive Regressor
* 'ransac' - Random Sample Consensus
* 'tr' - TheilSen Regressor
* 'huber' - Huber Regressor
* 'kr' - Kernel Ridge
* 'svm' - Support Vector Machine
* 'knn' - K Neighbors Regressor
* 'dt' - Decision Tree
* 'rf' - Random Forest
* 'et' - Extra Trees Regressor
* 'ada' - AdaBoost Regressor
* 'gbr' - Gradient Boosting
* 'mlp' - Multi Level Perceptron
* 'xgboost' - Extreme Gradient Boosting
* 'lightgbm' - Light Gradient Boosting
* 'catboost' - CatBoost Regressor
If set to None, Linear / Logistic model is used by default.
optimize: string, default = None
For Classification tasks:
Accuracy, AUC, Recall, Precision, F1, Kappa
For Regression tasks:
MAE, MSE, RMSE, R2, RMSLE, MAPE
If set to None, default is 'Accuracy' for classification and 'R2' for
regression tasks.
custom_grid: list, default = None
By default, a pre-defined number of topics is iterated over to
optimize the supervised objective. To overwrite default iteration,
pass a list of num_topics to iterate over in custom_grid param.
auto_fe: boolean, default = True
Automatic text feature engineering. Only used when supervised_target is
passed. When set to true, it will generate text based features such as
polarity, subjectivity, wordcounts to be used in supervised learning.
Ignored when supervised_target is set to None.
fold: integer, default = 10
Number of folds to be used in Kfold CV. Must be at least 2.
verbose: Boolean, default = True
Status update is not printed when verbose is set to False.
Returns
-------
Visual_Plot
Visual plot with k number of topics on x-axis with metric to
optimize on y-axis. Coherence is used when learning is
unsupervised. Also, prints the best model metric.
model
trained model object with best K number of topics.
Warnings
--------
- Random Projections ('rp') and Non Negative Matrix Factorization ('nmf')
is not available for unsupervised learning. Error is raised when 'rp' or
'nmf' is passed without supervised_target.
- Estimators using kernel based methods such as Kernel Ridge Regressor,
Automatic Relevance Determinant, Gaussian Process Classifier, Radial Basis
Support Vector Machine and Multi Level Perceptron may have longer training
times.
"""
"""
exception handling starts here
"""
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing tune_model()")
logger.info("""tune_model(model={}, multi_core={}, supervised_target={}, estimator={}, optimize={}, custom_grid={}, auto_fe={}, fold={}, verbose={})""".\
format(str(model), str(multi_core), str(supervised_target), str(estimator), str(optimize), str(custom_grid), str(auto_fe), str(fold), str(verbose)))
logger.info("Checking exceptions")
#ignore warnings
import warnings
warnings.filterwarnings('ignore')
import sys
#checking for model parameter
if model is None:
sys.exit('(Value Error): Model parameter Missing. Please see docstring for list of available models.')
#checking for allowed models
allowed_models = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
if model not in allowed_models:
sys.exit('(Value Error): Model Not Available. Please see docstring for list of available models.')
#checking multicore type:
if type(multi_core) is not bool:
sys.exit('(Type Error): multi_core parameter can only take argument as True or False.')
#check supervised target:
if supervised_target is not None:
all_col = list(data_.columns)
target = target_
all_col.remove(target)
if supervised_target not in all_col:
sys.exit('(Value Error): supervised_target not recognized. It can only be one of the following: ' + str(all_col))
#supervised target exception handling
if supervised_target is None:
models_not_allowed = ['rp', 'nmf']
if model in models_not_allowed:
sys.exit('(Type Error): Model not supported for unsupervised tuning. Either supervised_target param has to be passed or different model has to be used. Please see docstring for available models.')
#checking estimator:
if estimator is not None:
available_estimators = ['lr', 'knn', 'nb', 'dt', 'svm', 'rbfsvm', 'gpc', 'mlp', 'ridge', 'rf', 'qda', 'ada',
'gbc', 'lda', 'et', 'lasso', 'ridge', 'en', 'lar', 'llar', 'omp', 'br', 'ard', 'par',
'ransac', 'tr', 'huber', 'kr', 'svm', 'knn', 'dt', 'rf', 'et', 'ada', 'gbr',
'mlp', 'xgboost', 'lightgbm', 'catboost']
if estimator not in available_estimators:
sys.exit('(Value Error): Estimator Not Available. Please see docstring for list of available estimators.')
#checking optimize parameter
if optimize is not None:
available_optimizers = ['MAE', 'MSE', 'RMSE', 'R2', 'ME', 'Accuracy', 'AUC', 'Recall', 'Precision', 'F1', 'Kappa']
if optimize not in available_optimizers:
sys.exit('(Value Error): optimize parameter Not Available. Please see docstring for list of available parameters.')
#checking auto_fe:
if type(auto_fe) is not bool:
sys.exit('(Type Error): auto_fe parameter can only take argument as True or False.')
#checking fold parameter
if type(fold) is not int:
sys.exit('(Type Error): Fold parameter only accepts integer value.')
"""
exception handling ends here
"""
logger.info("Preloading libraries")
#pre-load libraries
import pandas as pd
import ipywidgets as ipw
from ipywidgets import Output
from IPython.display import display, HTML, clear_output, update_display
import datetime, time
logger.info("Preparing display monitor")
#progress bar
if custom_grid is None:
max_steps = 25
else:
max_steps = 10 + len(custom_grid)
progress = ipw.IntProgress(value=0, min=0, max=max_steps, step=1 , description='Processing: ')
if verbose:
if html_param:
display(progress)
timestampStr = datetime.datetime.now().strftime("%H:%M:%S")
monitor = pd.DataFrame( [ ['Initiated' , '. . . . . . . . . . . . . . . . . .', timestampStr ],
['Status' , '. . . . . . . . . . . . . . . . . .' , 'Loading Dependencies'],
['Step' , '. . . . . . . . . . . . . . . . . .', 'Initializing' ] ],
columns=['', ' ', ' ']).set_index('')
monitor_out = Output()
if verbose:
if html_param:
display(monitor_out)
if verbose:
if html_param:
with monitor_out:
display(monitor, display_id = 'monitor')
logger.info("Importing libraries")
#General Dependencies
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_predict
from sklearn import metrics
import numpy as np
import plotly.express as px
#setting up cufflinks
import cufflinks as cf
cf.go_offline()
cf.set_config_file(offline=False, world_readable=True)
progress.value += 1
#define the problem
if supervised_target is None:
problem ='unsupervised'
logger.info("Objective : Unsupervised")
elif data_[supervised_target].value_counts().count() == 2:
problem = 'classification'
logger.info("Objective : Classification")
else:
problem = 'regression'
logger.info("Objective : Regression")
#define topic_model_name
logger.info("Defining model name")
if model == 'lda':
topic_model_name = 'Latent Dirichlet Allocation'
elif model == 'lsi':
topic_model_name = 'Latent Semantic Indexing'
elif model == 'hdp':
topic_model_name = 'Hierarchical Dirichlet Process'
elif model == 'nmf':
topic_model_name = 'Non-Negative Matrix Factorization'
elif model == 'rp':
topic_model_name = 'Random Projections'
logger.info("Topic Model Name: " + str(topic_model_name))
#defining estimator:
logger.info("Defining supervised estimator")
if problem == 'classification' and estimator is None:
estimator = 'lr'
elif problem == 'regression' and estimator is None:
estimator = 'lr'
else:
estimator = estimator
logger.info("Estimator: " + str(estimator))
#defining optimizer:
logger.info("Defining Optimizer")
if optimize is None and problem == 'classification':
optimize = 'Accuracy'
elif optimize is None and problem == 'regression':
optimize = 'R2'
else:
optimize=optimize
logger.info("Optimize: " + str(optimize))
progress.value += 1
#creating sentiments
if problem == 'classification' or problem == 'regression':
logger.info("Problem : Supervised")
if auto_fe:
logger.info("auto_fe param set to True")
monitor.iloc[1,1:] = 'Feature Engineering'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
from textblob import TextBlob
monitor.iloc[2,1:] = 'Extracting Polarity'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
logger.info("Extracting Polarity")
polarity = data_[target_].map(lambda text: TextBlob(text).sentiment.polarity)
monitor.iloc[2,1:] = 'Extracting Subjectivity'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
logger.info("Extracting Subjectivity")
subjectivity = data_[target_].map(lambda text: TextBlob(text).sentiment.subjectivity)
monitor.iloc[2,1:] = 'Extracting Wordcount'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
logger.info("Extracting Wordcount")
word_count = [len(i) for i in text]
progress.value += 1
#defining tuning grid
logger.info("Defining Tuning Grid")
if custom_grid is not None:
logger.info("Custom Grid used")
param_grid = custom_grid
else:
logger.info("Pre-defined Grid used")
param_grid = [2,4,8,16,32,64,100,200,300,400]
master = []; master_df = []
monitor.iloc[1,1:] = 'Creating Topic Model'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
for i in param_grid:
logger.info("Fitting Model with num_topics = " +str(i))
progress.value += 1
monitor.iloc[2,1:] = 'Fitting Model With ' + str(i) + ' Topics'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#create and assign the model to dataset d
logger.info("SubProcess create_model() called ==================================")
m = create_model(model=model, multi_core=multi_core, num_topics=i, verbose=False)
logger.info("SubProcess create_model() end ==================================")
logger.info("SubProcess assign_model() called ==================================")
d = assign_model(m, verbose=False)
logger.info("SubProcess assign_model() end ==================================")
if problem in ['classification', 'regression'] and auto_fe:
d['Polarity'] = polarity
d['Subjectivity'] = subjectivity
d['word_count'] = word_count
master.append(m)
master_df.append(d)
#topic model creation end's here
if problem == 'unsupervised':
logger.info("Problem : Unsupervised")
monitor.iloc[1,1:] = 'Evaluating Topic Model'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
from gensim.models import CoherenceModel
logger.info("CoherenceModel imported successfully")
coherence = []
metric = []
counter = 0
for i in master:
logger.info("Evaluating Coherence with num_topics: " +str(i))
progress.value += 1
monitor.iloc[2,1:] = 'Evaluating Coherence With ' + str(param_grid[counter]) + ' Topics'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
model = CoherenceModel(model=i, texts=text, dictionary=id2word, coherence='c_v')
model_coherence = model.get_coherence()
coherence.append(model_coherence)
metric.append('Coherence')
counter += 1
monitor.iloc[1,1:] = 'Compiling Results'
monitor.iloc[1,1:] = 'Finalizing'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
logger.info("Creating metrics dataframe")
df = pd.DataFrame({'# Topics': param_grid, 'Score' : coherence, 'Metric': metric})
df.columns = ['# Topics', 'Score', 'Metric']
sorted_df = df.sort_values(by='Score', ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
logger.info("Rendering Visual")
fig = px.line(df, x='# Topics', y='Score', line_shape='linear',
title= 'Coherence Value and # of Topics', color='Metric')
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
fig.show()
logger.info("Visual Rendered Successfully")
#monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)['# Topics'])[0]
best_m = round(np.array(sorted_df.head(1)['Score'])[0],4)
p = 'Best Model: ' + topic_model_name + ' |' + ' # Topics: ' + str(best_k) + ' | ' + 'Coherence: ' + str(best_m)
print(p)
elif problem == 'classification':
logger.info("Importing untrained Classifier")
"""
defining estimator
"""
monitor.iloc[1,1:] = 'Evaluating Topic Model'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
if estimator == 'lr':
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=seed)
full_name = 'Logistic Regression'
elif estimator == 'knn':
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier()
full_name = 'K Nearest Neighbours'
elif estimator == 'nb':
from sklearn.naive_bayes import GaussianNB
model = GaussianNB()
full_name = 'Naive Bayes'
elif estimator == 'dt':
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(random_state=seed)
full_name = 'Decision Tree'
elif estimator == 'svm':
from sklearn.linear_model import SGDClassifier
model = SGDClassifier(max_iter=1000, tol=0.001, random_state=seed)
full_name = 'Support Vector Machine'
elif estimator == 'rbfsvm':
from sklearn.svm import SVC
model = SVC(gamma='auto', C=1, probability=True, kernel='rbf', random_state=seed)
full_name = 'RBF SVM'
elif estimator == 'gpc':
from sklearn.gaussian_process import GaussianProcessClassifier
model = GaussianProcessClassifier(random_state=seed)
full_name = 'Gaussian Process Classifier'
elif estimator == 'mlp':
from sklearn.neural_network import MLPClassifier
model = MLPClassifier(max_iter=500, random_state=seed)
full_name = 'Multi Level Perceptron'
elif estimator == 'ridge':
from sklearn.linear_model import RidgeClassifier
model = RidgeClassifier(random_state=seed)
full_name = 'Ridge Classifier'
elif estimator == 'rf':
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=10, random_state=seed)
full_name = 'Random Forest Classifier'
elif estimator == 'qda':
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
model = QuadraticDiscriminantAnalysis()
full_name = 'Quadratic Discriminant Analysis'
elif estimator == 'ada':
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier(random_state=seed)
full_name = 'AdaBoost Classifier'
elif estimator == 'gbc':
from sklearn.ensemble import GradientBoostingClassifier
model = GradientBoostingClassifier(random_state=seed)
full_name = 'Gradient Boosting Classifier'
elif estimator == 'lda':
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
model = LinearDiscriminantAnalysis()
full_name = 'Linear Discriminant Analysis'
elif estimator == 'et':
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier(random_state=seed)
full_name = 'Extra Trees Classifier'
elif estimator == 'xgboost':
from xgboost import XGBClassifier
model = XGBClassifier(random_state=seed, n_jobs=-1, verbosity=0)
full_name = 'Extreme Gradient Boosting'
elif estimator == 'lightgbm':
import lightgbm as lgb
model = lgb.LGBMClassifier(random_state=seed)
full_name = 'Light Gradient Boosting Machine'
elif estimator == 'catboost':
from catboost import CatBoostClassifier
model = CatBoostClassifier(random_state=seed, silent=True) # Silent is True to suppress CatBoost iteration results
full_name = 'CatBoost Classifier'
logger.info(str(full_name) + ' Imported Successfully')
progress.value += 1
"""
start model building here
"""
acc = []; auc = []; recall = []; prec = []; kappa = []; f1 = []
for i in range(0,len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
logger.info('Training supervised model with num_topics: ' + str(param_grid_val))
monitor.iloc[2,1:] = 'Evaluating Classifier With ' + str(param_grid_val) + ' Topics'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#prepare the dataset for supervised problem
d = master_df[i]
d.dropna(axis=0, inplace=True) #droping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
#split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
#fit the model
logger.info('Fitting Model')
model.fit(X,y)
#generate the prediction and evaluate metric
logger.info('Generating Cross Val Predictions')
pred = cross_val_predict(model,X,y,cv=fold, method = 'predict')
acc_ = metrics.accuracy_score(y,pred)
acc.append(acc_)
recall_ = metrics.recall_score(y,pred)
recall.append(recall_)
precision_ = metrics.precision_score(y,pred)
prec.append(precision_)
kappa_ = metrics.cohen_kappa_score(y,pred)
kappa.append(kappa_)
f1_ = metrics.f1_score(y,pred)
f1.append(f1_)
if hasattr(model,'predict_proba'):
pred_ = cross_val_predict(model,X,y,cv=fold, method = 'predict_proba')
pred_prob = pred_[:,1]
auc_ = metrics.roc_auc_score(y,pred_prob)
auc.append(auc_)
else:
auc.append(0)
monitor.iloc[1,1:] = 'Compiling Results'
monitor.iloc[1,1:] = 'Finalizing'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
logger.info('Creating metrics dataframe')
df = pd.DataFrame({'# Topics': param_grid, 'Accuracy' : acc, 'AUC' : auc, 'Recall' : recall,
'Precision' : prec, 'F1' : f1, 'Kappa' : kappa})
sorted_df = df.sort_values(by=optimize, ascending=False)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
progress.value += 1
logger.info('Rendering Visual')
sd = pd.melt(df, id_vars=['# Topics'], value_vars=['Accuracy', 'AUC', 'Recall', 'Precision', 'F1', 'Kappa'],
var_name='Metric', value_name='Score')
fig = px.line(sd, x='# Topics', y='Score', color='Metric', line_shape='linear', range_y = [0,1])
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
title= str(full_name) + ' Metrics and # of Topics'
fig.update_layout(title={'text': title, 'y':0.95,'x':0.45,'xanchor': 'center','yanchor': 'top'})
fig.show()
logger.info("Visual Rendered Successfully")
#monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
monitor_out.clear_output()
progress.close()
best_k = np.array(sorted_df.head(1)['# Topics'])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0],4)
p = 'Best Model: ' + topic_model_name + ' |' + ' # Topics: ' + str(best_k) + ' | ' + str(optimize) + ' : ' + str(best_m)
print(p)
elif problem == 'regression':
logger.info("Importing untrained Regressor")
"""
defining estimator
"""
monitor.iloc[1,1:] = 'Evaluating Topic Model'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
if estimator == 'lr':
from sklearn.linear_model import LinearRegression
model = LinearRegression()
full_name = 'Linear Regression'
elif estimator == 'lasso':
from sklearn.linear_model import Lasso
model = Lasso(random_state=seed)
full_name = 'Lasso Regression'
elif estimator == 'ridge':
from sklearn.linear_model import Ridge
model = Ridge(random_state=seed)
full_name = 'Ridge Regression'
elif estimator == 'en':
from sklearn.linear_model import ElasticNet
model = ElasticNet(random_state=seed)
full_name = 'Elastic Net'
elif estimator == 'lar':
from sklearn.linear_model import Lars
model = Lars()
full_name = 'Least Angle Regression'
elif estimator == 'llar':
from sklearn.linear_model import LassoLars
model = LassoLars()
full_name = 'Lasso Least Angle Regression'
elif estimator == 'omp':
from sklearn.linear_model import OrthogonalMatchingPursuit
model = OrthogonalMatchingPursuit()
full_name = 'Orthogonal Matching Pursuit'
elif estimator == 'br':
from sklearn.linear_model import BayesianRidge
model = BayesianRidge()
full_name = 'Bayesian Ridge Regression'
elif estimator == 'ard':
from sklearn.linear_model import ARDRegression
model = ARDRegression()
full_name = 'Automatic Relevance Determination'
elif estimator == 'par':
from sklearn.linear_model import PassiveAggressiveRegressor
model = PassiveAggressiveRegressor(random_state=seed)
full_name = 'Passive Aggressive Regressor'
elif estimator == 'ransac':
from sklearn.linear_model import RANSACRegressor
model = RANSACRegressor(random_state=seed)
full_name = 'Random Sample Consensus'
elif estimator == 'tr':
from sklearn.linear_model import TheilSenRegressor
model = TheilSenRegressor(random_state=seed)
full_name = 'TheilSen Regressor'
elif estimator == 'huber':
from sklearn.linear_model import HuberRegressor
model = HuberRegressor()
full_name = 'Huber Regressor'
elif estimator == 'kr':
from sklearn.kernel_ridge import KernelRidge
model = KernelRidge()
full_name = 'Kernel Ridge'
elif estimator == 'svm':
from sklearn.svm import SVR
model = SVR()
full_name = 'Support Vector Regression'
elif estimator == 'knn':
from sklearn.neighbors import KNeighborsRegressor
model = KNeighborsRegressor()
full_name = 'Nearest Neighbors Regression'
elif estimator == 'dt':
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor(random_state=seed)
full_name = 'Decision Tree Regressor'
elif estimator == 'rf':
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor(random_state=seed)
full_name = 'Random Forest Regressor'
elif estimator == 'et':
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor(random_state=seed)
full_name = 'Extra Trees Regressor'
elif estimator == 'ada':
from sklearn.ensemble import AdaBoostRegressor
model = AdaBoostRegressor(random_state=seed)
full_name = 'AdaBoost Regressor'
elif estimator == 'gbr':
from sklearn.ensemble import GradientBoostingRegressor
model = GradientBoostingRegressor(random_state=seed)
full_name = 'Gradient Boosting Regressor'
elif estimator == 'mlp':
from sklearn.neural_network import MLPRegressor
model = MLPRegressor(random_state=seed)
full_name = 'MLP Regressor'
elif estimator == 'xgboost':
from xgboost import XGBRegressor
model = XGBRegressor(random_state=seed, n_jobs=-1, verbosity=0)
full_name = 'Extreme Gradient Boosting Regressor'
elif estimator == 'lightgbm':
import lightgbm as lgb
model = lgb.LGBMRegressor(random_state=seed)
full_name = 'Light Gradient Boosting Machine'
elif estimator == 'catboost':
from catboost import CatBoostRegressor
model = CatBoostRegressor(random_state=seed, silent = True)
full_name = 'CatBoost Regressor'
logger.info(str(full_name) + ' Imported Successfully')
progress.value += 1
"""
start model building here
"""
score = []
metric = []
for i in range(0,len(master_df)):
progress.value += 1
param_grid_val = param_grid[i]
logger.info('Training supervised model with num_topics: ' + str(param_grid_val))
monitor.iloc[2,1:] = 'Evaluating Regressor With ' + str(param_grid_val) + ' Topics'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
#prepare the dataset for supervised problem
d = master_df[i]
d.dropna(axis=0, inplace=True) #droping rows where Dominant_Topic is blank
d.drop([target_], inplace=True, axis=1)
d = pd.get_dummies(d)
#split the dataset
X = d.drop(supervised_target, axis=1)
y = d[supervised_target]
#fit the model
logger.info('Fitting Model')
model.fit(X,y)
#generate the prediction and evaluate metric
logger.info('Generating Cross Val Predictions')
pred = cross_val_predict(model,X,y,cv=fold, method = 'predict')
if optimize == 'R2':
r2_ = metrics.r2_score(y,pred)
score.append(r2_)
elif optimize == 'MAE':
mae_ = metrics.mean_absolute_error(y,pred)
score.append(mae_)
elif optimize == 'MSE':
mse_ = metrics.mean_squared_error(y,pred)
score.append(mse_)
elif optimize == 'RMSE':
mse_ = metrics.mean_squared_error(y,pred)
rmse_ = np.sqrt(mse_)
score.append(rmse_)
elif optimize == 'ME':
max_error_ = metrics.max_error(y,pred)
score.append(max_error_)
metric.append(str(optimize))
monitor.iloc[1,1:] = 'Compiling Results'
monitor.iloc[1,1:] = 'Finalizing'
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
logger.info('Creating metrics dataframe')
df = pd.DataFrame({'# Topics': param_grid, 'Score' : score, 'Metric': metric})
df.columns = ['# Topics', optimize, 'Metric']
#sorting to return best model
if optimize == 'R2':
sorted_df = df.sort_values(by=optimize, ascending=False)
else:
sorted_df = df.sort_values(by=optimize, ascending=True)
ival = sorted_df.index[0]
best_model = master[ival]
best_model_df = master_df[ival]
logger.info('Rendering Visual')
fig = px.line(df, x='# Topics', y=optimize, line_shape='linear',
title= str(full_name) + ' Metrics and # of Topics', color='Metric')
fig.update_layout(plot_bgcolor='rgb(245,245,245)')
progress.value += 1
#monitor = ''
if verbose:
if html_param:
update_display(monitor, display_id = 'monitor')
monitor_out.clear_output()
progress.close()
fig.show()
logger.info('Visual Rendered Successfully')
best_k = np.array(sorted_df.head(1)['# Topics'])[0]
best_m = round(np.array(sorted_df.head(1)[optimize])[0],4)
p = 'Best Model: ' + topic_model_name + ' |' + ' # Topics: ' + str(best_k) + ' | ' + str(optimize) + ' : ' + str(best_m)
print(p)
logger.info(str(best_model))
logger.info("tune_model() succesfully completed......................................")
return best_model
def evaluate_model(model):
"""
This function displays the user interface for all the available plots
for a given model. It internally uses the plot_model() function.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> evaluate_model(lda)
This will display the User Interface for all of the plots for
given model.
Parameters
----------
model : object, default = none
A trained model object should be passed.
Returns
-------
User_Interface
Displays the user interface for plotting.
"""
from ipywidgets import widgets
from ipywidgets.widgets import interact, fixed, interact_manual
import numpy as np
"""
generate sorted list
"""
try:
n_topic_assigned = len(model.show_topics())
except:
try:
n_topic_assigned = model.num_topics
except:
n_topic_assigned = model.n_components
final_list = []
for i in range(0,n_topic_assigned):
final_list.append('Topic ' +str(i))
a = widgets.ToggleButtons(
options=[('Frequency Plot', 'frequency'),
('Bigrams', 'bigram'),
('Trigrams', 'trigram'),
('Sentiment Polarity', 'sentiment'),
('Word Cloud', 'wordcloud'),
],
description='Plot Type:',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
icons=['']
)
b = widgets.Dropdown(options=final_list, description='Topic #:', disabled=False)
d = interact_manual(plot_model, model = fixed(model), plot = a, topic_num=b, save=fixed(False), system=fixed(True))
def save_model(model, model_name,
verbose=True):
"""
This function saves the trained model object into the current active
directory as a pickle file for later use.
Example
-------
>>> from pycaret.datasets import get_data
>>> kiva = get_data('kiva')
>>> experiment_name = setup(data = kiva, target = 'en')
>>> lda = create_model('lda')
>>> save_model(lda, 'lda_model_23122019')
This will save the model as a binary pickle file in the current
directory.
Parameters
----------
model : object, default = none
A trained model object should be passed.
model_name : string, default = none
Name of pickle file to be passed as a string.
verbose : bool, default = True
When set to False, success message is not printed.
Returns
-------
Success_Message
"""
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing save_model()")
logger.info("""save_model(model={}, model_name={}, verbose={})""".\
format(str(model), str(model_name), str(verbose)))
import joblib
model_name = model_name + '.pkl'
joblib.dump(model, model_name)
if verbose:
print('Model Succesfully Saved')
logger.info(str(model))
logger.info("save_model() succesfully completed......................................")
def load_model(model_name,
verbose=True):
"""
This function loads a previously saved model from the current active directory
into the current python environment. Load object must be a pickle file.
Example
-------
>>> saved_lda = load_model('lda_model_23122019')
This will call the trained model in saved_lr variable using model_name param.
The file must be in current directory.
Parameters
----------
model_name : string, default = none
Name of pickle file to be passed as a string.
verbose : bool, default = True
When set to False, success message is not printed.
Returns
-------
Success_Message
"""
import joblib
model_name = model_name + '.pkl'
if verbose:
print('Model Sucessfully Loaded')
return joblib.load(model_name)
def models():
"""
Returns table of models available in model library.
Example
-------
>>> all_models = models()
This will return pandas.DataFrame with all available
models and their metadata.
Returns
-------
pandas.DataFrame
"""
import pandas as pd
model_id = ['lda', 'lsi', 'hdp', 'rp', 'nmf']
model_name = ['Latent Dirichlet Allocation',
'Latent Semantic Indexing',
'Hierarchical Dirichlet Process',
'Random Projections',
'Non-Negative Matrix Factorization']
model_ref = ['gensim/models/ldamodel',
'gensim/models/lsimodel',
'gensim/models/hdpmodel',
'gensim/models/rpmodel',
'sklearn.decomposition.NMF']
df = pd.DataFrame({'ID' : model_id,
'Name' : model_name,
'Reference' : model_ref})
df.set_index('ID', inplace=True)
return df
def get_logs(experiment_name = None, save = False):
"""
Returns a table with experiment logs consisting
run details, parameter, metrics and tags.
Example
-------
>>> logs = get_logs()
This will return pandas.DataFrame.
Parameters
----------
experiment_name : string, default = None
When set to None current active run is used.
save : bool, default = False
When set to True, csv file is saved in current directory.
Returns
-------
pandas.DataFrame
"""
import sys
if experiment_name is None:
exp_name_log_ = exp_name_log
else:
exp_name_log_ = experiment_name
import mlflow
from mlflow.tracking import MlflowClient
client = MlflowClient()
if client.get_experiment_by_name(exp_name_log_) is None:
sys.exit('No active run found. Check logging parameter in setup or to get logs for inactive run pass experiment_name.')
exp_id = client.get_experiment_by_name(exp_name_log_).experiment_id
runs = mlflow.search_runs(exp_id)
if save:
file_name = str(exp_name_log_) + '_logs.csv'
runs.to_csv(file_name, index=False)
return runs
def get_config(variable):
"""
This function is used to access global environment variables.
Following variables can be accessed:
- text: Tokenized words as a list with length = # documents
- data_: pandas.DataFrame containing text after all processing
- corpus: List containing tuples of id to word mapping
- id2word: gensim.corpora.dictionary.Dictionary
- seed: random state set through session_id
- target_: Name of column containing text. 'en' by default.
- html_param: html_param configured through setup
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
Example
-------
>>> text = get_config('text')
This will return transformed dataset.
Returns
-------
variable
"""
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing get_config()")
logger.info("""get_config(variable={})""".\
format(str(variable)))
if variable == 'text':
global_var = text
if variable == 'data_':
global_var = data_
if variable == 'corpus':
global_var = corpus
if variable == 'id2word':
global_var = id2word
if variable == 'seed':
global_var = seed
if variable == 'target_':
global_var = target_
if variable == 'html_param':
global_var = html_param
if variable == 'exp_name_log':
global_var = exp_name_log
if variable == 'logging_param':
global_var = logging_param
if variable == 'log_plots_param':
global_var = log_plots_param
if variable == 'USI':
global_var = USI
logger.info("Global variable: " + str(variable) + ' returned')
logger.info("get_config() succesfully completed......................................")
return global_var
def set_config(variable,value):
"""
This function is used to reset global environment variables.
Following variables can be accessed:
- text: Tokenized words as a list with length = # documents
- data_: pandas.DataFrame containing text after all processing
- corpus: List containing tuples of id to word mapping
- id2word: gensim.corpora.dictionary.Dictionary
- seed: random state set through session_id
- target_: Name of column containing text. 'en' by default.
- html_param: html_param configured through setup
- exp_name_log: Name of experiment set through setup
- logging_param: log_experiment param set through setup
- log_plots_param: log_plots param set through setup
- USI: Unique session ID parameter set through setup
Example
-------
>>> set_config('seed', 123)
This will set the global seed to '123'.
"""
import logging
try:
hasattr(logger, 'name')
except:
logger = logging.getLogger('logs')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
if logger.hasHandlers():
logger.handlers.clear()
ch = logging.FileHandler('logs.log')
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger.info("Initializing set_config()")
logger.info("""set_config(variable={}, value={})""".\
format(str(variable), str(value)))
if variable == 'text':
global text
text = value
if variable == 'data_':
global data_
data_ = value
if variable == 'corpus':
global corpus
corpus = value
if variable == 'id2word':
global id2word
id2word = value
if variable == 'seed':
global seed
seed = value
if variable == 'html_param':
global html_param
html_param = value
if variable == 'exp_name_log':
global exp_name_log
exp_name_log = value
if variable == 'logging_param':
global logging_param
logging_param = value
if variable == 'log_plots_param':
global log_plots_param
log_plots_param = value
if variable == 'USI':
global USI
USI = value
logger.info("Global variable: " + str(variable) + ' updated')
logger.info("set_config() succesfully completed......................................")
def get_system_logs():
"""
Read and print 'logs.log' file from current active directory
"""
file = open('logs.log', 'r')
lines = file.read().splitlines()
file.close()
for line in lines:
if not line:
continue
columns = [col.strip() for col in line.split(':') if col]
print(columns)
def get_topics(data, text, model=None, num_topics=4):
"""
Callable from any external environment without requiring setup initialization.
"""
if model is None:
model = 'lda'
s = setup(data=data, target=text)
c = create_model(model=model, num_topics=num_topics, verbose=False)
dataset = assign_model(c, verbose=False)
return dataset
| [] |
2024-01-10 | memasanz/CogSearchOpenAIWithOutVectorSearch | cog_search.py | import requests
import json
import json
import numpy as np
import os
import pandas as pd
import openai
from collections import OrderedDict
from langchain.llms import AzureOpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains import RetrievalQA
from langchain.chat_models import AzureChatOpenAI
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.vectorstores import FAISS
from openai.embeddings_utils import get_embedding, cosine_similarity
from transformers import GPT2TokenizerFast
from dotenv import dotenv_values
class CogSearchHelper:
def __init__(self, index):
self.service_name = os.getenv('COG_SEARCH_RESOURCE')
self.usgov = os.getenv('USGOV')
if self.usgov == 'True':
self.endpoint = "https://{}.search.azure.us/".format(self.service_name)
else:
self.endpoint = "https://{}.search.windows.net/".format(self.service_name)
print(self.endpoint)
self.search_key = os.getenv('COG_SEARCH_KEY')
self.storage_connectionstring = os.getenv('STORAGE_CONNECTION_STRING')
self.storage_container = os.getenv('STORAGE_CONTAINER')
self.cognitive_service_key = os.getenv('COG_SERVICE_KEY')
if index == None:
self.index = os.getenv('COG_SEARCH_INDEX')
else:
self.index = index
def get_the_token_count(self, documents):
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
total_token_count = 0
try:
token_count = len(tokenizer.encode(documents))
except:
print('failed to get token count')
token_count = -1
pass
return token_count
def search_single_docs(df, user_query, TEXT_SEARCH_QUERY_EMBEDDING_ENGINE, top_n=3):
embedding = get_embedding(
user_query,
engine=TEXT_SEARCH_QUERY_EMBEDDING_ENGINE
)
df["similarities"] = df.curie_search.apply(lambda x: cosine_similarity(x, embedding))
res = (
df.sort_values("similarities", ascending=False)
.reset_index(drop=True)
.head(top_n)
)
return res
def search(self, question):
response = openai.Embedding.create(input=question,engine="text-embedding-ada-002")
q_embeddings = response['data'][0]['embedding']
if len(question) > 0:
endpoint = "https://{}.search.windows.net/".format(self.service_name)
url = '{0}indexes/{1}/docs/search?api-version=2021-04-30-Preview'.format(endpoint, self.index)
print(url)
payload = json.dumps({
"search": question,
"count": True,
})
headers = {
'api-key': '{0}'.format(self.search_key),
'Content-Type': 'application/json'
}
response = requests.request("POST", url, headers=headers, data=payload)
obj = response.json()
relevant_data = []
lst_embeddings_text = []
lst_embeddings = []
lst_file_name = []
count = 0
for x in obj['value']:
if x['@search.score'] > 0.5:
count += 1
relevant_data.append(x['content'])
embeddings = x['embeddings']
embeddings_text = x['embeddings_text']
file_name = x['metadata_storage_name']
curie_search = []
for x in embeddings:
a = np.fromstring(x[1:-1], dtype=float, sep=',')
curie_search.append(a)
curie_list = list(curie_search)
for i in range(len(embeddings)):
lst_embeddings_text.append(embeddings_text[i])
lst_embeddings.append(np.fromstring(embeddings[i][1:-1], dtype=float, sep=','))
lst_file_name.append(file_name)
tuples_list = []
metadata_list = []
tokencount = 0
for i in range(len(lst_embeddings_text)):
tuples_list.append((lst_embeddings_text[i], lst_embeddings[i]))
metadata_list.append(dict(source=lst_file_name[i]))
return relevant_data, count, lst_file_name, tuples_list, lst_embeddings_text, metadata_list
#COG_SEARCH_RESOURCE, COG_SEARCH_INDEX, COG_SEARCH_KEY, STORAGE_CONNECTION_STRING, STORAGE_CONTAINER
def create_datasource(self):
url = '{0}/datasources/{1}-datasource?api-version=2020-06-30'.format(self.endpoint, self.index)
payload = json.dumps({
"description": "Demo files to demonstrate cognitive search capabilities.",
"type": "azureblob",
"credentials": {
"connectionString": self.storage_connectionstring
},
"container": {
"name": self.storage_container
}
})
headers = {
'api-key': self.search_key,
'Content-Type': 'application/json'
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
return response, True
else:
print(response.json())
return response, False
def create_skillset(self, cognitive_service_key, embeddingFunctionAppUriAndKey):
url = '{0}/skillsets/{1}-skillset?api-version=2021-04-30-Preview'.format(self.endpoint, self.index)
print(url)
payload = json.dumps({
"@odata.context": "{}/$metadata#skillsets/$entity".format(self.endpoint),
"@odata.etag": "\"0x8DB2B4BF82370CF\"",
"name": "{0}-skillset".format(self.index),
"description": "Skillset created from the portal. skillsetName: index-skillset; contentField: merged_content; enrichmentGranularity: document; knowledgeStoreStorageAccount: ;",
"skills": [
{
"@odata.type": "#Microsoft.Skills.Text.V3.EntityRecognitionSkill",
"name": "#1",
"description": None,
"context": "/document/merged_content",
"categories": [
"Organization",
"URL",
"DateTime",
"Skill",
"Address",
"Location",
"Product",
"IPAddress",
"Event",
"Person",
"Quantity",
"PersonType",
"PhoneNumber",
"Email"
],
"defaultLanguageCode": "en",
"minimumPrecision": None,
"modelVersion": None,
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "languageCode",
"source": "/document/language"
}
],
"outputs": [
{
"name": "persons",
"targetName": "people"
},
{
"name": "organizations",
"targetName": "organizations"
},
{
"name": "locations",
"targetName": "locations"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.KeyPhraseExtractionSkill",
"name": "#2",
"description": None,
"context": "/document/merged_content",
"defaultLanguageCode": "en",
"maxKeyPhraseCount": None,
"modelVersion": None,
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "languageCode",
"source": "/document/language"
}
],
"outputs": [
{
"name": "keyPhrases",
"targetName": "keyphrases"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.LanguageDetectionSkill",
"name": "#3",
"description": None,
"context": "/document",
"defaultCountryHint": None,
"modelVersion": None,
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
}
],
"outputs": [
{
"name": "languageCode",
"targetName": "language"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.TranslationSkill",
"name": "#4",
"description": None,
"context": "/document/merged_content",
"defaultFromLanguageCode": None,
"defaultToLanguageCode": "en",
"suggestedFrom": "en",
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
}
],
"outputs": [
{
"name": "translatedText",
"targetName": "translated_text"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.PIIDetectionSkill",
"name": "#5",
"description": None,
"context": "/document/merged_content",
"defaultLanguageCode": "en",
"minimumPrecision": 0.5,
"maskingMode": "replace",
"maskingCharacter": "*",
"modelVersion": None,
"piiCategories": [],
"domain": "none",
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
},
{
"name": "languageCode",
"source": "/document/language"
}
],
"outputs": [
{
"name": "piiEntities",
"targetName": "pii_entities"
},
{
"name": "maskedText",
"targetName": "masked_text"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Text.MergeSkill",
"name": "#6",
"description": None,
"context": "/document",
"insertPreTag": " ",
"insertPostTag": " ",
"inputs": [
{
"name": "text",
"source": "/document/content"
},
{
"name": "itemsToInsert",
"source": "/document/normalized_images/*/text"
},
{
"name": "offsets",
"source": "/document/normalized_images/*/contentOffset"
}
],
"outputs": [
{
"name": "mergedText",
"targetName": "merged_content"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Vision.OcrSkill",
"name": "#7",
"description": None,
"context": "/document/normalized_images/*",
"textExtractionAlgorithm": None,
"lineEnding": "Space",
"defaultLanguageCode": "en",
"detectOrientation": True,
"inputs": [
{
"name": "image",
"source": "/document/normalized_images/*"
}
],
"outputs": [
{
"name": "text",
"targetName": "text"
},
{
"name": "layoutText",
"targetName": "layoutText"
}
]
},
{
"@odata.type": "#Microsoft.Skills.Vision.ImageAnalysisSkill",
"name": "#8",
"description": None,
"context": "/document/normalized_images/*",
"defaultLanguageCode": "en",
"visualFeatures": [
"tags",
"description"
],
"details": [],
"inputs": [
{
"name": "image",
"source": "/document/normalized_images/*"
}
],
"outputs": [
{
"name": "tags",
"targetName": "imageTags"
},
{
"name": "description",
"targetName": "imageCaption"
}
]
}
,
{
"@odata.type": "#Microsoft.Skills.Custom.WebApiSkill",
"uri": embeddingFunctionAppUriAndKey,
"httpMethod": "POST",
"timeout": "PT230S",
"batchSize": 1,
"degreeOfParallelism": 1,
"name": "Embeddings",
"description": "",
"context": "/document",
"inputs": [
{
"name": "text",
"source": "/document/merged_content"
}
],
"outputs": [
{
"name": "embeddings",
"targetName": "embeddings"
},
{
"name": "embeddings_text",
"targetName": "embeddings_text"
}
]
}
],
"cognitiveServices": {
"@odata.type": "#Microsoft.Azure.Search.CognitiveServicesByKey",
"description": "SuperCool",
"key": "{0}".format(cognitive_service_key)
},
"knowledgeStore": None,
"encryptionKey": None
})
headers = {
'Content-Type': 'application/json',
'api-key': '{0}'.format(self.search_key)
}
response = requests.request("PUT", url, headers=headers, data=payload)
print(response.text)
if response.status_code == 201 or response.status_code == 204:
return response, True
else:
return response, False
def update_index_semantic(self):
url = '{0}/indexes/{1}/?api-version=2021-04-30-Preview'.format(self.endpoint, self.index)
print(url)
payload = json.dumps({
"name": self.index,
"defaultScoringProfile": "",
"fields": [
{
"name": "content",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "metadata_storage_content_type",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_size",
"type": "Edm.Int64",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_last_modified",
"type": "Edm.DateTimeOffset",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_content_md5",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_name",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_path",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": True,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_storage_file_extension",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_content_type",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_language",
"type": "Edm.String",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "metadata_creation_date",
"type": "Edm.DateTimeOffset",
"searchable": False,
"filterable": False,
"retrievable": False,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "people",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "organizations",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "locations",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "keyphrases",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "language",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "translated_text",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "en.lucene",
"synonymMaps": []
},
{
"name": "embeddings_text",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "embeddings",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "pii_entities",
"type": "Collection(Edm.ComplexType)",
"fields": [
{
"name": "text",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "type",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "subtype",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "offset",
"type": "Edm.Int32",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "length",
"type": "Edm.Int32",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
},
{
"name": "score",
"type": "Edm.Double",
"searchable": False,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": None,
"synonymMaps": []
}
]
},
{
"name": "masked_text",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "merged_content",
"type": "Edm.String",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "text",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "layoutText",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "imageTags",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
},
{
"name": "imageCaption",
"type": "Collection(Edm.String)",
"searchable": True,
"filterable": False,
"retrievable": True,
"sortable": False,
"facetable": False,
"key": False,
"indexAnalyzer": None,
"searchAnalyzer": None,
"analyzer": "standard.lucene",
"synonymMaps": []
}
],
"scoringProfiles": [],
"corsOptions": None,
"suggesters": [],
"analyzers": [],
"tokenizers": [],
"tokenFilters": [],
"charFilters": [],
"encryptionKey": None,
"similarity": {
"@odata.type": "#Microsoft.Azure.Search.BM25Similarity",
"k1": None,
"b": None
}
})
headers = {
'api-key': self.search_key,
'Content-Type': 'application/json'
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
return response, True
else:
# print('************************')
# print(response.status_code)
# print(response.text)
return response, False
def create_indexer(self):
url = '{0}/indexers/{1}-indexer/?api-version=2021-04-30-Preview'.format(self.endpoint, self.index)
print(url)
payload = json.dumps({
"name": "{0}-indexer".format(self.index),
"description": "",
"dataSourceName": "{0}-datasource".format(self.index),
"skillsetName": "{0}-skillset".format(self.index),
"targetIndexName": "{0}".format(self.index),
"disabled": None,
"schedule": None,
"parameters": {
"batchSize": None,
"maxFailedItems": 0,
"maxFailedItemsPerBatch": 0,
"base64EncodeKeys": None,
"configuration": {
"dataToExtract": "contentAndMetadata",
"parsingMode": "default",
"imageAction": "generateNormalizedImages"
}
},
"fieldMappings": [
{
"sourceFieldName": "metadata_storage_path",
"targetFieldName": "metadata_storage_path",
"mappingFunction": {
"name": "base64Encode",
"parameters": None
}
}
],
"outputFieldMappings": [
{
"sourceFieldName": "/document/merged_content/people",
"targetFieldName": "people"
},
{
"sourceFieldName": "/document/merged_content/organizations",
"targetFieldName": "organizations"
},
{
"sourceFieldName": "/document/merged_content/locations",
"targetFieldName": "locations"
},
{
"sourceFieldName": "/document/merged_content/keyphrases",
"targetFieldName": "keyphrases"
},
{
"sourceFieldName": "/document/language",
"targetFieldName": "language"
},
{
"sourceFieldName": "/document/merged_content/translated_text",
"targetFieldName": "translated_text"
},
{
"sourceFieldName": "/document/merged_content/pii_entities",
"targetFieldName": "pii_entities"
},
{
"sourceFieldName": "/document/merged_content/masked_text",
"targetFieldName": "masked_text"
},
{
"sourceFieldName": "/document/merged_content",
"targetFieldName": "merged_content"
},
{
"sourceFieldName": "/document/normalized_images/*/text",
"targetFieldName": "text"
},
{
"sourceFieldName": "/document/normalized_images/*/layoutText",
"targetFieldName": "layoutText"
},
{
"sourceFieldName": "/document/normalized_images/*/imageTags/*/name",
"targetFieldName": "imageTags"
},
{
"sourceFieldName": "/document/normalized_images/*/imageCaption",
"targetFieldName": "imageCaption"
},
{
"sourceFieldName": "/document/embeddings",
"targetFieldName": "embeddings"
},
{
"sourceFieldName": "/document/embeddings_text",
"targetFieldName": "embeddings_text"
}
],
"cache": None,
"encryptionKey": None
})
headers = {
'Content-Type': 'application/json',
'api-key': '{0}'.format(self.search_key)
}
response = requests.request("PUT", url, headers=headers, data=payload)
if response.status_code == 201 or response.status_code == 204:
print('good')
return response, True
else:
print(response.status_code)
return response, False
class OpenAIHelper:
def __init__(self, index):
config = dotenv_values(".env")
# Set the ENV variables that Langchain needs to connect to Azure OpenAI
os.environ['AZURE_OPENAI_API_VERSION'] = "2023-05-15"
os.environ["OPENAI_API_BASE"] = os.environ['AZURE_OPENAI_ENDPOINT']
os.environ["OPENAI_API_KEY"] = os.environ['AZURE_OPENAI_KEY']
os.environ["OPENAI_API_VERSION"] = "2023-05-15"
os.environ["OPENAI_API_TYPE"] = "azure"
COMBINE_PROMPT_TEMPLATE = """
These are examples of how you must provide the answer:
--> Beginning of examples
=========
QUESTION: Which state/country's law governs the interpretation of the contract?
=========
Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.
Source: SuperCool.docx
Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.
Source: https://yyyy.com/article2.html?s=lkhljkhljk&category=c&sort=asc
Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,
Source: https://yyyy.com/article3.csv?s=kjsdhfd&category=c&sort=asc&page=2
Content: The terms of this Agreement shall be subject to the laws of Manchester, England, and any disputes arising from or relating to this Agreement shall be exclusively resolved by the courts of that state, except where either party may seek an injunction or other legal remedy to safeguard their Intellectual Property Rights.
Source: https://ppp.com/article4.pdf?s=lkhljkhljk&category=c&sort=asc
=========
FINAL ANSWER IN English: This Agreement is governed by English law, specifically the laws of Manchester, England<sup><a href="https://xxx.com/article1.pdf?s=casdfg&category=ab&sort=asc&page=1" target="_blank">[1]</a></sup><sup><a href="https://ppp.com/article4.pdf?s=lkhljkhljk&category=c&sort=asc" target="_blank">[2]</a></sup>. \n Anything else I can help you with?.
=========
QUESTION: What did the president say about Michael Jackson?
=========
Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny..
Source: https://fff.com/article23.pdf?s=wreter&category=ab&sort=asc&page=1
Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.
Source: https://jjj.com/article56.pdf?s=sdflsdfsd&category=z&sort=desc&page=3
Content: And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.
Source: https://vvv.com/article145.pdf?s=sfsdfsdfs&category=z&sort=desc&page=3
Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.
Source: https://uuu.com/article15.pdf?s=lkhljkhljk&category=c&sort=asc
=========
FINAL ANSWER IN English: The president did not mention Michael Jackson.
<-- End of examples
# Instructions:
- Given the following extracted parts from one or multiple documents, and a question, create a final answer with references.
- **Answer the question from information provided in the context, DO NOT use your prior knowledge.
- Never provide an answer without references.
- If the question is one word, rephrase it to: "Tell me about a " and then the question
- If you don't know the answer, respond with "I don't know the answer to that question. Please try rephrasing your question."
- Respond in {language}.
=========
QUESTION: {question}
=========
{summaries}
=========
FINAL ANSWER IN {language}:"""
self.COMBINE_PROMPT = PromptTemplate(template=COMBINE_PROMPT_TEMPLATE, input_variables=["summaries", "question", "language"])
#self.question_template = creds['QUESTION_TEMPLATE']
if index == None:
self.index = os.environ['COG_SEARCH_INDEX']
else:
self.index = index
def get_the_token_count(self, documents):
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
total_token_count = 0
try:
token_count = len(tokenizer.encode(documents))
except:
print('failed to get token count')
token_count = -1
pass
return token_count
def get_Answer_from_load_qa_with_sources_chain(self, question):
print('get answer from load qa with source')
openai.api_type = "azure"
openai.api_base = os.environ['AZURE_OPENAI_ENDPOINT']
openai.api_version = "2022-12-01"
os.environ['OPENAI_API_KEY'] = os.environ['AZURE_OPENAI_KEY']
openai.api_key = os.getenv("OPENAI_API_KEY")
from openai.embeddings_utils import get_embedding, cosine_similarity
question_embedding = get_embedding(question,engine="text-embedding-ada-002") # engine should be set to the deployment name you chose when you deployed the text-embedding-ada-002 (Version 2) model)
blah = CogSearchHelper(self.index)
relevant_data, count, lst_file_name, embeddings_tuples, lst_embeddings_text, metadata = blah.search(question)
embeddings = OpenAIEmbeddings(openai_api_key=openai.api_key, chunk_size=1536)
if len(embeddings_tuples) == 0:
return("Sorry, I don't know the answer to that question. Please try rephrasing your question, Cognitive Search did not provide documents")
db = FAISS.from_embeddings(embeddings_tuples, embeddings, metadata)
docs_db = db.similarity_search_by_vector(question_embedding, k = 4)
MODEL = "gpt-35-turbo-16k" # options: gpt-35-turbo, gpt-35-turbo-16k, gpt-4, gpt-4-32k
COMPLETION_TOKENS = 1000
full_question = "system prompt " + question
llm = AzureChatOpenAI(deployment_name=MODEL, temperature=0, max_tokens=COMPLETION_TOKENS)
#you could change this chain type to use mapreduce or something else
chain = load_qa_with_sources_chain(llm, chain_type="stuff" )
response = chain({"input_documents": docs_db, "question": full_question, "language": "English", "existing_answer" : ""},
return_only_outputs=True)
if response['output_text'] == "I don't know the answer to that question. Please try rephrasing your question.":
chain = load_qa_with_sources_chain(llm, chain_type="stuff" )
response = chain({"input_documents": docs_db, "question": "tell me about a " + question, "language": "English", "existing_answer" : ""},
return_only_outputs=True)
return(response['output_text'])
| [
"\n These are examples of how you must provide the answer:\n\n --> Beginning of examples\n\n =========\n QUESTION: Which state/country's law governs the interpretation of the contract?\n =========\n Content: This Agreement is governed by English law and the parties submit to the exclusive jurisdiction of the English courts in relation to any dispute (contractual or non-contractual) concerning this Agreement save that either party may apply to any court for an injunction or other relief to protect its Intellectual Property Rights.\n Source: SuperCool.docx\n\n Content: No Waiver. Failure or delay in exercising any right or remedy under this Agreement shall not constitute a waiver of such (or any other) right or remedy.\n\n11.7 Severability. The invalidity, illegality or unenforceability of any term (or part of a term) of this Agreement shall not affect the continuation in force of the remainder of the term (if any) and this Agreement.\n\n11.8 No Agency. Except as expressly stated otherwise, nothing in this Agreement shall create an agency, partnership or joint venture of any kind between the parties.\n\n11.9 No Third-Party Beneficiaries.\n Source: https://yyyy.com/article2.html?s=lkhljkhljk&category=c&sort=asc\n\n Content: (b) if Google believes, in good faith, that the Distributor has violated or caused Google to violate any Anti-Bribery Laws (as defined in Clause 8.5) or that such a violation is reasonably likely to occur,\n Source: https://yyyy.com/article3.csv?s=kjsdhfd&category=c&sort=asc&page=2\n\n Content: The terms of this Agreement shall be subject to the laws of Manchester, England, and any disputes arising from or relating to this Agreement shall be exclusively resolved by the courts of that state, except where either party may seek an injunction or other legal remedy to safeguard their Intellectual Property Rights.\n Source: https://ppp.com/article4.pdf?s=lkhljkhljk&category=c&sort=asc\n =========\n FINAL ANSWER IN English: This Agreement is governed by English law, specifically the laws of Manchester, England<sup><a href=\"https://xxx.com/article1.pdf?s=casdfg&category=ab&sort=asc&page=1\" target=\"_blank\">[1]</a></sup><sup><a href=\"https://ppp.com/article4.pdf?s=lkhljkhljk&category=c&sort=asc\" target=\"_blank\">[2]</a></sup>. \n Anything else I can help you with?.\n\n =========\n QUESTION: What did the president say about Michael Jackson?\n =========\n Content: Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Members of Congress and the Cabinet. Justices of the Supreme Court. My fellow Americans. \n\nLast year COVID-19 kept us apart. This year we are finally together again. \n\nTonight, we meet as Democrats Republicans and Independents. But most importantly as Americans. \n\nWith a duty to one another to the American people to the Constitution. \n\nAnd with an unwavering resolve that freedom will always triumph over tyranny..\n Source: https://fff.com/article23.pdf?s=wreter&category=ab&sort=asc&page=1\n\n Content: And we won’t stop. \n\nWe have lost so much to COVID-19. Time with one another. And worst of all, so much loss of life. \n\nLet’s use this moment to reset. Let’s stop looking at COVID-19 as a partisan dividing line and see it for what it is: A God-awful disease. \n\nLet’s stop seeing each other as enemies, and start seeing each other for who we really are: Fellow Americans. \n\nWe can’t change how divided we’ve been. But we can change how we move forward—on COVID-19 and other issues we must face together. \n\nI recently visited the New York City Police Department days after the funerals of Officer Wilbert Mora and his partner, Officer Jason Rivera. \n\nThey were responding to a 9-1-1 call when a man shot and killed them with a stolen gun. \n\nOfficer Mora was 27 years old. \n\nOfficer Rivera was 22. \n\nBoth Dominican Americans who’d grown up on the same streets they later chose to patrol as police officers. \n\nI spoke with their families and told them that we are forever in debt for their sacrifice, and we will carry on their mission to restore the trust and safety every community deserves.\n Source: https://jjj.com/article56.pdf?s=sdflsdfsd&category=z&sort=desc&page=3\n\n Content: And I will use every tool at our disposal to protect American businesses and consumers. \n\nTonight, I can announce that the United States has worked with 30 other countries to release 60 Million barrels of oil from reserves around the world. \n\nAmerica will lead that effort, releasing 30 Million barrels from our own Strategic Petroleum Reserve. And we stand ready to do more if necessary, unified with our allies. \n\nThese steps will help blunt gas prices here at home. And I know the news about what’s happening can seem alarming. \n\nBut I want you to know that we are going to be okay.\n Source: https://vvv.com/article145.pdf?s=sfsdfsdfs&category=z&sort=desc&page=3\n\n Content: More support for patients and families. \n\nTo get there, I call on Congress to fund ARPA-H, the Advanced Research Projects Agency for Health. \n\nIt’s based on DARPA—the Defense Department project that led to the Internet, GPS, and so much more. \n\nARPA-H will have a singular purpose—to drive breakthroughs in cancer, Alzheimer’s, diabetes, and more. \n\nA unity agenda for the nation. \n\nWe can do this. \n\nMy fellow Americans—tonight , we have gathered in a sacred space—the citadel of our democracy. \n\nIn this Capitol, generation after generation, Americans have debated great questions amid great strife, and have done great things. \n\nWe have fought for freedom, expanded liberty, defeated totalitarianism and terror. \n\nAnd built the strongest, freest, and most prosperous nation the world has ever known. \n\nNow is the hour. \n\nOur moment of responsibility. \n\nOur test of resolve and conscience, of history itself. \n\nIt is in this moment that our character is formed. Our purpose is found. Our future is forged. \n\nWell I know this nation.\n Source: https://uuu.com/article15.pdf?s=lkhljkhljk&category=c&sort=asc\n =========\n FINAL ANSWER IN English: The president did not mention Michael Jackson.\n\n <-- End of examples\n\n # Instructions:\n - Given the following extracted parts from one or multiple documents, and a question, create a final answer with references. \n \n - **Answer the question from information provided in the context, DO NOT use your prior knowledge.\n - Never provide an answer without references.\n - If the question is one word, rephrase it to: \"Tell me about a \" and then the question\n - If you don't know the answer, respond with \"I don't know the answer to that question. Please try rephrasing your question.\"\n - Respond in {language}.\n\n =========\n QUESTION: {question}\n =========\n {summaries}\n =========\n FINAL ANSWER IN {language}:"
] |
2024-01-10 | storybrain/langchain-gpt-review-analysis | code~product-specs-extraction.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import create_extraction_chain
from dotenv import load_dotenv, find_dotenv
import os
load_dotenv(find_dotenv())
def main():
description_text = """
MacBook Pro M2 Max takes its power and efficiency further than ever.
It delivers exceptional performance whether it's plugged in or not,
and now has even longer battery life. Combined with a stunning
Liquid Retina XDR display and all the ports you need - this is a pro
laptop without equal.Supercharged by 12-core CPU Up to 38-core GPU
Up to 96GB unified memory 400GB/s memory bandwidth.
"""
schema = {
"properties": {
"name": {"type": "string"},
"ram": {"type": "string"},
"cpu": {"type": "string"},
"gpu:": {"type": "string"},
"display": {"type": "string"},
},
"required": ["name", "ram", "cpu"],
}
llm = ChatOpenAI(
temperature=0, model="gpt-3.5-turbo", openai_api_key=os.getenv("OPENAI_API_KEY")
)
chain = create_extraction_chain(schema, llm)
print(chain.run(description_text))
main()
# OUTPUT: {'cpu': '12-core', 'name': 'MacBook Pro M2 Max', 'ram': '96GB'}
| [] |
2024-01-10 | mat-ng/cram-ai | text_splitter.py | from langchain.text_splitter import CharacterTextSplitter
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
| [] |
2024-01-10 | mat-ng/cram-ai | db_create.py | from langchain.embeddings.huggingface import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
def db_create (chunks):
embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-large")
return FAISS.from_texts(chunks, embeddings)
| [] |
2024-01-10 | mat-ng/cram-ai | doc_qa.py | from langchain.chains.question_answering import load_qa_chain
from langchain.llms import HuggingFaceHub
def doc_qa (similar_texts, question):
llm = HuggingFaceHub(repo_id="google/flan-t5-large", model_kwargs={"temperature":0})
qa_chain = load_qa_chain(llm, chain_type="stuff")
return qa_chain.run(input_documents=similar_texts, question=question)
| [] |
2024-01-10 | kakao-aicoursework/brandon.202 | infrastructure~model~executor~websearch_intent_executor.py | from utils.localfile_loader import read_file
from .intent_executor import IntentExecutor
from langchain.agents import initialize_agent
from langchain.chains import LLMChain
from langchain.prompts.chat import ChatPromptTemplate
from langchain.agents.tools import Tool
from langchain.utilities import DuckDuckGoSearchAPIWrapper
duckduckgo = DuckDuckGoSearchAPIWrapper(region='kr-kr')
class WebsearchIntentExecutor(IntentExecutor):
def __init__(self, llm):
tools =[
Tool(
name="search",
func=duckduckgo.run,
description="인터넷에 검색을 할 수 있습니다",
)
]
self.agent = initialize_agent(tools, llm, agent="zero-shot-react-description", handle_parsing_errors=True)
self.translator = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_file("./infrastructure/model/templates/translator_template.txt"),
),
output_key="output",
)
def support(self, intent):
return intent == "websearch"
def execute(self, context):
message = ChatPromptTemplate.from_template(
template=read_file("./infrastructure/model/templates/websearch_template.txt"),
).invoke(context)
print("[SYSTEM] Websearch 진행중...")
result = self.agent.run(message)
return self.translator.run(dict(request_message=result))
# def truncate_text(text, max_length=3000):
# if len(text) > max_length:
# truncated_text = text[:max_length - 3] + '...'
# else:
# truncated_text = text
# return truncated_text
# def search(message):
# return truncate_text(duckduckgo.run(message)) | [
"./infrastructure/model/templates/translator_template.txt",
"./infrastructure/model/templates/websearch_template.txt"
] |
2024-01-10 | kakao-aicoursework/brandon.202 | infrastructure~model~executor~failback_intent_executor.py | from utils.localfile_loader import read_file
from .intent_executor import IntentExecutor
from langchain.chains import LLMChain
from langchain.prompts.chat import ChatPromptTemplate
class FailbackIntentExecutor(IntentExecutor):
def __init__(self, llm):
self.chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_file("./infrastructure/model/templates/failback_response_template.txt"),
),
output_key="output",
)
def support(self, intent):
return True
def execute(self, context):
return self.chain.run(context)
| [
"./infrastructure/model/templates/failback_response_template.txt"
] |
2024-01-10 | kakao-aicoursework/brandon.202 | infrastructure~model~autonomous_agent.py | from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import ChatPromptTemplate
from .history_storage import load_conversation_history, get_chat_history, log_qna
from infrastructure.model.executor import RetrieveKakaoDataIntentExecutor, WebsearchIntentExecutor, FailbackIntentExecutor
from utils.localfile_loader import read_file
# load .env
load_dotenv()
class AutonomousAgent():
def __init__(self, max_loop_count: int = 3):
llm = ChatOpenAI(
temperature=0,
max_tokens=3000,
model="gpt-3.5-turbo-16k")
self.max_loop_count: int = max_loop_count
self.guess_satisfied_qna_chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_file("./infrastructure/model/templates/guess_satisfied_qna_template.txt"),
),
output_key="intent",
verbose=True
)
self.guess_intent_chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_file("./infrastructure/model/templates/guess_intent_template.txt"),
),
output_key="intent",
verbose=True
)
self.executors = [
RetrieveKakaoDataIntentExecutor(llm),
WebsearchIntentExecutor(llm),
FailbackIntentExecutor(llm),
]
def run(self, user_message, conversation_id: str = "dummy"):
history_file = load_conversation_history(conversation_id)
context = self.initialize_context(user_message, conversation_id)
intent_loop_count: int = 0
wrong_answers = []
while intent_loop_count < self.max_loop_count:
prev_answer = context["current_answer"]
if self.guess_qna_done(context):
answer = prev_answer
break
intent = self.guess_intent(context)
for executor in self.executors:
if(executor.support(intent)):
answer = executor.execute(context)
context["current_answer"] = answer
break
intent_loop_count += 1
wrong_answers.append(f"intent: {intent} / answer: {answer}")
context["wrong_answers"] = "\n".join(wrong_answers)
print(f"[SYSTEM]: loop ({intent_loop_count} / {self.max_loop_count})")
log_qna(history_file, user_message, answer)
return answer
def initialize_context(self, user_message, conversation_id):
context = dict(user_message=user_message)
context["input"] = context["user_message"]
context["current_answer"] = ""
context["wrong_answers"] = ""
context["chat_history"] = get_chat_history(conversation_id)
return context
def guess_qna_done(self, context):
if(context["current_answer"] == ""):
return False
print(f"User: " + context["user_message"])
print(f"Assistant: " + context["current_answer"])
response = self.guess_satisfied_qna_chain.run(context)
is_done = response == "Y"
print(f"[SYSTEM] response: " + response)
print(f"[SYSTEM] Is find answer? " + str(is_done))
return is_done
def guess_intent(self, context):
intent = self.guess_intent_chain.run(context)
print(f"[SYSTEM] I guess, I need to do {intent}!")
return intent | [
"./infrastructure/model/templates/guess_intent_template.txt",
"./infrastructure/model/templates/guess_satisfied_qna_template.txt"
] |
2024-01-10 | kakao-aicoursework/brandon.202 | infrastructure~model~history_storage.py | from langchain.memory import ConversationBufferMemory, FileChatMessageHistory
def load_conversation_history(conversation_id: str):
file_path = f"./persistence/histories/{conversation_id}.json"
return FileChatMessageHistory(file_path)
def log_user_message(history: FileChatMessageHistory, user_message: str):
history.add_user_message(user_message)
def log_bot_message(history: FileChatMessageHistory, bot_message: str):
history.add_ai_message(bot_message)
def log_qna(history: FileChatMessageHistory, user_message: str, bot_message: str):
log_user_message(history, user_message)
log_bot_message(history, bot_message)
def get_chat_history(conversation_id: str):
history = load_conversation_history(conversation_id)
memory = ConversationBufferMemory(
memory_key="chat_history",
input_key="user_message",
chat_memory=history,
)
return memory.buffer
| [] |
2024-01-10 | kakao-aicoursework/brandon.202 | infrastructure~model~executor~retrieve_kakao_data_intent_executor.py | from utils.localfile_loader import read_file
from .vectorstore import query_on_chroma
from .intent_executor import IntentExecutor
from langchain.chains import LLMChain
from langchain.prompts.chat import ChatPromptTemplate
class RetrieveKakaoDataIntentExecutor(IntentExecutor):
def __init__(self, llm):
self.chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template(
template=read_file("./infrastructure/model/templates/retrieve_template.txt"),
),
output_key="output",
)
def support(self, intent):
return intent == "retrieve_kakao_data"
def execute(self, context):
context["retrieve_result"] = query_on_chroma(context["user_message"])
return self.chain.run(context)
| [
"./infrastructure/model/templates/retrieve_template.txt"
] |
2024-01-10 | qingyuan18/llm_retreval_search | functions_search.py | import os
import re
import json
import boto3
from botocore.config import Config
from langchain.llms.bedrock import Bedrock
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.agents import Tool, AgentExecutor, AgentOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import Docx2txtLoader
from langchain.schema import BaseRetriever
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.schema import BaseRetriever, Document
from typing import Any, Dict, List, Optional,Union
from langchain.utilities import SerpAPIWrapper
from langchain.tools.retriever import create_retriever_tool
from langchain.tools import BaseTool, StructuredTool, Tool, tool
from langchain.memory import ConversationBufferMemory
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.schema.messages import SystemMessage
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
os.environ["SERPAPI_API_KEY"]="*********"
search = SerpAPIWrapper()
def get_named_parameter(event, name):
return next(item for item in event['parameters'] if item['name'] == name)['value']
def search_website(event):
global search
user_query = get_named_parameter(event, 'user_query')
search_ret = search.run(user_query)
return search_ret
def lambda_handler(event, context):
result = ''
response_code = 200
action_group = event['actionGroup']
api_path = event['apiPath']
print ("lambda_handler == > api_path: ",api_path)
if api_path == '/searchWebsite':
result = search_website(event)
else:
response_code = 404
result = f"Unrecognized api path: {action_group}::{api_path}"
response_body = {
'application/json': {
'body': json.dumps(result)
}
}
session_attributes = event['sessionAttributes']
prompt_session_attributes = event['promptSessionAttributes']
print ("Event:", event)
action_response = {
'actionGroup': event['actionGroup'],
'apiPath': event['apiPath'],
# 'httpMethod': event['HTTPMETHOD'],
'httpMethod': event['httpMethod'],
'httpStatusCode': response_code,
'responseBody': response_body,
'sessionAttributes': session_attributes,
'promptSessionAttributes': prompt_session_attributes
}
api_response = {'messageVersion': '1.0', 'response': action_response}
return api_response | [
"promptSessionAttributes"
] |
2024-01-10 | X-D-Lab/LangChain-ChatGLM-Webui | chinese_text_splitter.py | import re
from typing import List
from langchain.text_splitter import CharacterTextSplitter
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
def split_text(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", "\n", text)
text = re.sub('\s', ' ', text)
text = text.replace("\n\n", "")
sent_sep_pattern = re.compile(
'([﹒﹔﹖﹗.。!?]["’”」』]{0,2}|(?=["‘“「『]{1,2}|$))')
sent_list = []
for ele in sent_sep_pattern.split(text):
if sent_sep_pattern.match(ele) and sent_list:
sent_list[-1] += ele
elif ele:
sent_list.append(ele)
return sent_list
| [] |
2024-01-10 | X-D-Lab/LangChain-ChatGLM-Webui | chatllm.py | import os
from typing import Dict, List, Optional, Tuple, Union
import torch
from fastchat.conversation import (compute_skip_echo_len,
get_default_conv_template)
from fastchat.serve.inference import load_model as load_fastchat_model
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer
from config import *
os.environ["TOKENIZERS_PARALLELISM"] = "false"
DEVICE = LLM_DEVICE
DEVICE_ID = "0"
CUDA_DEVICE = f"{DEVICE}:{DEVICE_ID}" if DEVICE_ID else DEVICE
init_llm = init_llm
init_embedding_model = init_embedding_model
def torch_gc():
if torch.cuda.is_available():
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def auto_configure_device_map(num_gpus: int) -> Dict[str, int]:
num_trans_layers = 28
per_gpu_layers = 30 / num_gpus
device_map = {
'transformer.word_embeddings': 0,
'transformer.final_layernorm': 0,
'lm_head': 0
}
used = 2
gpu_target = 0
for i in range(num_trans_layers):
if used >= per_gpu_layers:
gpu_target += 1
used = 0
assert gpu_target < num_gpus
device_map[f'transformer.layers.{i}'] = gpu_target
used += 1
return device_map
class ChatLLM(LLM):
max_token: int = 10000
temperature: float = 0.1
top_p = 0.9
history = []
model_type: str = "chatglm"
model_name_or_path: str = init_llm,
tokenizer: object = None
model: object = None
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatLLM"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
if self.model_type == 'vicuna':
conv = get_default_conv_template(self.model_name_or_path).copy()
conv.append_message(conv.roles[0], prompt)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
inputs = self.tokenizer([prompt])
output_ids = self.model.generate(
torch.as_tensor(inputs.input_ids).cuda(),
do_sample=True,
temperature=self.temperature,
max_new_tokens=self.max_token,
)
outputs = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
skip_echo_len = compute_skip_echo_len(self.model_name_or_path, conv, prompt)
response = outputs[skip_echo_len:]
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = [[None, response]]
elif self.model_type == 'belle':
prompt = "Human: "+ prompt +" \n\nAssistant: "
input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids.to(DEVICE)
generate_ids = self.model.generate(input_ids, max_new_tokens=self.max_token, do_sample = True, top_k = 30, top_p = self.top_p, temperature = self.temperature, repetition_penalty=1., eos_token_id=2, bos_token_id=1, pad_token_id=0)
output = self.tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
response = output[len(prompt)+1:]
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = [[None, response]]
elif self.model_type == 'chatglm2':
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=self.history,
max_length=self.max_token,
temperature=self.temperature,
top_p = self.top_p,
)
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = self.history + [[None, response]]
elif self.model_type == 'chatglm':
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=self.history,
max_length=self.max_token,
temperature=self.temperature,
)
torch_gc()
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = self.history + [[None, response]]
elif self.model_type == 'internlm':
response, _ = self.model.chat(self.tokenizer, prompt, history=self.history, max_length=self.max_token, temperature=self.temperature)
return response
def load_llm(self,
llm_device=DEVICE,
num_gpus='auto',
device_map: Optional[Dict[str, int]] = None,
**kwargs):
if 'chatglm2' in self.model_name_or_path.lower():
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path))
if torch.cuda.is_available() and llm_device.lower().startswith("cuda"):
num_gpus = torch.cuda.device_count()
if num_gpus < 2 and device_map is None:
self.model = (AutoModel.from_pretrained(
self.model_name_or_path, trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half().cuda())
else:
from accelerate import dispatch_model
model = AutoModel.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half()
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
self.model = dispatch_model(model, device_map=device_map)
else:
self.model = (AutoModel.from_pretrained(
self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path)).float().to(llm_device))
self.model = self.model.eval()
elif 'chatglm' in self.model_name_or_path.lower():
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path))
if torch.cuda.is_available() and llm_device.lower().startswith("cuda"):
num_gpus = torch.cuda.device_count()
if num_gpus < 2 and device_map is None:
self.model = (AutoModel.from_pretrained(
self.model_name_or_path, trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half().cuda())
else:
from accelerate import dispatch_model
model = AutoModel.from_pretrained(self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path),
**kwargs).half()
if device_map is None:
device_map = auto_configure_device_map(num_gpus)
self.model = dispatch_model(model, device_map=device_map)
else:
self.model = (AutoModel.from_pretrained(
self.model_name_or_path,
trust_remote_code=True, cache_dir=os.path.join(MODEL_CACHE_PATH, self.model_name_or_path)).float().to(llm_device))
self.model = self.model.eval()
elif 'internlm' in self.model_name_or_path.lower():
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, trust_remote_code=True)
self.model = AutoModelForCausalLM.from_pretrained(self.model_name_or_path, trust_remote_code=True).cuda()
self.model = self.model.eval()
else:
self.model, self.tokenizer = load_fastchat_model(
model_path = self.model_name_or_path,
device = llm_device,
num_gpus = num_gpus
)
| [
"Human: PLACEHOLDER \n\nAssistant: "
] |
2024-01-10 | X-D-Lab/LangChain-ChatGLM-Webui | paddlepaddle~chatllm.py |
import os
from typing import Dict, List, Optional, Tuple, Union
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from paddlenlp import Taskflow
chatbot = Taskflow("text2text_generation", batch_size=2)
class ChatLLM(LLM):
max_token: int = 10000
temperature: float = 0.1
top_p = 0.9
history = []
tokenizer: object = None
model: object = None
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatLLM"
def _call(self,
prompt: str,
stop: Optional[List[str]] = None) -> str:
prompt_list = []
prompt_list.append(prompt)
results = chatbot(prompt_list)
response = results['result'][0]
if stop is not None:
response = enforce_stop_tokens(response, stop)
return response | [
"[]"
] |
2024-01-10 | X-D-Lab/LangChain-ChatGLM-Webui | modelscope~modelscope_hub.py | """Wrapper around ModelScopeHub embedding models."""
from typing import Any, List
from langchain.embeddings.base import Embeddings
from pydantic import BaseModel, Extra
class ModelScopeEmbeddings(BaseModel, Embeddings):
"""Wrapper around modelscope_hub embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import ModelScopeEmbeddingsEmbeddings
model_id = "damo/nlp_corom_sentence-embedding_english-base"
embed = ModelScopeEmbeddingsEmbeddings(model_id=model_id)
"""
embed: Any
model_id: str ="damo/nlp_corom_sentence-embedding_english-base"
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
from modelscope.models import Model
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
self.embed = pipeline(Tasks.sentence_embedding,
model=self.model_id)
except ImportError as e:
raise ValueError(
"Could not import some python packages." "Please install it with `pip install modelscope`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
inputs = {"source_sentence": texts}
embeddings = self.embed(input=inputs)['text_embedding']
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
inputs = {"source_sentence": [text]}
embedding = self.embed(input=inputs)['text_embedding'][0]
return embedding | [] |
2024-01-10 | X-D-Lab/LangChain-ChatGLM-Webui | paddlepaddle~paddle_embedding.py | """Wrapper around PaddleNLP embedding models."""
from typing import Any, List
from langchain.embeddings.base import Embeddings
from pydantic import BaseModel, Extra
class PaddleNLPEmbeddings(BaseModel, Embeddings):
"""Wrapper around paddlenlp embedding models.
To use, you should have the ``modelscope`` python package installed.
Example:
.. code-block:: python
from langchain.embeddings import PaddleNLPEmbeddings
model = "rocketqa-zh-base-query-encoder"
embed = PaddleNLPEmbeddings(model=model)
"""
text_encoder: Any
model: str ='rocketqa-zh-base-query-encoder'
"""Model name to use."""
def __init__(self, **kwargs: Any):
"""Initialize the modelscope"""
super().__init__(**kwargs)
try:
import paddle.nn.functional as F
from paddlenlp import Taskflow
self.text_encoder = Taskflow("feature_extraction", model=self.model)
except ImportError as e:
raise ValueError(
"Could not import some python packages." "Please install it with `pip install modelscope`."
) from e
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a modelscope embedding model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
texts = list(map(lambda x: x.replace("\n", " "), texts))
text_embeds = self.text_encoder(texts)
embeddings = text_embeds["features"].numpy()
return embeddings.tolist()
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a modelscope embedding model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
text = text.replace("\n", " ")
text_embeds = self.text_encoder(text)
embedding = text_embeds["features"].numpy()[0]
return embedding.tolist() | [] |
2024-01-10 | X-D-Lab/LangChain-ChatGLM-Webui | jina_serving.py | import datetime
import os
from typing import List
import nltk
import qdrant_client
import sentence_transformers
import torch
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.chains import RetrievalQA
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.prompts import PromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.vectorstores import Qdrant
from lcserve import serving
from chatllm import ChatLLM
from chinese_text_splitter import ChineseTextSplitter
from config import *
nltk.data.path = [os.path.join(os.path.dirname(__file__), "nltk_data")
] + nltk.data.path
embedding_model_dict = embedding_model_dict
llm_model_dict = llm_model_dict
EMBEDDING_DEVICE = EMBEDDING_DEVICE
LLM_DEVICE = LLM_DEVICE
VECTOR_STORE_PATH = VECTOR_STORE_PATH
COLLECTION_NAME = COLLECTION_NAME
num_gpus = num_gpus
init_llm = init_llm
init_embedding_model = init_embedding_model
def search_web(query):
SESSION.proxies = {
"http": f"socks5h://localhost:7890",
"https": f"socks5h://localhost:7890"
}
results = ddg(query)
web_content = ''
if results:
for result in results:
web_content += result['body']
return web_content
class KnowledgeBasedChatLLM:
llm: object = None
embeddings: object = None
def init_model_config(
self,
large_language_model: str = init_llm,
embedding_model: str = init_embedding_model,
):
self.llm = ChatLLM()
if 'chatglm' in large_language_model.lower():
self.llm.model_type = 'chatglm'
self.llm.model_name_or_path = llm_model_dict['chatglm'][
large_language_model]
elif 'belle' in large_language_model.lower():
self.llm.model_type = 'belle'
self.llm.model_name_or_path = llm_model_dict['belle'][
large_language_model]
elif 'vicuna' in large_language_model.lower():
self.llm.model_type = 'vicuna'
self.llm.model_name_or_path = llm_model_dict['vicuna'][
large_language_model]
self.embeddings = HuggingFaceEmbeddings(
model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(
self.embeddings.model_name, device=EMBEDDING_DEVICE)
self.llm.load_llm(llm_device=LLM_DEVICE, num_gpus=num_gpus)
def init_knowledge_vector_store(self,
filepath: str or List[str],):
loaded_files = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
return "路径不存在"
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
docs = self.load_file(filepath)
print(f"{file} 已成功加载")
loaded_files.append(filepath)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
return f"{file} 未能成功加载"
elif os.path.isdir(filepath):
docs = []
for file in os.listdir(filepath):
fullfilepath = os.path.join(filepath, file)
try:
docs += self.load_file(fullfilepath)
print(f"{file} 已成功加载")
loaded_files.append(fullfilepath)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
else:
docs = []
for file in filepath:
try:
docs += self.load_file(file)
print(f"{file} 已成功加载")
loaded_files.append(file)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
if len(docs) > 0:
if VECTOR_STORE_PATH and os.path.isdir(VECTOR_STORE_PATH):
vector_store = Qdrant.from_documents(
docs,
self.embeddings,
path=VECTOR_STORE_PATH,
collection_name=COLLECTION_NAME,
)
vector_store.add_documents(docs)
else:
vector_store = Qdrant.from_documents(
docs,
self.embeddings,
path=VECTOR_STORE_PATH,
collection_name=COLLECTION_NAME,
)
return "文件均未成功加载,请检查依赖包或文件路径。", loaded_files
else:
print("文件均未成功加载,请检查依赖包或文件路径。")
return "文件均未成功加载,请检查依赖包或文件路径。", loaded_files
def get_knowledge_based_answer(self,
query,
web_content,
top_k: int = 6,
history_len: int = 3,
temperature: float = 0.01,
top_p: float = 0.1,
history=[]):
self.llm.temperature = temperature
self.llm.top_p = top_p
self.history_len = history_len
self.top_k = top_k
if web_content:
prompt_template = f"""基于以下已知信息,简洁和专业的来回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
已知网络检索内容:{web_content}""" + """
已知内容:
{context}
问题:
{question}"""
else:
prompt_template = """基于以下已知信息,请简洁并专业地回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息"。不允许在答案中添加编造成分。另外,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(template=prompt_template,
input_variables=["context", "question"])
self.llm.history = history[
-self.history_len:] if self.history_len > 0 else []
client = qdrant_client.QdrantClient(path=VECTOR_STORE_PATH,
prefer_grpc=True)
qdrant = Qdrant(client=client,
collection_name=COLLECTION_NAME,
embedding_function=self.embeddings.embed_query)
knowledge_chain = RetrievalQA.from_llm(
llm=self.llm,
retriever=qdrant.as_retriever(search_kwargs={"k": self.top_k}),
prompt=prompt)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}")
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
return result
def load_file(self, filepath):
if filepath.lower().endswith(".md"):
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
elif filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs
knowladge_based_chat_llm = KnowledgeBasedChatLLM()
def init_model():
try:
knowladge_based_chat_llm.init_model_config()
knowladge_based_chat_llm.llm._call("你好")
return """初始模型已成功加载"""
except Exception as e:
return """模型未成功加载,请检查后重新尝试"""
@serving
def reinit_model(large_language_model: str, embedding_model: str):
try:
knowladge_based_chat_llm.init_model_config(
large_language_model=large_language_model,
embedding_model=embedding_model)
model_status = """模型已成功重新加载"""
except Exception as e:
model_status = """模型未成功加载,请检查后重新尝试"""
return model_status
@serving
def vector_store(file_path: str or List[str]):
vector_store_state, loaded_files = knowladge_based_chat_llm.init_knowledge_vector_store(
file_path)
return vector_store_state
@serving
def predict(input: str,
use_web: bool, top_k: int, history_len: int, temperature: float,
top_p: float, history: list):
if history == None:
history = []
if use_web == 'True':
web_content = search_web(query=input)
else:
web_content = ''
resp = knowladge_based_chat_llm.get_knowledge_based_answer(
query=input,
web_content=web_content,
top_k=top_k,
history_len=history_len,
temperature=temperature,
top_p=top_p,
history=history)
history.append((input, resp['result']))
print(resp['result'])
return resp['result']
if __name__ == "__main__":
reinit_model(large_language_model='ChatGLM-6B-int8',
embedding_model='text2vec-base')
vector_store(file_path='./README.md')
predict('chatglm-6b的局限性在哪里?',
use_web=False,
top_k=6,
history_len=3,
temperature=0.01,
top_p=0.1,
history=[])
| [
"基于以下已知信息,请简洁并专业地回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\"。不允许在答案中添加编造成分。另外,答案请使用中文。\n\n 已知内容:\n {context}\n\n 问题:\n {question}",
"{page_content}",
"question",
"context",
"基于以下已知信息,简洁和专业的来回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\",不允许在答案中添加编造成分,答案请使用中文。\n 已知网络检索内容:PLACEHOLDER\n 已知内容:\n {context}\n 问题:\n {question}"
] |
2024-01-10 | xinggonglie/chatgpt-mirai-qq-bot | universal.py | import os
import re
from typing import Callable
import openai
from tempfile import NamedTemporaryFile
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Plain, Voice
from httpx import HTTPStatusError, ConnectTimeout
from loguru import logger
from requests.exceptions import SSLError, ProxyError, RequestException
from urllib3.exceptions import MaxRetryError
from constants import config
from conversation import ConversationHandler
from exceptions import PresetNotFoundException, BotRatelimitException, ConcurrentMessageException, \
BotTypeNotFoundException, NoAvailableBotException, BotOperationNotSupportedException, CommandRefusedException
from middlewares.baiducloud import MiddlewareBaiduCloud
from middlewares.concurrentlock import MiddlewareConcurrentLock
from middlewares.ratelimit import MiddlewareRatelimit
from middlewares.timeout import MiddlewareTimeout
from utils.azure_tts import synthesize_speech
middlewares = [MiddlewareTimeout(), MiddlewareRatelimit(), MiddlewareBaiduCloud(), MiddlewareConcurrentLock()]
async def handle_message(_respond: Callable, session_id: str, message: str,
chain: MessageChain = MessageChain("Unsupported"), is_manager: bool = False,
nickname: str = '某人'):
"""正常聊天"""
if not message.strip():
return config.response.placeholder
for r in config.trigger.ignore_regex:
if re.match(r, message):
logger.debug(f"此消息满足正则表达式: {r},忽略……")
return
# 此处为会话不存在时可以执行的指令
conversation_handler = await ConversationHandler.get_handler(session_id)
conversation_context = None
# 指定前缀对话
if ' ' in message and (config.trigger.allow_switching_ai or is_manager):
for ai_type, prefixes in config.trigger.prefix_ai.items():
for prefix in prefixes:
if prefix + ' ' in message:
conversation_context = await conversation_handler.first_or_create(ai_type)
message = message.removeprefix(prefix + ' ')
break
else:
# Continue if the inner loop wasn't broken.
continue
# Inner loop was broken, break the outer.
break
if not conversation_handler.current_conversation:
conversation_handler.current_conversation = await conversation_handler.create(
config.response.default_ai)
def wrap_request(n, m):
async def call(session_id, message, conversation_context, respond):
await m.handle_request(session_id, message, respond, conversation_context, n)
return call
def wrap_respond(n, m):
async def call(session_id, message, rendered, respond):
await m.handle_respond(session_id, message, rendered, respond, n)
return call
async def respond(msg: str):
if not msg:
return
ret = await _respond(msg)
for m in middlewares:
await m.on_respond(session_id, message, msg)
# TODO: 之后重构成 platforms 的 respond 只处理 MessageChain
if isinstance(msg, str):
msg = MessageChain([Plain(msg)])
nonlocal conversation_context
if not conversation_context:
conversation_context = conversation_handler.current_conversation
# TTS Converting
if conversation_context.conversation_voice and isinstance(msg, MessageChain):
for elem in msg:
if isinstance(elem, Plain) and str(elem):
output_file = NamedTemporaryFile(mode='w+b', suffix='.wav', delete=False)
output_file.close()
if await synthesize_speech(
str(elem),
output_file.name,
conversation_context.conversation_voice
):
await _respond(Voice(path=output_file.name))
try:
os.unlink(output_file.name)
except:
pass
return ret
async def request(_session_id, prompt: str, conversation_context, _respond):
try:
task = None
# 不带前缀 - 正常初始化会话
if bot_type_search := re.search(config.trigger.switch_command, prompt):
if not (config.trigger.allow_switching_ai or is_manager):
await respond(f"不好意思,只有管理员才能切换AI!")
return
conversation_handler.current_conversation = await conversation_handler.create(
bot_type_search.group(1).strip())
await respond(f"已切换至 {bot_type_search.group(1).strip()} AI,现在开始和我聊天吧!")
return
# 最终要选择的对话上下文
if not conversation_context:
conversation_context = conversation_handler.current_conversation
# 此处为会话存在后可执行的指令
# 重置会话
if prompt in config.trigger.reset_command:
task = conversation_context.reset()
# 回滚会话
elif prompt in config.trigger.rollback_command:
task = conversation_context.rollback()
elif voice_type_search := re.search(config.trigger.switch_voice, prompt):
if config.azure.tts_speech_key:
conversation_context.conversation_voice = voice_type_search.group(1).strip()
await respond(
f"已切换至 {conversation_context.conversation_voice} 语音!详情参考: "
f"https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support?tabs=tts#neural-voices")
else:
await respond(f"未配置 Azure TTS 账户,无法切换语音!")
return
elif prompt in config.trigger.mixed_only_command:
conversation_context.switch_renderer("mixed")
await respond(f"已切换至图文混合模式,接下来我的回复将会以图文混合的方式呈现!")
return
elif prompt in config.trigger.image_only_command:
conversation_context.switch_renderer("image")
await respond(f"已切换至纯图片模式,接下来我的回复将会以图片呈现!")
return
elif prompt in config.trigger.text_only_command:
conversation_context.switch_renderer("text")
await respond(f"已切换至纯文字模式,接下来我的回复将会以文字呈现(被吞除外)!")
return
elif switch_model_search := re.search(config.trigger.switch_model, prompt):
model_name = switch_model_search.group(1).strip()
if model_name in conversation_context.supported_models:
if not (is_manager or model_name in config.trigger.allowed_models):
await respond(f"不好意思,只有管理员才能切换到 {model_name} 模型!")
else:
await conversation_context.switch_model(model_name)
await respond(f"已切换至 {model_name} 模型,让我们聊天吧!")
else:
await respond(
f"当前的 AI 不支持切换至 {model_name} 模型,目前仅支持:{conversation_context.supported_models}!")
return
# 加载预设
if preset_search := re.search(config.presets.command, prompt):
logger.trace(f"{session_id} - 正在执行预设: {preset_search.group(1)}")
async for _ in conversation_context.reset(): ...
task = conversation_context.load_preset(preset_search.group(1))
elif not conversation_context.preset:
# 当前没有预设
logger.trace(f"{session_id} - 未检测到预设,正在执行默认预设……")
# 隐式加载不回复预设内容
async for _ in conversation_context.load_preset('default'): ...
# 没有任务那就聊天吧!
if not task:
task = conversation_context.ask(prompt=prompt, chain=chain, name=nickname)
async for rendered in task:
if rendered:
if str(rendered).strip() == '':
logger.warning("检测到内容为空的输出,已忽略")
continue
action = lambda session_id, prompt, rendered, respond: respond(rendered)
for m in middlewares:
action = wrap_respond(action, m)
# 开始处理 handle_response
await action(session_id, prompt, rendered, respond)
for m in middlewares:
await m.handle_respond_completed(session_id, prompt, respond)
except CommandRefusedException as e:
await respond(str(e))
except openai.error.InvalidRequestError as e:
await respond("服务器拒绝了您的请求,原因是" + str(e))
except BotOperationNotSupportedException:
await respond("暂不支持此操作,抱歉!")
except ConcurrentMessageException as e: # Chatbot 账号同时收到多条消息
await respond(config.response.error_request_concurrent_error)
except (BotRatelimitException, HTTPStatusError) as e: # Chatbot 账号限流
await respond(config.response.error_request_too_many.format(exc=e))
except NoAvailableBotException as e: # 预设不存在
await respond(f"当前没有可用的{e}账号,不支持使用此 AI!")
except BotTypeNotFoundException as e: # 预设不存在
await respond(
f"AI类型{e}不存在,请检查你的输入是否有问题!目前仅支持:\n"
f"* chatgpt-web - OpenAI ChatGPT 网页版\n"
f"* chatgpt-api - OpenAI ChatGPT API版\n"
f"* bing-c - 微软 New Bing (创造力)\n"
f"* bing-b - 微软 New Bing (平衡)\n"
f"* bing-p - 微软 New Bing (精确)\n"
f"* bard - Google Bard\n"
f"* yiyan - 百度 文心一言\n"
f"* chatglm-api - 清华 ChatGLM-6B (本地)\n"
)
except PresetNotFoundException: # 预设不存在
await respond("预设不存在,请检查你的输入是否有问题!")
except (RequestException, SSLError, ProxyError, MaxRetryError, ConnectTimeout, ConnectTimeout) as e: # 网络异常
await respond(config.response.error_network_failure.format(exc=e))
except Exception as e: # 未处理的异常
logger.exception(e)
await respond(config.response.error_format.format(exc=e))
action = request
for m in middlewares:
action = wrap_request(action, m)
# 开始处理
await action(session_id, message.strip(), conversation_context, respond)
| [] |
2024-01-10 | MohammadDarsa/llm-smart-features | language_model~routers~chat_api.py | import json
from fastapi import APIRouter
from langchain import PromptTemplate, LLMChain
from language_model.model.request.chat_request import ChatRequest
def get_products():
return """id:1 name:Iphone 15 Pro Max brand:Apple price1199 ; id:2 name:Samsung Galaxy S23 Ultra brand:Samsung price:899 ; id:3 name:Google Pixel 7 Pro brand:Google price:699 ; id:4 name:OnePlus 11 brand:OnePlus price:899 ; id:5 name:Iphone 15 brand:Apple price:799 ; id:6 name:Samsung Galaxy S23 brand:Samsung price:699 ;"""
class ChatController:
def __init__(self, llm_config):
self.llm_config = llm_config
self.router = APIRouter()
self.router.add_api_route("/chat", self.smart_search, methods=["POST"])
async def smart_search(self, request: ChatRequest):
# get the products
products = get_products()
# get products as a string using json
products_str = json.dumps(products)
# create a template
_DEFAULT_TEMPLATE = """You're a shop keeper in an electronics store. A customer comes in and asks for a phone with the following specs presented in the query.
Here's the product list containing all the attributes as key value pair separated by a column (:). The user will search on these key value pairs, The products are separated by a semi-column(;):
{products_str}
The user will input a query and you should find the most suitable phone or phones.
The answer to the customer's query should only include the ids of the phones that matches the query and nothing else. The answer should only contain ths ids.
Examples of query-question:
query: "google branded phones"
answer: "3"
query: "Phones with more than 20MP camera:
answer: "1,2,3"
Answer this query of the user:
query: {query}
answer:"""
prompt = PromptTemplate(template=_DEFAULT_TEMPLATE, input_variables=["products_str", "query"])
conversation = LLMChain(
llm=self.llm_config.local_llm,
prompt=prompt,
verbose=True
)
return {"message": conversation({"query": request.text, "products_str": products_str})}
| [
"google branded phones",
"You're a shop keeper in an electronics store. A customer comes in and asks for a phone with the following specs presented in the query.\nHere's the product list containing all the attributes as key value pair separated by a column (:). The user will search on these key value pairs, The products are separated by a semi-column(;):\n{products_str}\n\nThe user will input a query and you should find the most suitable phone or phones.\n\nThe answer to the customer's query should only include the ids of the phones that matches the query and nothing else. The answer should only contain ths ids.\n\nExamples of query-question:\n\nquery: \"google branded phones\"\nanswer: \"3\"\n\nquery: \"Phones with more than 20MP camera:\nanswer: \"1,2,3\"\n\nAnswer this query of the user:\n\nquery: {query}\nanswer:",
"products_str"
] |
2024-01-10 | MohammadDarsa/llm-smart-features | language_model~llm~llm_config.py | import torch
from auto_gptq import AutoGPTQForCausalLM
from langchain import HuggingFacePipeline
from langchain.memory import VectorStoreRetrieverMemory
from transformers import AutoTokenizer, BitsAndBytesConfig
from transformers import pipeline
class LlmConfig:
def __init__(self, vector_db_config):
self.model_name_or_path = "TheBloke/Dolphin-Llama-13B-GPTQ"
self.model_basename = "model"
self.local_llm = None
self.memory = None
self.vector_db_config = vector_db_config
self.config()
def config(self):
# go for a smaller model if you don't have the VRAM
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=torch.float16,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
)
model = AutoGPTQForCausalLM.from_quantized(self.model_name_or_path,
model_basename=self.model_basename,
use_safetensors=True,
trust_remote_code=False,
device="cuda:0",
use_triton=False,
quantize_config=None)
tokenizer = AutoTokenizer.from_pretrained(self.model_name_or_path, use_fast=True)
pipe = pipeline(
"text-generation",
model=model,
tokenizer=tokenizer,
max_new_tokens=512,
do_sample=True,
temperature=0.7,
top_p=0.95,
top_k=40,
repetition_penalty=1.1
)
self.local_llm = HuggingFacePipeline(pipeline=pipe)
retriever = self.vector_db_config.db.as_retriever(search_kwargs=dict(k=1))
self.memory = VectorStoreRetrieverMemory(retriever=retriever)
| [] |
2024-01-10 | NickMcCrea/nickaiapp | backend~test_data_pipeline_integration.py |
from data_processor import DataProcessor
from meta_data_service import MetaDataService
from data_pipeline_executor import DataPipelineExecutor
from pandas import DataFrame
import pandas as pd
import openai
import llm_wrapper as llmwrapper
import os
from dotenv import load_dotenv
import completion_builder as cb
from user_session_state import UserSessionState
from actions import ActionsManager
load_dotenv()
openai.api_key = os.getenv("OPENAI_KEY")
meta_data_service = MetaDataService()
meta_data_service.add_data_source("backend/datasources/nicktrialbalance.json", "backend/datasources/nicktrialbalance.csv")
action_manager = ActionsManager("gpt-4-1106-preview", meta_data_service)
user_session_state = UserSessionState()
test_input = "load balances, then filter it on company code 0302 - call the output 0302_balances"
data,metadata,commentary = action_manager.function_generate_pipeline_definition(None,None,user_session_state, test_input)
data2, metadata2, commentary2 = action_manager.execute_pipeline_definition(None,None,user_session_state,"create that","MyNewDataSet", "Data set description")
print(metadata)
print(commentary2)
| [] |
2024-01-10 | NickMcCrea/nickaiapp | backend~actions.py | import json
from flask_socketio import SocketIO
import openai
from meta_data_service import MetaDataService
from typing import List, Dict, Any
import function_defs as function_defs
import completion_builder as completion_builder
from user_session_state import UserSessionState
from data_pipeline_executor import DataPipelineExecutor
from data_processor import DataProcessor
from user_session_state import AppState
import llm_wrapper
#constructor for a functions class
class ActionsManager:
#constructor
def __init__(self, current_model, data_service: MetaDataService):
self.current_model = current_model
self.data_pipline_executor = DataPipelineExecutor(DataProcessor(), data_service)
self.data_service = data_service
self.function_mapping = {
"query_data_catalogue": self.function_query_data_catalogue,
"fetch_data": self.function_fetch_data,
"fetch_meta_data": self.function_fetch_meta_data,
"fetch_bar_chart_data": self.function_fetch_bar_chart_data,
"fetch_line_chart_data": self.function_fetch_line_chart_data,
"fetch_pie_chart_data": self.function_fetch_pie_chart_data,
"fetch_scatter_chart_data": self.function_fetch_scatter_chart_data,
"comment_on_data": self.function_comment_on_data,
"clear": self.function_clear,
"recommend_analysis": self.function_recommend_analysis,
"create_workspace": self.function_enter_workspace_state,
"define_new_data_set": self.function_generate_pipeline_definition,
"create_new_data_set": self.execute_pipeline_definition,
"ask_panel_fetch_data": self.ask_panel_fetch_data,
# Add more function mappings here...
}
def execute_pipeline_definition(self, socketio, session_id, convo_history: UserSessionState, user_input, data_source_name, data_source_description):
#get the pipeline definition from the conversation history
pipeline_definition = convo_history.get_current_data_pipeline()
#print the pipeline definition
print("Attempting to execute pipeline definition: ", pipeline_definition)
#execute the pipeline definition
result_data_frames = self.data_pipline_executor.run(pipeline_definition)
#given the pipeline definition, take the last frame name
last_data_frame_name = pipeline_definition[-1]["params"]["output_name"]
#persist the last data frame
self.data_service.persist_data_source(data_source_name, result_data_frames[last_data_frame_name], data_source_description, "User Generated Data Sets")
convo_history.set_app_state(AppState.Default)
#return the data set
data = None
metadata = None
commentary = "Data set created - Exiting workspace"
return data, metadata, commentary
def function_generate_pipeline_definition(self, socketio, session_id, convo_history: UserSessionState, user_input):
prompt = completion_builder.build_pipeline_prompt(convo_history, user_input, self.data_service.get_all_meta_data(), self.data_pipline_executor.example_data_pipeline)
messages = completion_builder.build_message_list_for_pipeline_generation(prompt)
response = llm_wrapper.llm_call(messages)
#print the pipeline
print("Pipeline Definition: ", response)
commentary = "Pipeline Generated"
data = None
metadata = response
metadata = self.check_for_json_tag(metadata)
metadata = json.loads(metadata)
convo_history.set_current_data_pipeline(metadata)
return data, metadata, commentary
def function_enter_workspace_state(self, socketio, session_id, convo_history: UserSessionState, user_input, prompt_user_for_data):
convo_history.set_app_state(AppState.Workspace)
commentary = prompt_user_for_data
data = None
metadata = None
return data, metadata, commentary
def function_recommend_analysis(self, socketio, session_id, convo_history, user_input: str, data_source_name: str):
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
#get the meta data for the data source
data_source_meta = data_source["meta"]
prompt = completion_builder.build_analysis_recommendation_prompt(convo_history, user_input, data_source_meta)
prompt = completion_builder.add_custom_prompt_elements(prompt, data_source_name)
messages = completion_builder.build_basic_message_list(prompt)
response = llm_wrapper.llm_call(messages)
commentary = response
data = None
metadata= None
return data, metadata, commentary
def function_comment_on_data(self, socketio, session_id, convo_history, user_input: str, data_source_name: str, query: str):
#if we have both the data source name and the query, fetch the data
if data_source_name is not None and query is not None:
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
#add a limit to the query if it doesn't already have one. Stops wallet annihilation.
if "LIMIT" not in query.upper():
query += " LIMIT 100"
#if it does have a limit, make sure the limit is 100 or less
else:
query = query.upper()
limit_index = query.find("LIMIT")
limit = int(query[limit_index + 5:])
if limit > 100:
query = query[:limit_index + 5] + "100"
data = self.data_service.query(query, data_source_name)
metadata = None
commentary = ""
#emit that we're analysing the data
progress_data = {'status': 'analysing_data', 'message': 'Analysing Data'}
socketio.emit('progress', progress_data, room=session_id)
#get the data set in a string format
data_str = str(data)
prompt = completion_builder.build_data_analysis_prompt(convo_history, user_input, data_str)
messages = completion_builder.build_basic_message_list(prompt)
response = llm_wrapper.llm_call(messages)
commentary = response
data = None
metadata= None
return data, metadata, commentary
def function_clear(self, socketio, session_id, convo_history, user_input):
convo_history = UserSessionState()
return None, None, "Conversation history cleared."
def function_query_data_catalogue(self, socketio, session_id, convo_history, user_input : str):
all_meta_data = self.data_service.get_all_meta_data()
prompt = completion_builder.build_query_catalogue_prompt(convo_history, user_input, all_meta_data)
#print the user input we're using to generate a response
print(f"User input: {user_input}")
messages = completion_builder.build_basic_message_list(prompt)
response = llm_wrapper.llm_call(messages)
commentary = response
commentary = self.check_for_json_tag(commentary)
data = None
#return the meta data as JSON string
#get the list of data source names from the JSON
data_source_names = json.loads(commentary)["data_source_names"]
metadata = self.data_service.get_meta_data_for_multiple_data_sources(data_source_names)
return data, metadata, commentary
def ask_panel_fetch_data(self, socketio, session_id, convo_history: UserSessionState, user_input, data_source_name):
data_source_name = convo_history.get_specific_data_set()
data_source = self.data_service.get_data_source(data_source_name)
response = self.open_ai_generate_sql(socketio, session_id, convo_history, user_input,data_source["meta"], completion_builder.table_sql_prompt(convo_history, user_input, data_source["meta"]))
print(response)
data = self.data_service.query(response["SQL"], data_source_name)
convo_history.set_last_executed_query(response["SQL"])
metadata = None
commentary = f"DataQuery: Data source name: {data_source_name}, Query: {response['SQL']}"
return data, metadata, commentary
def function_fetch_data(self, socketio, session_id, convo_history: UserSessionState, user_input, data_source_name):
#if data source is not none
commentary = ""
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
response = self.open_ai_generate_sql(socketio, session_id, convo_history, user_input,data_source["meta"], completion_builder.table_sql_prompt(convo_history, user_input, data_source["meta"]))
print(response)
data = self.data_service.query(response["SQL"], data_source_name)
convo_history.set_last_executed_query(response["SQL"])
metadata = None
commentary = f"DataQuery: Data source name: {data_source_name}, Query: {response['SQL']}"
return data, metadata, commentary
def function_fetch_scatter_chart_data(self, socketio, session_id, convo_history, user_input, data_source_name, x_axis_title,y_axis_title,chart_title):
#if data source is not none
commentary = ""
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
response = self.open_ai_generate_sql(socketio, session_id, convo_history, user_input,data_source["meta"], completion_builder.scatter_graph_sql_prompt(convo_history, user_input, data_source["meta"]))
convo_history.set_last_executed_query(response["SQL"])
print(response)
data = self.data_service.query(response["SQL"], data_source_name)
#let's put the chart axis and title in a JSON object in metadata
metadata = {"x_axis_title": x_axis_title, "y_axis_title": y_axis_title, "chart_title": chart_title}
commentary = f"DataQuery: Data source name: {data_source_name}, Query: {response['SQL']}"
return data, metadata, commentary
def function_fetch_pie_chart_data(self, socketio, session_id, convo_history, user_input, data_source_name,chart_title):
#if data source is not none
commentary = ""
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
response = self.open_ai_generate_sql(socketio, session_id, convo_history, user_input,data_source["meta"], completion_builder.pie_graph_sql_prompt(convo_history, user_input, data_source["meta"]))
convo_history.set_last_executed_query(response["SQL"])
print(response)
data = self.data_service.query(response["SQL"], data_source_name)
#let's put the chart axis and title in a JSON object in metadata
metadata = {"chart_title": chart_title}
commentary = f"DataQuery: Data source name: {data_source_name}, Query: {response['SQL']}"
return data, metadata, commentary
def function_fetch_bar_chart_data(self, socketio, session_id, convo_history, user_input, data_source_name, x_axis_title,y_axis_title,chart_title):
#if data source is not none
commentary = ""
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
response = self.open_ai_generate_sql(socketio, session_id, convo_history, user_input,data_source["meta"], completion_builder.bar_graph_sql_prompt(convo_history, user_input, data_source["meta"]))
convo_history.set_last_executed_query(response["SQL"])
print(response)
data = self.data_service.query(response["SQL"], data_source_name)
#let's put the chart axis and title in a JSON object in metadata
metadata = {"x_axis_title": x_axis_title, "y_axis_title": y_axis_title, "chart_title": chart_title}
commentary = f"DataQuery: Data source name: {data_source_name}, Query: {response['SQL']}"
return data, metadata, commentary
def function_fetch_line_chart_data(self, socketio, session_id, convo_history: UserSessionState, user_input, data_source_name,x_axis_title,y_axis_title,chart_title):
#if data source is not none
commentary = ""
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
response = self.open_ai_generate_sql(socketio, session_id, convo_history, user_input,data_source["meta"], completion_builder.line_graph_sql_prompt(convo_history, user_input, data_source["meta"]))
convo_history.set_last_executed_query(response["SQL"])
print(response)
data = self.data_service.query(response["SQL"], data_source_name)
#let's put the chart axis and title in a JSON object in metadata
metadata = {"x_axis_title": x_axis_title, "y_axis_title": y_axis_title, "chart_title": chart_title}
commentary = f"DataQuery: Data source name: {data_source_name}, Query: {response['SQL']}"
return data, metadata, commentary
def function_fetch_meta_data(self, socketio, session_id, convo_history, user_input, data_source_name=None, ai_commentary=None):
data_source = self.data_service.get_data_source(data_source_name)
if data_source is None:
data_source_name, data_source = self.open_ai_infer_data_source(socketio, session_id, convo_history, user_input)
data = None
metadata = self.data_service.get_data_source(data_source_name)["meta"]
return data, metadata, f"Here's the meta data for {data_source_name}"
def open_ai_infer_data_source(self, socketio, session_id, convo_history, user_input):
print(f"Data set unknown. Determining data source from user input '{user_input}'")
progress_data = {'status': 'data_source_inference', 'message': 'Inferring Data Source'}
socketio.emit('progress', progress_data, room=session_id)
all_meta_data = self.data_service.get_all_meta_data()
prompt = completion_builder.build_data_source_inference_prompt(convo_history, user_input, all_meta_data)
#print the user input we're using to generate a response
print(f"User input: {user_input}")
messages = completion_builder.build_basic_message_list(prompt)
response = llm_wrapper.llm_call(messages)
output = response
data_source_json = json.loads(output)
data_source_name = data_source_json["data_source"]
data_source = self.data_service.get_data_source(data_source_name)
#print the data source name
print(f"Data source name: {data_source_name}")
return data_source_name, data_source
def open_ai_generate_sql(self, socketio, session_id, convo_history, user_input, data_source_meta, prompt):
progress_data = {'status': 'data_query_generation', 'message': 'Generating Data Query'}
if socketio is not None:
socketio.emit('progress', progress_data, room=session_id)
#get the data source name
data_source_name = data_source_meta["name"]
prompt = completion_builder.add_custom_prompt_elements(prompt, data_source_name)
#print the user input we're using to generate a response
print(f"User input: {user_input}")
messages = completion_builder.build_message_list_for_sql_generation(prompt)
response = llm_wrapper.llm_call(messages)
output = response
#GPT-4-Turbo generally tags JSON output with "json" at the start of the string.
#Remove the json tagging if it exists.
output = self.check_for_json_tag(output)
return json.loads(output)
def check_for_json_tag(self, output):
if output.startswith("```json"):
output = output.replace("```json", "")
output = output.replace("```", "")
return output
def execute_function(self,socket_io: SocketIO, session_id: str,conversation_history: UserSessionState, response_message: Dict[str,Any], user_input: str, name: str, args) -> tuple[List[Dict[str, Any]], Dict[str, Any], str ]:
#print the function name and arguments
print(f"Executing function '{name}' with arguments {args}")
if name in self.function_mapping:
func = self.function_mapping[name]
data, metadata, commentary = func(socket_io, session_id, conversation_history, user_input, **args)
return data, metadata, commentary
else:
raise ValueError(f"Function '{name}' not found.")
def get_functions(self, state: AppState):
if state == state.SpecificData:
return function_defs.data_set_lock_functions()
if state == state.Default:
return function_defs.default_functions()
elif state == state.Workspace:
return function_defs.workspace_functions()
| [] |
2024-01-10 | NickMcCrea/nickaiapp | backend~llm_wrapper.py | import openai
running_cost = 0
running_prompt_tokens = 0
running_completion_tokens = 0
model4="gpt-4-1106-preview"
model3="gpt-3.5-turbo-1106"
current_model = model4
COSTS = {
"gpt-3.5-turbo-1106": {"input": 0.001 / 1000, "output": 0.002 / 1000},
"gpt-4-1106-preview": {"input": 0.01 / 1000, "output": 0.03 / 1000},
}
def llm_call(messages):
global running_cost
response = openai.ChatCompletion.create(
model=current_model,
messages=messages
)
calc_cost(response,current_model)
return response['choices'][0]['message']['content']
def llm_call_with_functions(message_array, function_list):
response = openai.ChatCompletion.create(
model=current_model,
messages=message_array,
functions=function_list,
function_call="auto"
)
calc_cost(response, current_model)
return response["choices"][0]["message"]
def calc_cost(response, current_model):
prompt_tokens = response['usage']['prompt_tokens']
completion_tokens = response['usage']['completion_tokens']
global running_prompt_tokens
global running_completion_tokens
running_prompt_tokens += prompt_tokens
running_completion_tokens += completion_tokens
#calculate the cost
#prompt tokens cost 0.01 per 1000
#completion tokens cost 0.003 per 1000
prompt_cost = prompt_tokens * COSTS[current_model]["input"]
completion_cost = completion_tokens * COSTS[current_model]["output"]
total_cost = prompt_cost + completion_cost
#print the total cost in dollars
print("Query cost: $", total_cost)
print("Prompt tokens: ", prompt_tokens)
print("Completion tokens: ", completion_tokens)
global running_cost
running_cost += total_cost
print("Total cost: $", running_cost)
print("Total prompt tokens: ", running_prompt_tokens)
print("Total completion tokens: ", running_completion_tokens) | [
"0",
"input",
"prompt_tokens"
] |
2024-01-10 | DataCTE/Camel-local | Camel-localmodel.py | import os
import datetime
from typing import List
from langchain.callbacks import get_openai_callback
from langchain.llms import GPT4All
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage,
BaseMessage,
)
import time
from threading import Thread
import json
from typing import List
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# Define the model file path here
model_file_path = "/home/dev-1/.local/share/nomic.ai/GPT4All/ggml-gpt4all-j-v1.3-groovy.bin"
class CAMELAgent:
def __init__(self, model_file_path: str, max_memory_size: int = 10) -> None:
template = """Question: {question}
Answer: Let's think step by step."""
self.prompt = PromptTemplate(template=template, input_variables=["question"])
self.callbacks = [StreamingStdOutCallbackHandler()]
self.llm = GPT4All(model=model_file_path)
self.llm_chain = LLMChain(prompt=self.prompt, llm=self.llm)
self.memory = []
self.max_memory_size = max_memory_size
def reset(self) -> None:
self.memory = []
def _run_with_timeout(self, func, args=(), timeout=60):
result = [None]
def target():
result[0] = func(*args)
thread = Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
thread.join()
raise TimeoutError("Model response timeout")
return result[0]
def step(self, role: str, message: str) -> str:
input_with_memory = '\n'.join([json.dumps(msg) for msg in self.memory] + [f"{role}: {message}"])
try:
response = self._run_with_timeout(self.llm_chain.run, args=(input_with_memory,), timeout=800)
except TimeoutError:
response = "Model response timed out"
# Update memory with current input and response
self.memory.append({
'role': role,
'message': message,
'response': response
})
# Truncate the memory if it exceeds the maximum size
if len(self.memory) > self.max_memory_size:
self.memory = self.memory[-self.max_memory_size:]
return response
assistant_role_name = "Game Dev Coder"
user_role_name = "Game Enthusiast"
task = "Design a text adventure game set in Singapore"
word_limit = 50 # word limit for task brainstorming
task_specifier_sys_msg = SystemMessage(content="You can make a task more specific.")
task_specifier_prompt = (
"""Here is a task that {assistant_role_name} will discuss with {user_role_name} to: {task}.
Please make it more specific. Be creative and imaginative.
Please reply with the full task in {word_limit} words or less. Do not add anything else."""
)
task_specifier_template = SystemMessagePromptTemplate.from_template(template=task_specifier_prompt)
task_specify_agent = CAMELAgent(model_file_path)
task_specifier_msg = task_specifier_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task,
word_limit=word_limit
)[0]
specified_task_msg = task_specify_agent.step("user", task_specifier_msg.content)
specified_task = specified_task_msg
print(f"Specified task: {specified_task}")
assistant_inception_prompt = (
"""Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles!
We share a common interest in collaborating to successfully complete a task.
You must help me to complete the task.
Here is the task: {task}. Never forget our task!
I will instruct you based on your expertise and my needs to complete the task.
I must give you one question at a time.
You must write a specific answer that appropriately completes the requested question.
You must decline my question honestly if you cannot comply the question due to physical, moral, legal reasons or your capability and explain the reasons.
Do not add anything else other than your answer to my instruction.
Unless I say the task is completed, you should always start with:
My response: <YOUR_SOLUTION>
<YOUR_SOLUTION> should be specific and descriptive.
Always end <YOUR_SOLUTION> with: Next question."""
)
user_inception_prompt = (
"""Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always ask me.
We share a common interest in collaborating to successfully complete a task.
I must help you to answer the questions.
Here is the task: {task}. Never forget our task!
You must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:
1. Instruct with a necessary input:
Instruction: <YOUR_INSTRUCTION>
Input: <YOUR_INPUT>
2. Instruct without any input:
Instruction: <YOUR_INSTRUCTION>
Input: None
The "Instruction" describes a task or question. The paired "Input" provides further context or information for the requested "Instruction".
You must give me one instruction at a time.
I must write a response that appropriately completes the requested instruction.
I must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.
You should instruct me not ask me questions.
Now you must start to instruct me using the two ways described above.
Do not add anything else other than your instruction and the optional corresponding input!
Keep giving me instructions and necessary inputs until you think the task is completed.
When the task is completed, you must only reply with a single word <TASK_DONE>.
Never say <TASK_DONE> unless my responses have solved your task."""
)
def get_sys_msgs(assistant_role_name: str, user_role_name: str, task: str):
assistant_sys_template = SystemMessagePromptTemplate.from_template(template=assistant_inception_prompt)
assistant_sys_msg = assistant_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task
)[0]
user_sys_template = SystemMessagePromptTemplate.from_template(template=user_inception_prompt)
user_sys_msg = user_sys_template.format_messages(
assistant_role_name=assistant_role_name,
user_role_name=user_role_name,
task=task
)[0]
return assistant_sys_msg, user_sys_msg
assistant_sys_msg, user_sys_msg = get_sys_msgs(assistant_role_name, user_role_name, specified_task)
assistant_agent = CAMELAgent(model_file_path)
user_agent = CAMELAgent(model_file_path)
# Reset agents
assistant_agent.reset()
user_agent.reset()
# Initialize chats
assistant_msg = HumanMessage(
content=(f"{user_sys_msg.content}. "
"Now start to give me introductions one by one. "
"Only reply with Instruction and Input."))
user_msg = HumanMessage(content=f"{assistant_sys_msg.content}")
user_msg = assistant_agent.step("user", user_msg.content)
def write_conversation_to_file(conversation: List[str], filename: str) -> None:
"""
Write a conversation to a text file with a timestamp in its filename.
Parameters:
- conversation (List[str]): A list of strings representing the conversation turns.
- filename (str): The name of the file to write the conversation to.
Returns:
None
"""
def timestamp() -> str:
"""
Convert the current date and time into a custom timestamp format.
Returns:
str: The current date and time in the format HHMMDDMMYYYY.
"""
now = datetime.datetime.now()
timestamp = now.strftime("%H%M%d%m%Y")
return timestamp
def append_timestamp_to_filename(filename: str) -> str:
"""
Append a timestamp to a filename before the extension.
Parameters:
- filename (str): The original filename.
Returns:
str: The filename with a timestamp appended.
"""
base, extension = os.path.splitext(filename)
new_filename = f"{base}-{timestamp()}{extension}"
return new_filename
filename = append_timestamp_to_filename(filename)
with open(filename, 'w') as f:
for turn in conversation:
f.write(f"{turn}\n\n")
print(f"Original task prompt:\n{task}\n")
print(f"Specified task prompt:\n{specified_task}\n")
conversation = []
chat_turn_limit, n = 15, 0
while n < chat_turn_limit:
n += 1
user_ai_msg = user_agent.step("assistant", assistant_msg.content)
user_msg = HumanMessage(content=user_ai_msg)
print(f"AI User ({user_role_name}):\n\n{user_msg.content}\n\n")
conversation.append(f"AI User ({user_role_name}):\n\n{user_msg.content}")
assistant_ai_msg = assistant_agent.step("user", user_msg.content)
assistant_msg = HumanMessage(content=assistant_ai_msg)
print(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}\n\n")
conversation.append(f"AI Assistant ({assistant_role_name}):\n\n{assistant_msg.content}")
if "<TASK_DONE>" in user_msg.content:
break
print(f"Total Successful Requests: {assistant_agent.llm_chain.llm.successful_requests}")
print(f"Total Tokens Used: {assistant_agent.llm_chain.llm.total_tokens}")
print(f"Prompt Tokens: {assistant_agent.llm_chain.prompt_tokens}")
print(f"Completion Tokens: {assistant_agent.llm_chain.completion_tokens}")
print(f"Total Cost (USD): ${assistant_agent.llm_chain.total_cost}")
write_conversation_to_file(conversation, 'conversation.txt')
| [
"Question: {question}\n\n Answer: Let's think step by step.",
"Now start to give me introductions one by one. ",
"You can make a task more specific.",
"Never forget you are a {user_role_name} and I am a {assistant_role_name}. Never flip roles! You will always ask me.\nWe share a common interest in collaborating to successfully complete a task.\nI must help you to answer the questions.\nHere is the task: {task}. Never forget our task!\nYou must instruct me based on my expertise and your needs to complete the task ONLY in the following two ways:\n\n1. Instruct with a necessary input:\nInstruction: <YOUR_INSTRUCTION>\nInput: <YOUR_INPUT>\n\n2. Instruct without any input:\nInstruction: <YOUR_INSTRUCTION>\nInput: None\n\nThe \"Instruction\" describes a task or question. The paired \"Input\" provides further context or information for the requested \"Instruction\".\n\nYou must give me one instruction at a time.\nI must write a response that appropriately completes the requested instruction.\nI must decline your instruction honestly if I cannot perform the instruction due to physical, moral, legal reasons or my capability and explain the reasons.\nYou should instruct me not ask me questions.\nNow you must start to instruct me using the two ways described above.\nDo not add anything else other than your instruction and the optional corresponding input!\nKeep giving me instructions and necessary inputs until you think the task is completed.\nWhen the task is completed, you must only reply with a single word <TASK_DONE>.\nNever say <TASK_DONE> unless my responses have solved your task.",
"Only reply with Instruction and Input.",
"Here is a task that {assistant_role_name} will discuss with {user_role_name} to: {task}.\nPlease make it more specific. Be creative and imaginative.\nPlease reply with the full task in {word_limit} words or less. Do not add anything else.",
"Never forget you are a {assistant_role_name} and I am a {user_role_name}. Never flip roles!\nWe share a common interest in collaborating to successfully complete a task.\nYou must help me to complete the task.\nHere is the task: {task}. Never forget our task!\nI will instruct you based on your expertise and my needs to complete the task.\n\nI must give you one question at a time.\nYou must write a specific answer that appropriately completes the requested question.\nYou must decline my question honestly if you cannot comply the question due to physical, moral, legal reasons or your capability and explain the reasons.\nDo not add anything else other than your answer to my instruction.\n\nUnless I say the task is completed, you should always start with:\n\nMy response: <YOUR_SOLUTION>\n\n<YOUR_SOLUTION> should be specific and descriptive.\nAlways end <YOUR_SOLUTION> with: Next question."
] |
2024-01-10 | nozdrenkov/gpt4m | ingest~privateGptServer.py | #!/usr/bin/env python3
"""Flask server to find relevant entries in documents using the power of embeddings."""
from typing import Tuple, Any
import os
import faulthandler
from flask import Flask, request, jsonify
from flask_cors import CORS
from dotenv import load_dotenv
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from chromadb.config import Settings
import os
from dotenv import load_dotenv
import threading
faulthandler.enable()
load_dotenv()
_EMBEDDINGS_MODEL_NAME = os.getenv("EMBEDDINGS_MODEL_NAME")
_PERSIST_DIRECTORY = os.getenv('PERSIST_DIRECTORY')
_TARGET_SOURCE_CHUNKS = int(os.getenv('TARGET_SOURCE_CHUNKS', 4))
_DEFAULT_TOP = 5
app = Flask(__name__)
CORS(app)
_embeddings = HuggingFaceEmbeddings(model_name=_EMBEDDINGS_MODEL_NAME)
_db = Chroma(persist_directory=_PERSIST_DIRECTORY,
embedding_function=_embeddings, client_settings=Settings(
chroma_db_impl='duckdb+parquet',
persist_directory=_PERSIST_DIRECTORY,
anonymized_telemetry=False
))
lock = threading.Lock()
@app.route('/query', methods=['POST'])
def handle_query() -> Tuple[Any, int]:
"""Handles the POST request at '/query' endpoint."""
try:
data = request.get_json(force=True)
with lock:
relevant_docs = _db.similarity_search_with_relevance_scores(
query=data.get('query'),
distance_metric="cos",
k=int(data.get('top', _DEFAULT_TOP)),
)
results = [
{
'source': doc[0].metadata['source'],
'page_content': doc[0].page_content,
'relevance': doc[1]
} for doc in relevant_docs
] if relevant_docs else []
return jsonify(results), 200
except Exception as e:
return jsonify({'error': str(e)}), 500
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5555)
| [] |
2024-01-10 | younesbram/GymPT | gympt.py | import streamlit as st
import openai
import time
st.set_page_config(
page_title="GymPT",
page_icon="💪",
layout="centered",
initial_sidebar_state="collapsed",
menu_items={
"Get Help": "https://www.x.com/didntdrinkwater/",
"Report a bug": "https://www.younes.ca/contact",
"About": "# AI Comedy\nAn app that uses NLP to generate hilarious skits!",
},
)
# Sidebar for OpenAI API Key Input
st.sidebar.title('Insert OpenAI API Key to use GPT4')
user_openai_key = st.sidebar.text_input('Enter OpenAI API Key (Please):')
# Use the user-provided key if available, otherwise use the secret key
openai_api_key = user_openai_key if user_openai_key else st.secrets["OPENAI_API_KEY"]
# Set the OpenAI API key
openai.api_key = openai_api_key
# Replace with your OpenAI API key
# openai.api_key = ""
# Function to calculate TDEE
def calculate_tdee(height, weight, activity_level, goal, age, units):
# Convert height and weight to centimeters and kilograms
if units == 'inches/lbs':
height_cm = height * 2.54
weight_kg = weight * 0.453592
else: # Assuming the other option is 'cm/kg'
height_cm = height
weight_kg = weight
# Calculate BMR using Mifflin-St Jeor Equation
bmr = 10 * weight_kg + 6.25 * height_cm - 5 * age + 5
# Multiply by activity factor
tdee = bmr * activity_level
# Adjust for goal (e.g., weight loss)
#if goal == "Weight Loss":
# tdee -= 350 # Example deficit for weight loss
#elif goal == "Muscle Gain":
# tdee += 350
#currently gpt4 already does this part
return tdee
# Title
st.title('💪 GymPT: Personalized Fitness')
# Introduction
st.markdown("""
Welcome to GymPT, your personal guide to achieving the body of your dreams!
Rome wasn't built in a day, & neither were the bodies of legends Arnold Schwarzenegger, Mike Mentzer, Jay Cutler, and more.
We believe in the power of consistency, dedication, and intelligent training.
Whether you're aiming for Herculean strength, chiseled aesthetics, or optimal health,
GymPT is designed to support you every step of the way.
Remember, the road to success is not about quick fixes—it's about hard work,
smart choices, and never giving up. Now, let's build your personalized plan! (If you have an OpenAI API key, please open the sidebar and insert it. For now, this app is free to use thanks to x.com/didntdrinkwater)
""")
# User Input for Workout Goals
goal = st.selectbox('Choose Your Fitness Goal', ['Weight Loss', 'Muscle Gain', 'Maintenance'])
# User Input for Dietary Preferences
diet = st.multiselect('Select Dietary Preferences (Optional)', ['Vegan', 'Keto', 'Low-Carb', 'High-Carb', 'Carb-Cycling', 'Gluten-Free'])
# Items in Fridge (for personalized diet recommendations)
fridge_items = st.text_area('Items in Your Fridge (Optional, leave empty if you only want a workout regimen)', value='', placeholder='E.g., eggs, chicken, broccoli, almonds...')
# Preferred Training Styles
training_styles = st.multiselect('Select Your Preferred Training Style - You can mix and match up to 3 trainers thanks to AI (Optional)', [
'Arnold Schwarzenegger – Volume Training and Classic Physique',
'Mike Mentzer – High-Intensity Training (HIT)',
'Jay Cutler – Balanced Approach with Emphasis on Symmetry',
'Dorian Yates – HIT with Blood and Guts Training',
'Frank Zane – Focus on Proportion and Aesthetics',
'Ronnie Coleman – High Volume and Heavy Lifting',
'Lee Haney – Stimulate, Don\'t Annihilate; Emphasis on Recovery',
'Calisthenics – Bodyweight Training for Strength and Flexibility',
'Rich Gaspari – Pre-Exhaustion Training with Intensity',
'Lou Ferrigno – Power Bodybuilding with Heavy Weights',
'Sergio Oliva – Classic Mass Building with Frequent Training',
'Larry Scott – Focus on Arms and Shoulders',
'Tom Platz – High Volume Leg Specialization',
'Flex Wheeler – Quality over Quantity; Focus on Form',
'Phil Heath – Scientific Approach with Attention to Detail',
'Chris Bumstead – Classic Physique with Modern Training',
'Kai Greene – Mind-Muscle Connection and Artistic Expression',
'CrossFit – Functional Fitness with Varied High-Intensity Workouts',
'Powerlifting – Focus on Strength and Power',
'Yoga – Focus on Flexibility and Mindfulness',
'Pilates – Focus on Core Strength and Posture',
'HIIT – High-Intensity Interval Training',
'Fasted Cardio – Cardio on an Empty Stomach',
'Kickboxing – Martial Arts and Cardio',
'Boxing – Martial Arts and Cardio',
'Muay Thai – Martial Arts and Cardio',
'Karate – Martial Arts',
'Taekwondo – Martial Arts',
'Zumba – Dance Fitness',
], max_selections=3)
# Height and Weight Inputs
units = st.selectbox('Choose Your Units', ['inches/lbs', 'cm/kg'])
if units == 'inches/lbs':
height_description = 'Enter Your Height (e.g., 68 inches)'
weight_description = 'Enter Your Weight (e.g., 160 lbs)'
else: # Assuming the other option is 'cm/kg'
height_description = 'Enter Your Height (e.g., 172 cm)'
weight_description = 'Enter Your Weight (e.g., 73 kg)'
height = st.number_input(height_description, min_value=0, max_value=300, step=1)
weight = st.number_input(weight_description, min_value=0, max_value=500, step=1)
age = st.number_input('Enter Your Age', min_value=0, max_value=120, step=1)
# Activity Level
activity_levels = {
"Sedentary (little to no exercise)": 1.2,
"Lightly active (light exercise/sports 1-3 days/week)": 1.375,
"Moderately active (moderate exercise/sports 3-5 days/week)": 1.55,
"Very active (hard exercise/sports 6-7 days a week)": 1.725,
"Super active (very hard exercise/sports & physical job or training twice a day)": 1.9
}
activity_level = st.selectbox('Choose Your Activity Level', list(activity_levels.keys()))
activity_factor = activity_levels[activity_level]
def generate_plan(goal, diet, fridge_items, training_styles, tdee, age):
messages = [
{
"role": "system",
"content": f"You are an extremely detailed Ai, who is knowledgeable in bodybuilding/fitness/dietitian and an expert! You only respond ethically."
},
{
"role": "user",
"content": f"My dietary preferences are {diet}. Create the perfect curated plan from {training_styles}. If there is anything in my fridge {fridge_items}, please include a meal plan, if not, dont mention the fridge being empty. My TDEE is {tdee} and I am {age} years old. My fitness goal is {goal} so try to give me accurate response based off my info. If i withheld dietary preference or training style, IGNORE IT and carry on with generic response. Do not give me any extra info, just respond as the trainers or mix of trainers and give the workout plan and the philosophy along with some things to research if need be and quotes from the trainers if there are any. Be extremely detailed and straight to the point"
}
]
delay_time = 0.01
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
temperature=0.666666666666666666666666666666420,
stream=True,
)
# Container to incrementally update the display
c = st.empty()
generated_text = ''
for event in response:
event_text = event['choices'][0]['delta'].get('content', '')
generated_text += event_text
c.markdown(generated_text) # Update the entire accumulated text
time.sleep(delay_time)
return generated_text
# Generate Workout and Diet Plan
if st.button('Generate Plan'):
# Validation checks
if not height or not weight or not age or not activity_level:
st.error('Please fill in all required fields (Height, Weight, Age, and Activity Level) before generating the plan.')
else:
with st.spinner('We\'re all gonna make it brah... Generating...'):
# Calculate TDEE
tdee = calculate_tdee(height, weight, activity_levels[activity_level], goal, age, units)
# Check if TDEE is calculated
if tdee:
# Call the generate_plan function with the calculated TDEE
plan = generate_plan(goal, diet, fridge_items, training_styles, tdee, age)
# Check if the plan has been generated
if plan:
# Create a download button for the generated plan
st.download_button(
label="Download Your Plan",
data=plan,
file_name="generated_plan.txt",
mime="text/plain",
)
else:
st.error('An error occurred while calculating your plan. Please make sure all inputs are correct.')
| [
"My dietary preferences are PLACEHOLDER. Create the perfect curated plan from PLACEHOLDER. If there is anything in my fridge PLACEHOLDER, please include a meal plan, if not, dont mention the fridge being empty. My TDEE is PLACEHOLDER and I am PLACEHOLDER years old. My fitness goal is PLACEHOLDER so try to give me accurate response based off my info. If i withheld dietary preference or training style, IGNORE IT and carry on with generic response. Do not give me any extra info, just respond as the trainers or mix of trainers and give the workout plan and the philosophy along with some things to research if need be and quotes from the trainers if there are any. Be extremely detailed and straight to the point",
"You are an extremely detailed Ai, who is knowledgeable in bodybuilding/fitness/dietitian and an expert! You only respond ethically."
] |
2024-01-10 | thinktecture-labs/azure-open-ai-function-calling-sample | tools~get_spaceship_info.py | from typing import Type
import requests
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
class GetSpaceshipInfoModel(BaseModel):
id: int = Field(..., description="The id of the spaceship")
class GetSpaceshipInfoTool(BaseTool):
name = "get_spaceship_info"
description = "A tool to retrieve additional information of a spaceship by its id"
args_schema: Type[GetSpaceshipInfoModel] = GetSpaceshipInfoModel
def _run(self, id: int):
res = requests.get("https://api.wheretheiss.at/v1/satellites/" + id)
return res.json()
def _arun(self, id: str):
raise NotImplementedError("GetSpaceshipDetailsTool is not implemented async")
| [
"A tool to retrieve additional information of a spaceship by its id"
] |
2024-01-10 | thinktecture-labs/azure-open-ai-function-calling-sample | tools~get_spaceship_name.py | import requests
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
from typing import Type
class GetSpaceshipNameModel(BaseModel):
id: int = Field(..., description="The id of a spaceship")
class GetSpaceshipNameTool(BaseTool):
name = "get_spaceship_name"
description = "A tool to retrieve the name of a single spaceship using its identifier"
args_schema: Type[BaseModel] = GetSpaceshipNameModel
def _run(self, id: int):
res = requests.get("https://swapi.dev/api/starships/" + str(id))
spaceship = res.json()
return spaceship["name"] + " (" + spaceship["model"] + ")"
| [
"A tool to retrieve the name of a single spaceship using its identifier"
] |
2024-01-10 | thinktecture-labs/azure-open-ai-function-calling-sample | tools~markdownify.py | import requests
import re
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
from typing import Type
class MarkdownifyModel(BaseModel):
text: str = Field(..., description="Text to be formatted")
bold: bool = Field(False, description="Whether to bold the text")
italic: bool = Field(False, description="Whether to italicize the text")
code: bool = Field(False, description="Wether to format the text as code")
class MarkdownifyTool(BaseTool):
name = "markdownify"
description = "A tool to format text in markdown. It can make text bold, italic, bold-italic or format it as code."
args_schema: Type[MarkdownifyModel] = MarkdownifyModel
def _run(self, text: str, bold: bool = False, italic: bool = False, code: bool = False):
if not text:
return text
if bold:
text = "**" + text + "**"
if italic:
text = "*" + text + "*"
return text
def _arun(self, id: str):
raise NotImplementedError("MarkdownifyTool is not implemented using async")
| [
"A tool to format text in markdown. It can make text bold, italic, bold-italic or format it as code."
] |
2024-01-10 | thinktecture-labs/azure-open-ai-function-calling-sample | tools~get_character_info.py | import requests
import re
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
from typing import List, Type
class GetCharacterInfoModel(BaseModel):
name: str = Field(..., description="The name of the character")
class GetCharacterInfoTool(BaseTool):
name = "get_character_info"
description = "A tool to retrieve information about a Star Wars character by its name"
args_schema: Type[GetCharacterInfoModel] = GetCharacterInfoModel
def _run(self, name: str):
res = requests.get("https://swapi.dev/api/people/?search=" + name)
search_result = res.json()
if search_result["count"] == 0:
return None
first_hit = search_result["results"][0]
id = None
match = re.search(r'/.*\/[^\/]+\/([^\/]+)/', first_hit["url"])
if match:
id = match.group(1)
else:
return None
spaceships = []
for spaceship in first_hit["starships"]:
match = re.search(r'/.*\/[^\/]+\/([^\/]+)/', spaceship)
if match:
spaceships.append(int(match.group(1)))
return {
"id": int(id),
"name": first_hit["name"],
"gender": first_hit["gender"],
"height": first_hit["height"],
"hair_color": first_hit["hair_color"],
"eye_color": first_hit["eye_color"],
"birth_year": first_hit["birth_year"],
"weight": first_hit["mass"],
"spaceships": spaceships,
}
def _arun(self, id: str):
raise NotImplementedError("GetCharacterIdTool is not implemented using async")
| [
"A tool to retrieve information about a Star Wars character by its name"
] |
2024-01-10 | thinktecture-labs/azure-open-ai-function-calling-sample | tools~get_spaceship_names.py | import requests
import re
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
from typing import List, Type
class GetSpaceshipNamesModel(BaseModel):
ids: List[int] = Field(..., description="A list of spaceship ids")
class GetSpaceshipNamesTool(BaseTool):
name = "get_spaceship_names"
description = "A tool to retrieve the names of multiple spaceships at once"
args_schema: Type[BaseModel] = GetSpaceshipNamesModel
def _run(self, ids: List[int]):
names = []
spaceships = []
res = requests.get("https://swapi.dev/api/starships/")
j = res.json()
spaceships.extend(j["results"])
while j["next"] is not None:
res = requests.get(j["next"])
j = res.json()
spaceships.extend(j["results"])
for spaceship in spaceships:
match = re.search(r'/.*\/[^\/]+\/([^\/]+)/', spaceship["url"])
if match:
if int(match.group(1)) in ids:
names.append(spaceship["name"] + " (" + spaceship["model"] + ")")
return names
def _arun(self, id: str):
raise NotImplementedError("GetSpaceshipNamesTool is not implemented async")
| [
"A tool to retrieve the names of multiple spaceships at once"
] |
2024-01-10 | iamrealwilson/llama_index | llama_index~query_engine~sql_join_query_engine.py | """SQL Join query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import NLStructStoreQueryEngine
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been transformed into a more detailed query,
and executed against another query engine.
The transformed query and query engine response are also given below.
Given SQL query, SQL response, transformed query, and query engine response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed query: {query_engine_query_str}
Query engine response: {query_engine_response_str}
Response:
""" # noqa
DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
class SQLJoinQueryEngine(BaseQueryEngine):
"""SQL Join Query Engine.
This query engine can "Join" a SQL database results
with another query engine.
It can decide it needs to query the SQL database or the other query engine.
If it decides to query the SQL database, it will first query the SQL database,
whether to augment information with retrieved results from the other query engine.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
other_query_tool (QueryEngineTool): Other query engine tool.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_join_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL join
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_join_synthesis (bool): Whether to use SQL join synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
other_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_join_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_join_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, NLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"NLStructStoreQueryEngine"
)
self._sql_query_tool = sql_query_tool
self._other_query_tool = other_query_tool
sql_query_engine = cast(NLStructStoreQueryEngine, sql_query_tool.query_engine)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_join_synthesis_prompt = (
sql_join_synthesis_prompt or DEFAULT_SQL_JOIN_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_join_synthesis = use_sql_join_synthesis
self._verbose = verbose
def _query_sql_other(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + other query engine in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_join_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
other_response = self._other_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"query engine response: {other_response}\n", color="pink")
logger.info(f"> query engine response: {other_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_join_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
query_engine_query_str=new_query.query_str,
query_engine_response_str=str(other_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(other_response.extra_info or {}),
}
source_nodes = other_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._other_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_other(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(
f"Querying other query engine: {result.reason}\n", color="blue"
)
logger.info(f"> Querying other query engine: {result.reason}")
response = self._other_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Query Engine response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been transformed into a more detailed query,\nand executed against another query engine.\nThe transformed query and query engine response are also given below.\nGiven SQL query, SQL response, transformed query, and query engine response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed query: {query_engine_query_str}\nQuery engine response: {query_engine_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |
2024-01-10 | luqmanbello/OpenBBTerminal_fork | openbb_terminal~helper_funcs.py | """Helper functions."""
__docformat__ = "numpy"
# pylint: disable=too-many-lines
# IMPORTS STANDARD LIBRARY
# IMPORTS STANDARD
import argparse
import inspect
import io
import json
import logging
import os
import random
import re
import shutil
import sys
import urllib.parse
import webbrowser
from datetime import (
date as d,
datetime,
timedelta,
)
from difflib import SequenceMatcher
from functools import lru_cache
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
# IMPORTS THIRDPARTY
import iso8601
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas.io.formats.format
import pandas_ta as ta
import pytz
import requests
import yfinance as yf
from holidays import US as us_holidays
from langchain.chat_models import ChatOpenAI
from llama_index import (
GPTVectorStoreIndex,
LLMPredictor,
PromptHelper,
ServiceContext,
SimpleDirectoryReader,
StorageContext,
load_index_from_storage,
)
from pandas._config.config import get_option
from pandas.plotting import register_matplotlib_converters
from PIL import Image, ImageDraw
from rich.table import Table
from screeninfo import get_monitors
from openbb_terminal import OpenBBFigure, plots_backend
from openbb_terminal.core.config.paths import (
HOME_DIRECTORY,
MISCELLANEOUS_DIRECTORY,
)
from openbb_terminal.core.plots.plotly_ta.ta_class import PlotlyTA
from openbb_terminal.core.session.current_system import get_current_system
# IMPORTS INTERNAL
from openbb_terminal.core.session.current_user import get_current_user
from openbb_terminal.decorators import check_api_key
from openbb_terminal.rich_config import console
logger = logging.getLogger(__name__)
register_matplotlib_converters()
if (
get_current_user().preferences.PLOT_BACKEND is not None
and get_current_user().preferences.PLOT_BACKEND != "None"
):
matplotlib.use(get_current_user().preferences.PLOT_BACKEND)
NO_EXPORT = 0
EXPORT_ONLY_RAW_DATA_ALLOWED = 1
EXPORT_ONLY_FIGURES_ALLOWED = 2
EXPORT_BOTH_RAW_DATA_AND_FIGURES = 3
MENU_GO_BACK = 0
MENU_QUIT = 1
MENU_RESET = 2
GPT_INDEX_DIRECTORY = MISCELLANEOUS_DIRECTORY / "gpt_index/"
GPT_INDEX_VER = 0.2
# Command location path to be shown in the figures depending on watermark flag
command_location = ""
# pylint: disable=R1702,R0912
# pylint: disable=global-statement
def set_command_location(cmd_loc: str):
"""Set command location.
Parameters
----------
cmd_loc: str
Command location called by user
"""
global command_location # noqa
command_location = cmd_loc
def check_path(path: str) -> str:
"""Check that path file exists.
Parameters
----------
path: str
path of file
Returns
-------
str:
Ratio of similarity between two strings
"""
# Just return empty path because this will be handled outside this function
if not path:
return ""
if path[0] == "~":
path = path.replace("~", HOME_DIRECTORY.as_posix())
# Return string of path if such relative path exists
if os.path.isfile(path):
return path
# Return string of path if an absolute path exists
if os.path.isfile("/" + path):
return f"/{path}"
logger.error("The path file '%s' does not exist.", path)
console.print(f"[red]The path file '{path}' does not exist.\n[/red]")
return ""
def parse_and_split_input(an_input: str, custom_filters: List) -> List[str]:
"""Filter and split the input queue.
Uses regex to filters command arguments that have forward slashes so that it doesn't
break the execution of the command queue.
Currently handles unix paths and sorting settings for screener menus.
Parameters
----------
an_input : str
User input as string
custom_filters : List
Additional regular expressions to match
Returns
-------
List[str]
Command queue as list
"""
# Make sure that the user can go back to the root when doing "/"
if an_input and an_input == "/":
an_input = "home"
# everything from ` -f ` to the next known extension
file_flag = r"(\ -f |\ --file )"
up_to = r".*?"
known_extensions = r"(\.(xlsx|csv|xls|tsv|json|yaml|ini|openbb|ipynb))"
unix_path_arg_exp = f"({file_flag}{up_to}{known_extensions})"
# Add custom expressions to handle edge cases of individual controllers
custom_filter = ""
for exp in custom_filters:
if exp is not None:
custom_filter += f"|{exp}"
del exp
slash_filter_exp = f"({unix_path_arg_exp}){custom_filter}"
filter_input = True
placeholders: Dict[str, str] = {}
while filter_input:
match = re.search(pattern=slash_filter_exp, string=an_input)
if match is not None:
placeholder = f"{{placeholder{len(placeholders)+1}}}"
placeholders[placeholder] = an_input[
match.span()[0] : match.span()[1] # noqa:E203
]
an_input = (
an_input[: match.span()[0]]
+ placeholder
+ an_input[match.span()[1] :] # noqa:E203
)
else:
filter_input = False
commands = an_input.split("/")
for command_num, command in enumerate(commands):
if command == commands[command_num] == commands[-1] == "":
return list(filter(None, commands))
matching_placeholders = [tag for tag in placeholders if tag in command]
if len(matching_placeholders) > 0:
for tag in matching_placeholders:
commands[command_num] = command.replace(tag, placeholders[tag])
return commands
def log_and_raise(error: Union[argparse.ArgumentTypeError, ValueError]) -> None:
"""Log and output an error."""
logger.error(str(error))
raise error
def similar(a: str, b: str) -> float:
"""Return a similarity float between string a and string b.
Parameters
----------
a: str
string a
b: str
string b
Returns
-------
float:
Ratio of similarity between two strings
"""
return SequenceMatcher(None, a, b).ratio()
def return_colored_value(value: str):
"""Return the string value with green, yellow, red or white color based on
whether the number is positive, negative, zero or other, respectively.
Parameters
----------
value: str
string to be checked
Returns
-------
value: str
string with color based on value of number if it exists
"""
values = re.findall(r"[-+]?(?:\d*\.\d+|\d+)", value)
# Finds exactly 1 number in the string
if len(values) == 1:
if float(values[0]) > 0:
return f"[green]{value}[/green]"
if float(values[0]) < 0:
return f"[red]{value}[/red]"
if float(values[0]) == 0:
return f"[yellow]{value}[/yellow]"
return f"{value}"
# pylint: disable=too-many-arguments
def print_rich_table(
df: pd.DataFrame,
show_index: bool = False,
title: str = "",
index_name: str = "",
headers: Optional[Union[List[str], pd.Index]] = None,
floatfmt: Union[str, List[str]] = ".2f",
show_header: bool = True,
automatic_coloring: bool = False,
columns_to_auto_color: Optional[List[str]] = None,
rows_to_auto_color: Optional[List[str]] = None,
export: bool = False,
print_to_console: bool = False,
limit: Optional[int] = 1000,
source: Optional[str] = None,
):
"""Prepare a table from df in rich.
Parameters
----------
df: pd.DataFrame
Dataframe to turn into table
show_index: bool
Whether to include index
title: str
Title for table
index_name : str
Title for index column
headers: List[str]
Titles for columns
floatfmt: Union[str, List[str]]
Float number formatting specs as string or list of strings. Defaults to ".2f"
show_header: bool
Whether to show the header row.
automatic_coloring: bool
Automatically color a table based on positive and negative values
columns_to_auto_color: List[str]
Columns to automatically color
rows_to_auto_color: List[str]
Rows to automatically color
export: bool
Whether we are exporting the table to a file. If so, we don't want to print it.
limit: Optional[int]
Limit the number of rows to show.
print_to_console: bool
Whether to print the table to the console. If False and interactive mode is
enabled, the table will be displayed in a new window. Otherwise, it will print to the
console.
source: Optional[str]
Source of the table. If provided, it will be displayed in the header of the table.
"""
if export:
return
current_user = get_current_user()
enable_interactive = (
current_user.preferences.USE_INTERACTIVE_DF and plots_backend().isatty
)
# Make a copy of the dataframe to avoid SettingWithCopyWarning
df = df.copy()
show_index = not isinstance(df.index, pd.RangeIndex) and show_index
# convert non-str that are not timestamp or int into str
# eg) praw.models.reddit.subreddit.Subreddit
for col in df.columns:
try:
if not any(
isinstance(df[col].iloc[x], pd.Timestamp)
for x in range(min(10, len(df)))
):
df[col] = pd.to_numeric(df[col])
except (ValueError, TypeError):
df[col] = df[col].astype(str)
def _get_headers(_headers: Union[List[str], pd.Index]) -> List[str]:
"""Check if headers are valid and return them."""
output = _headers
if isinstance(_headers, pd.Index):
output = list(_headers)
if len(output) != len(df.columns):
log_and_raise(
ValueError("Length of headers does not match length of DataFrame")
)
return output
if enable_interactive and not print_to_console:
df_outgoing = df.copy()
# If headers are provided, use them
if headers is not None:
# We check if headers are valid
df_outgoing.columns = _get_headers(headers)
if show_index and index_name not in df_outgoing.columns:
# If index name is provided, we use it
df_outgoing.index.name = index_name or "Index"
df_outgoing = df_outgoing.reset_index()
for col in df_outgoing.columns:
if col == "":
df_outgoing = df_outgoing.rename(columns={col: " "})
plots_backend().send_table(
df_table=df_outgoing,
title=title,
source=source, # type: ignore
theme=current_user.preferences.TABLE_STYLE,
)
return
df = df.copy() if not limit else df.copy().iloc[:limit]
if automatic_coloring:
if columns_to_auto_color:
for col in columns_to_auto_color:
# checks whether column exists
if col in df.columns:
df[col] = df[col].apply(lambda x: return_colored_value(str(x)))
if rows_to_auto_color:
for row in rows_to_auto_color:
# checks whether row exists
if row in df.index:
df.loc[row] = df.loc[row].apply(
lambda x: return_colored_value(str(x))
)
if columns_to_auto_color is None and rows_to_auto_color is None:
df = df.applymap(lambda x: return_colored_value(str(x)))
if current_user.preferences.USE_TABULATE_DF:
table = Table(title=title, show_lines=True, show_header=show_header)
if show_index:
table.add_column(index_name)
if headers is not None:
headers = _get_headers(headers)
for header in headers:
table.add_column(str(header))
else:
for column in df.columns:
table.add_column(str(column))
if isinstance(floatfmt, list) and len(floatfmt) != len(df.columns):
log_and_raise(
ValueError(
"Length of floatfmt list does not match length of DataFrame columns."
)
)
if isinstance(floatfmt, str):
floatfmt = [floatfmt for _ in range(len(df.columns))]
for idx, values in zip(df.index.tolist(), df.values.tolist()):
# remove hour/min/sec from timestamp index - Format: YYYY-MM-DD # make better
row_idx = [str(idx)] if show_index else []
row_idx += [
str(x)
if not isinstance(x, float) and not isinstance(x, np.float64)
else (
f"{x:{floatfmt[idx]}}"
if isinstance(floatfmt, list)
else (
f"{x:.2e}" if 0 < abs(float(x)) <= 0.0001 else f"{x:floatfmt}"
)
)
for idx, x in enumerate(values)
]
table.add_row(*row_idx)
console.print(table)
else:
console.print(df.to_string(col_space=0))
def check_int_range(mini: int, maxi: int):
"""Check if argparse argument is an int between 2 values.
Parameters
----------
mini: int
Min value to compare
maxi: int
Max value to compare
Returns
-------
int_range_checker:
Function that compares the three integers
"""
# Define the function with default arguments
def int_range_checker(num: int) -> int:
"""Check if int is between a high and low value.
Parameters
----------
num: int
Input integer
Returns
----------
num: int
Input number if conditions are met
Raises
------
argparse.ArgumentTypeError
Input number not between min and max values
"""
num = int(num)
if num < mini or num > maxi:
log_and_raise(
argparse.ArgumentTypeError(f"Argument must be in range [{mini},{maxi}]")
)
return num
# Return function handle to checking function
return int_range_checker
def check_non_negative(value) -> int:
"""Argparse type to check non negative int."""
new_value = int(value)
if new_value < 0:
log_and_raise(argparse.ArgumentTypeError(f"{value} is negative"))
return new_value
def check_terra_address_format(address: str) -> str:
"""Validate that terra account address has proper format.
Example: ^terra1[a-z0-9]{38}$
Parameters
----------
address: str
terra blockchain account address
Returns
-------
str
Terra blockchain address or raise argparse exception
"""
pattern = re.compile(r"^terra1[a-z0-9]{38}$")
if not pattern.match(address):
log_and_raise(
argparse.ArgumentTypeError(
f"Terra address: {address} has invalid format. Valid format: ^terra1[a-z0-9]{{38}}$"
)
)
return address
def check_non_negative_float(value) -> float:
"""Argparse type to check non negative int."""
new_value = float(value)
if new_value < 0:
log_and_raise(argparse.ArgumentTypeError(f"{value} is negative"))
return new_value
def check_positive_list(value) -> List[int]:
"""Argparse type to return list of positive ints."""
list_of_nums = value.split(",")
list_of_pos = []
for a_value in list_of_nums:
new_value = int(a_value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
)
list_of_pos.append(new_value)
return list_of_pos
def check_positive(value) -> int:
"""Argparse type to check positive int."""
new_value = int(value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is an invalid positive int value")
)
return new_value
def check_indicators(string: str) -> List[str]:
"""Check if indicators are valid."""
ta_cls = PlotlyTA()
choices = sorted(
[c.name.replace("plot_", "") for c in ta_cls if c.name != "plot_ma"]
+ ta_cls.ma_mode
)
choices_print = (
f"{'`, `'.join(choices[:10])}`\n `{'`, `'.join(choices[10:20])}"
f"`\n `{'`, `'.join(choices[20:])}"
)
strings = string.split(",")
for s in strings:
if s not in choices:
raise argparse.ArgumentTypeError(
f"\nInvalid choice: {s}, choose from \n `{choices_print}`",
)
return strings
def check_indicator_parameters(args: str, _help: bool = False) -> str:
"""Check if indicators parameters are valid."""
ta_cls = PlotlyTA()
indicators_dict: dict = {}
regex = re.compile(r"([a-zA-Z]+)\[([0-9.,]*)\]")
no_params_regex = re.compile(r"([a-zA-Z]+)")
matches = regex.findall(args)
no_params_matches = no_params_regex.findall(args)
indicators = [m[0] for m in matches]
for match in no_params_matches:
if match not in indicators:
matches.append((match, ""))
if _help:
console.print(
"""[yellow]To pass custom parameters to indicators:[/]
[green]Example:
-i macd[12,26,9],rsi[14],sma[20,50]
-i macd,rsi,sma (uses default parameters)
[yellow]Would pass the following to the indicators:[/]
[green]macd=dict(fast=12, slow=26, signal=9)
rsi=dict(length=14)
sma=dict(length=[20,50])
They must be in the same order as the function parameters.[/]\n"""
)
pop_keys = ["close", "high", "low", "open", "open_", "volume", "talib", "return"]
if matches:
check_indicators(",".join([m[0] for m in matches]))
for match in matches:
indicator, args = match
indicators_dict.setdefault(indicator, {})
if indicator in ["fib", "srlines", "demark", "clenow"]:
if _help:
console.print(
f"[yellow]{indicator}:[/]\n{'':^4}[green]Parameters: None[/]"
)
continue
fullspec = inspect.getfullargspec(getattr(ta, indicator))
kwargs = list(set(fullspec.args) - set(pop_keys))
kwargs.sort(key=fullspec.args.index)
if _help:
console.print(
f"[yellow]{indicator}:[/]\n{'':^4}[green]Parameters: {', '.join(kwargs)}[/]"
)
if indicator in ta_cls.ma_mode:
indicators_dict[indicator]["length"] = check_positive_list(args)
continue
for i, arg in enumerate(args.split(",")):
if arg and len(kwargs) > i:
indicators_dict[indicator][kwargs[i]] = (
float(arg) if "." in arg else int(arg)
)
return json.dumps(indicators_dict)
if not matches:
raise argparse.ArgumentTypeError(
f"Invalid indicator arguments: {args}. \n Example: -i macd[12,26,9],rsi[14]"
)
return args
def check_positive_float(value) -> float:
"""Argparse type to check positive float."""
new_value = float(value)
if new_value <= 0:
log_and_raise(
argparse.ArgumentTypeError(f"{value} is not a positive float value")
)
return new_value
def check_percentage_range(num) -> float:
"""Check if float is between 0 and 100. If so, return it.
Parameters
----------
num: float
Input float
Returns
-------
num: float
Input number if conditions are met
Raises
------
argparse.ArgumentTypeError
Input number not between min and max values
"""
num = float(num)
maxi = 100.0
mini = 0.0
if num <= mini or num >= maxi:
log_and_raise(argparse.ArgumentTypeError("Value must be between 0 and 100"))
return num
def check_proportion_range(num) -> float:
"""Check if float is between 0 and 1. If so, return it.
Parameters
----------
num: float
Input float
Returns
-------
num: float
Input number if conditions are met
Raises
----------
argparse.ArgumentTypeError
Input number not between min and max values
"""
num = float(num)
maxi = 1.0
mini = 0.0
if num < mini or num > maxi:
log_and_raise(argparse.ArgumentTypeError("Value must be between 0 and 1"))
return num
def valid_date_in_past(s: str) -> datetime:
"""Argparse type to check date is in valid format."""
try:
delta = datetime.now() - datetime.strptime(s, "%Y-%m-%d")
if delta.days < 1:
log_and_raise(
argparse.ArgumentTypeError(
f"Not a valid date: {s}. Must be earlier than today"
)
)
return datetime.strptime(s, "%Y-%m-%d")
except ValueError as value_error:
logging.exception(str(value_error))
raise argparse.ArgumentTypeError(f"Not a valid date: {s}") from value_error
def check_list_dates(str_dates: str) -> List[datetime]:
"""Argparse type to check list of dates provided have a valid format.
Parameters
----------
str_dates: str
string with dates separated by ","
Returns
-------
list_dates: List[datetime]
List of valid dates
"""
list_dates = list()
if str_dates:
if "," in str_dates:
for dt_marker in str_dates.split(","):
list_dates.append(valid_date(dt_marker))
else:
list_dates.append(valid_date(str_dates))
return list_dates
def valid_date(s: str) -> datetime:
"""Argparse type to check date is in valid format."""
try:
return datetime.strptime(s, "%Y-%m-%d")
except ValueError as value_error:
logging.exception(str(value_error))
raise argparse.ArgumentTypeError(f"Not a valid date: {s}") from value_error
def is_valid_date(s: str) -> bool:
"""Check if date is in valid format."""
try:
datetime.strptime(s, "%Y-%m-%d")
return True
except ValueError:
return False
def valid_repo(repo: str) -> str:
"""Argparse type to check github repo is in valid format."""
result = re.search(r"^[a-zA-Z0-9-_.]+\/[a-zA-Z0-9-_.]+$", repo) # noqa: W605
if not result:
log_and_raise(
argparse.ArgumentTypeError(
f"{repo} is not a valid repo. Valid repo: org/repo"
)
)
return repo
def valid_hour(hr: str) -> int:
"""Argparse type to check hour is valid with 24-hour notation."""
new_hr = int(hr)
if (new_hr < 0) or (new_hr > 24):
log_and_raise(
argparse.ArgumentTypeError(f"{hr} doesn't follow 24-hour notion.")
)
return new_hr
def lower_str(string: str) -> str:
"""Convert string to lowercase."""
return string.lower()
def us_market_holidays(years) -> list:
"""Get US market holidays."""
if isinstance(years, int):
years = [
years,
]
# https://www.nyse.com/markets/hours-calendars
market_holidays = [
"Martin Luther King Jr. Day",
"Washington's Birthday",
"Memorial Day",
"Independence Day",
"Labor Day",
"Thanksgiving",
"Christmas Day",
]
# http://www.maa.clell.de/StarDate/publ_holidays.html
good_fridays = {
2010: "2010-04-02",
2011: "2011-04-22",
2012: "2012-04-06",
2013: "2013-03-29",
2014: "2014-04-18",
2015: "2015-04-03",
2016: "2016-03-25",
2017: "2017-04-14",
2018: "2018-03-30",
2019: "2019-04-19",
2020: "2020-04-10",
2021: "2021-04-02",
2022: "2022-04-15",
2023: "2023-04-07",
2024: "2024-03-29",
2025: "2025-04-18",
2026: "2026-04-03",
2027: "2027-03-26",
2028: "2028-04-14",
2029: "2029-03-30",
2030: "2030-04-19",
}
market_and_observed_holidays = market_holidays + [
holiday + " (Observed)" for holiday in market_holidays
]
all_holidays = us_holidays(years=years)
valid_holidays = [
date
for date in list(all_holidays)
if all_holidays[date] in market_and_observed_holidays
]
for year in years:
new_Year = datetime.strptime(f"{year}-01-01", "%Y-%m-%d")
if new_Year.weekday() != 5: # ignore saturday
valid_holidays.append(new_Year.date())
if new_Year.weekday() == 6: # add monday for Sunday
valid_holidays.append(new_Year.date() + timedelta(1))
for year in years:
valid_holidays.append(datetime.strptime(good_fridays[year], "%Y-%m-%d").date())
return valid_holidays
def lambda_long_number_format(num, round_decimal=3) -> Union[str, int, float]:
"""Format a long number."""
if num == float("inf"):
return "inf"
if isinstance(num, float):
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
string_fmt = f".{round_decimal}f"
num_str = int(num) if num.is_integer() else f"{num:{string_fmt}}"
return f"{num_str} {' KMBTP'[magnitude]}".strip()
if isinstance(num, int):
num = str(num)
if (
isinstance(num, str)
and num.lstrip("-").isdigit()
and not num.lstrip("-").startswith("0")
and not is_valid_date(num)
):
num = int(num)
num /= 1.0
magnitude = 0
while abs(num) >= 1000:
magnitude += 1
num /= 1000.0
string_fmt = f".{round_decimal}f"
num_str = int(num) if num.is_integer() else f"{num:{string_fmt}}"
return f"{num_str} {' KMBTP'[magnitude]}".strip()
return num
def revert_lambda_long_number_format(num_str: str) -> Union[float, str]:
"""
Revert the formatting of a long number if the input is a formatted number, otherwise return the input as is.
Parameters
----------
num_str : str
The number to remove the formatting.
Returns
-------
Union[float, str]
The number as float (with no formatting) or the input as is.
"""
magnitude_dict = {
"K": 1000,
"M": 1000000,
"B": 1000000000,
"T": 1000000000000,
"P": 1000000000000000,
}
# Ensure the input is a string and not empty
if not num_str or not isinstance(num_str, str):
return num_str
num_as_list = num_str.strip().split()
# If the input string is a number parse it as float
if (
len(num_as_list) == 1
and num_as_list[0].replace(".", "").replace("-", "").isdigit()
and not is_valid_date(num_str)
):
return float(num_str)
# If the input string is a formatted number with magnitude
if (
len(num_as_list) == 2
and num_as_list[1] in magnitude_dict
and num_as_list[0].replace(".", "").replace("-", "").isdigit()
):
num, unit = num_as_list
magnitude = magnitude_dict.get(unit)
if magnitude:
return float(num) * magnitude
# Return the input string as is if it's not a formatted number
return num_str
def lambda_long_number_format_y_axis(df, y_column, ax):
"""Format long number that goes onto Y axis."""
max_values = df[y_column].values.max()
magnitude = 0
while abs(max_values) >= 1000:
magnitude += 1
max_values /= 1000.0
magnitude_sym = " KMBTP"[magnitude]
# Second y label axis -
if magnitude_sym == " ":
ax[2].set_ylabel(f"{y_column}")
else:
ax[2].set_ylabel(f"{y_column} [{magnitude_sym}]")
divider_map = {" ": 1, "K": 1000, "M": 1000000, "B": 1000000000}
divider = divider_map[magnitude_sym]
ax[2].get_yaxis().set_major_formatter(
matplotlib.ticker.FuncFormatter(lambda x, _: int(x / divider))
)
def lambda_clean_data_values_to_float(val: str) -> float:
"""Clean data to float based on string ending."""
# Remove any leading or trailing parentheses and spaces
val = val.strip("( )")
if val == "-":
val = "0"
# Convert percentage to decimal
if val.endswith("%"):
return float(val[:-1]) / 100.0
if val.endswith("B"):
return float(val[:-1]) * 1_000_000_000
if val.endswith("M"):
return float(val[:-1]) * 1_000_000
if val.endswith("K"):
return float(val[:-1]) * 1000
return float(val)
def lambda_int_or_round_float(x) -> str:
"""Format int or round float."""
# If the data is inf, -inf, or NaN then simply return '~' because it is either too
# large, too small, or we do not have data to display for it
if x in (np.inf, -np.inf, np.nan):
return " " + "~"
if (x - int(x) < -sys.float_info.epsilon) or (x - int(x) > sys.float_info.epsilon):
return " " + str(round(x, 2))
return " " + str(int(x))
def divide_chunks(data, n):
"""Split into chunks."""
# looping till length of data
for i in range(0, len(data), n):
yield data[i : i + n] # noqa: E203
def get_next_stock_market_days(last_stock_day, n_next_days) -> list:
"""Get the next stock market day.
Checks against weekends and holidays.
"""
n_days = 0
l_pred_days = []
years: list = []
holidays: list = []
if isinstance(last_stock_day, datetime):
while n_days < n_next_days:
last_stock_day += timedelta(hours=24)
year = last_stock_day.date().year
if year not in years:
years.append(year)
holidays += us_market_holidays(year)
# Check if it is a weekend
if last_stock_day.date().weekday() > 4:
continue
# Check if it is a holiday
if last_stock_day.strftime("%Y-%m-%d") in holidays:
continue
# Otherwise stock market is open
n_days += 1
l_pred_days.append(last_stock_day)
else:
while n_days < n_next_days:
l_pred_days.append(last_stock_day + 1 + n_days)
n_days += 1
return l_pred_days
def is_intraday(df: pd.DataFrame) -> bool:
"""Check if the data granularity is intraday.
Parameters
----------
df : pd.DataFrame
Price data
Returns
-------
bool
True if data is intraday
"""
granularity = df.index[1] - df.index[0]
intraday = not granularity >= timedelta(days=1)
return intraday
def reindex_dates(df: pd.DataFrame) -> pd.DataFrame:
"""Reindex dataframe to exclude non-trading days.
Resets the index of a df to an integer and prepares the 'date' column to become
x tick labels on a plot.
Parameters
----------
df : pd.DataFrame
Source dataframe
Returns
-------
pd.DataFrame
Reindexed dataframe
"""
date_format = "%b %d %H:%M" if is_intraday(df) else "%Y-%m-%d"
reindexed_df = df.reset_index()
reindexed_df["date"] = reindexed_df["date"].dt.strftime(date_format)
return reindexed_df
def get_data(tweet):
"""Get twitter data from API request."""
if "+" in tweet["created_at"]:
s_datetime = tweet["created_at"].split(" +")[0]
else:
s_datetime = iso8601.parse_date(tweet["created_at"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
s_text = tweet["full_text"] if "full_text" in tweet else tweet["text"]
return {"created_at": s_datetime, "text": s_text}
def clean_tweet(tweet: str, symbol: str) -> str:
"""Clean tweets to be fed to sentiment model."""
whitespace = re.compile(r"\s+")
web_address = re.compile(r"(?i)http(s):\/\/[a-z0-9.~_\-\/]+")
ticker = re.compile(rf"(?i)@{symbol}(?=\b)")
user = re.compile(r"(?i)@[a-z0-9_]+")
tweet = whitespace.sub(" ", tweet)
tweet = web_address.sub("", tweet)
tweet = ticker.sub(symbol, tweet)
tweet = user.sub("", tweet)
return tweet
def get_user_agent() -> str:
"""Get a not very random user agent."""
user_agent_strings = [
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:86.1) Gecko/20100101 Firefox/86.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:86.1) Gecko/20100101 Firefox/86.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:82.1) Gecko/20100101 Firefox/82.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:86.0) Gecko/20100101 Firefox/86.0",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:86.0) Gecko/20100101 Firefox/86.0",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.10; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:84.0) Gecko/20100101 Firefox/84.0",
]
return random.choice(user_agent_strings) # nosec
def text_adjustment_init(self):
"""Adjust text monkey patch for Pandas."""
self.ansi_regx = re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]")
self.encoding = get_option("display.encoding")
def text_adjustment_len(self, text):
"""Get the length of the text adjustment."""
# return compat.strlen(self.ansi_regx.sub("", text), encoding=self.encoding)
return len(self.ansi_regx.sub("", text))
def text_adjustment_justify(self, texts, max_len, mode="right"):
"""Apply 'Justify' text alignment."""
justify = (
str.ljust
if (mode == "left")
else str.rjust
if (mode == "right")
else str.center
)
out = []
for s in texts:
escapes = self.ansi_regx.findall(s)
if len(escapes) == 2:
out.append(
escapes[0].strip()
+ justify(self.ansi_regx.sub("", s), max_len)
+ escapes[1].strip()
)
else:
out.append(justify(s, max_len))
return out
# pylint: disable=unused-argument
def text_adjustment_join_unicode(self, lines, sep=""):
"""Join Unicode."""
try:
return sep.join(lines)
except UnicodeDecodeError:
# sep = compat.text_type(sep)
return sep.join([x.decode("utf-8") if isinstance(x, str) else x for x in lines])
# pylint: disable=unused-argument
def text_adjustment_adjoin(self, space, *lists, **kwargs):
"""Join text."""
# Add space for all but the last column:
pads = ([space] * (len(lists) - 1)) + [0]
max_col_len = max(len(col) for col in lists)
new_cols = []
for col, pad in zip(lists, pads):
width = max(self.len(s) for s in col) + pad
c = self.justify(col, width, mode="left")
# Add blank cells to end of col if needed for different col lens:
if len(col) < max_col_len:
c.extend([" " * width] * (max_col_len - len(col)))
new_cols.append(c)
rows = [self.join_unicode(row_tup) for row_tup in zip(*new_cols)]
return self.join_unicode(rows, sep="\n")
# https://github.com/pandas-dev/pandas/issues/18066#issuecomment-522192922
def patch_pandas_text_adjustment():
"""Set pandas text adjustment settings."""
pandas.io.formats.format.TextAdjustment.__init__ = text_adjustment_init
pandas.io.formats.format.TextAdjustment.len = text_adjustment_len
pandas.io.formats.format.TextAdjustment.justify = text_adjustment_justify
pandas.io.formats.format.TextAdjustment.join_unicode = text_adjustment_join_unicode
pandas.io.formats.format.TextAdjustment.adjoin = text_adjustment_adjoin
def lambda_financials_colored_values(val: str) -> str:
"""Add a color to a value."""
# We don't want to do the color stuff in interactive mode
if get_current_user().preferences.USE_INTERACTIVE_DF:
return val
if val == "N/A" or str(val) == "nan":
val = "[yellow]N/A[/yellow]"
elif sum(c.isalpha() for c in val) < 2:
if "%" in val and "-" in val or "%" not in val and "(" in val:
val = f"[red]{val}[/red]"
elif "%" in val:
val = f"[green]{val}[/green]"
return val
def check_ohlc(type_ohlc: str) -> str:
"""Check that data is in ohlc."""
if bool(re.match("^[ohlca]+$", type_ohlc)):
return type_ohlc
raise argparse.ArgumentTypeError("The type specified is not recognized")
def lett_to_num(word: str) -> str:
"""Match ohlca to integers."""
replacements = [("o", "1"), ("h", "2"), ("l", "3"), ("c", "4"), ("a", "5")]
for a, b in replacements:
word = word.replace(a, b)
return word
AVAILABLE_FLAIRS = {
":openbb": "(🦋)",
":bug": "(🐛)",
":rocket": "(🚀)",
":diamond": "(💎)",
":stars": "(✨)",
":baseball": "(⚾)",
":boat": "(⛵)",
":phone": "(☎)",
":mercury": "(☿)",
":hidden": "",
":sun": "(☼)",
":moon": "(☾)",
":nuke": "(☢)",
":hazard": "(☣)",
":tunder": "(☈)",
":king": "(♔)",
":queen": "(♕)",
":knight": "(♘)",
":recycle": "(♻)",
":scales": "(⚖)",
":ball": "(⚽)",
":golf": "(⛳)",
":piece": "(☮)",
":yy": "(☯)",
}
def get_flair() -> str:
"""Get a flair icon."""
current_user = get_current_user() # pylint: disable=redefined-outer-name
current_flair = str(current_user.preferences.FLAIR)
flair = AVAILABLE_FLAIRS.get(current_flair, current_flair)
if (
current_user.preferences.USE_DATETIME
and get_user_timezone_or_invalid() != "INVALID"
):
dtime = datetime.now(pytz.timezone(get_user_timezone())).strftime(
"%Y %b %d, %H:%M"
)
# if there is no flair, don't add an extra space after the time
if flair == "":
return f"{dtime}"
return f"{dtime} {flair}"
return flair
def is_timezone_valid(user_tz: str) -> bool:
"""Check whether user timezone is valid.
Parameters
----------
user_tz: str
Timezone to check for validity
Returns
-------
bool
True if timezone provided is valid
"""
return user_tz in pytz.all_timezones
def get_user_timezone() -> str:
"""Get user timezone if it is a valid one.
Returns
-------
str
user timezone based on .env file
"""
return get_current_user().preferences.TIMEZONE
def get_user_timezone_or_invalid() -> str:
"""Get user timezone if it is a valid one.
Returns
-------
str
user timezone based on timezone.openbb file or INVALID
"""
user_tz = get_user_timezone()
if is_timezone_valid(user_tz):
return f"{user_tz}"
return "INVALID"
def str_to_bool(value) -> bool:
"""Match a string to a boolean value."""
if isinstance(value, bool):
return value
if value.lower() in {"false", "f", "0", "no", "n"}:
return False
if value.lower() in {"true", "t", "1", "yes", "y"}:
return True
raise ValueError(f"{value} is not a valid boolean value")
def get_screeninfo():
"""Get screeninfo."""
try:
screens = get_monitors() # Get all available monitors
except Exception:
return None
if screens:
current_user = get_current_user()
if (
len(screens) - 1 < current_user.preferences.MONITOR
): # Check to see if chosen monitor is detected
monitor = 0
console.print(
f"Could not locate monitor {current_user.preferences.MONITOR}, using primary monitor."
)
else:
monitor = current_user.preferences.MONITOR
main_screen = screens[monitor] # Choose what monitor to get
return (main_screen.width, main_screen.height)
return None
def plot_autoscale():
"""Autoscale plot."""
current_user = get_current_user()
screen_info = get_screeninfo()
if current_user.preferences.USE_PLOT_AUTOSCALING and screen_info:
x, y = screen_info # Get screen size
# account for ultrawide monitors
if x / y > 1.5:
x = x * 0.4
x = ((x) * current_user.preferences.PLOT_WIDTH_PERCENTAGE * 10**-2) / (
current_user.preferences.PLOT_DPI
) # Calculate width
if current_user.preferences.PLOT_HEIGHT_PERCENTAGE == 100: # If full height
y = y - 60 # Remove the height of window toolbar
y = ((y) * current_user.preferences.PLOT_HEIGHT_PERCENTAGE * 10**-2) / (
current_user.preferences.PLOT_DPI
)
else: # If not autoscale, use size defined in config_plot.py
x = current_user.preferences.PLOT_WIDTH / (current_user.preferences.PLOT_DPI)
y = current_user.preferences.PLOT_HEIGHT / (current_user.preferences.PLOT_DPI)
return x, y
def get_last_time_market_was_open(dt):
"""Get last time the US market was open."""
# Check if it is a weekend
if dt.date().weekday() > 4:
dt = get_last_time_market_was_open(dt - timedelta(hours=24))
# Check if it is a holiday
if dt.strftime("%Y-%m-%d") in us_holidays():
dt = get_last_time_market_was_open(dt - timedelta(hours=24))
dt = dt.replace(hour=21, minute=0, second=0)
return dt
def check_file_type_saved(valid_types: Optional[List[str]] = None):
"""Provide valid types for the user to be able to select.
Parameters
----------
valid_types: List[str]
List of valid types to export data
Returns
-------
check_filenames: Optional[List[str]]
Function that returns list of filenames to export data
"""
def check_filenames(filenames: str = "") -> str:
"""Check if filenames are valid.
Parameters
----------
filenames: str
filenames to be saved separated with comma
Returns
----------
str
valid filenames separated with comma
"""
if not filenames or not valid_types:
return ""
valid_filenames = list()
for filename in filenames.split(","):
if filename.endswith(tuple(valid_types)):
valid_filenames.append(filename)
else:
console.print(
f"[red]Filename '{filename}' provided is not valid!\nPlease use one of the following file types:"
f"{','.join(valid_types)}[/red]\n"
)
return ",".join(valid_filenames)
return check_filenames
def compose_export_path(func_name: str, dir_path: str) -> Path:
"""Compose export path for data from the terminal.
Creates a path to a folder and a filename based on conditions.
Parameters
----------
func_name : str
Name of the command that invokes this function
dir_path : str
Path of directory from where this function is called
Returns
-------
Path
Path variable containing the path of the exported file
"""
now = datetime.now()
# Resolving all symlinks and also normalizing path.
resolve_path = Path(dir_path).resolve()
# Getting the directory names from the path. Instead of using split/replace (Windows doesn't like that)
# check if this is done in a main context to avoid saving with openbb_terminal
if resolve_path.parts[-2] == "openbb_terminal":
path_cmd = f"{resolve_path.parts[-1]}"
else:
path_cmd = f"{resolve_path.parts[-2]}_{resolve_path.parts[-1]}"
default_filename = f"{now.strftime('%Y%m%d_%H%M%S')}_{path_cmd}_{func_name}"
full_path = get_current_user().preferences.USER_EXPORTS_DIRECTORY / default_filename
return full_path
def ask_file_overwrite(file_path: Path) -> Tuple[bool, bool]:
"""Helper to provide a prompt for overwriting existing files.
Returns two values, the first is a boolean indicating if the file exists and the
second is a boolean indicating if the user wants to overwrite the file.
"""
# Jeroen asked for a flag to overwrite no matter what
current_user = get_current_user()
if current_user.preferences.FILE_OVERWRITE:
return False, True
if get_current_system().TEST_MODE:
return False, True
if file_path.exists():
overwrite = input("\nFile already exists. Overwrite? [y/n]: ").lower()
if overwrite == "y":
file_path.unlink(missing_ok=True)
# File exists and user wants to overwrite
return True, True
# File exists and user does not want to overwrite
return True, False
# File does not exist
return False, True
# This is a false positive on pylint and being tracked in pylint #3060
# pylint: disable=abstract-class-instantiated
def export_data(
export_type: str,
dir_path: str,
func_name: str,
df: pd.DataFrame = pd.DataFrame(),
sheet_name: Optional[str] = None,
figure: Optional[OpenBBFigure] = None,
margin: bool = True,
) -> None:
"""Export data to a file.
Parameters
----------
export_type : str
Type of export between: csv,json,xlsx,xls
dir_path : str
Path of directory from where this function is called
func_name : str
Name of the command that invokes this function
df : pd.Dataframe
Dataframe of data to save
sheet_name : str
If provided. The name of the sheet to save in excel file
figure : Optional[OpenBBFigure]
Figure object to save as image file
margin : bool
Automatically adjust subplot parameters to give specified padding.
"""
if not figure:
figure = OpenBBFigure()
if export_type:
saved_path = compose_export_path(func_name, dir_path).resolve()
saved_path.parent.mkdir(parents=True, exist_ok=True)
for exp_type in export_type.split(","):
# In this scenario the path was provided, e.g. --export pt.csv, pt.jpg
if "." in exp_type:
saved_path = saved_path.with_name(exp_type)
# In this scenario we use the default filename
else:
if ".OpenBB_openbb_terminal" in saved_path.name:
saved_path = saved_path.with_name(
saved_path.name.replace(
".OpenBB_openbb_terminal", "OpenBBTerminal"
)
)
saved_path = saved_path.with_suffix(f".{exp_type}")
exists, overwrite = False, False
is_xlsx = exp_type.endswith("xlsx")
if sheet_name is None and is_xlsx or not is_xlsx:
exists, overwrite = ask_file_overwrite(saved_path)
if exists and not overwrite:
existing = len(list(saved_path.parent.glob(saved_path.stem + "*")))
saved_path = saved_path.with_stem(f"{saved_path.stem}_{existing + 1}")
df = df.replace(
{
r"\[yellow\]": "",
r"\[/yellow\]": "",
r"\[green\]": "",
r"\[/green\]": "",
r"\[red\]": "",
r"\[/red\]": "",
r"\[magenta\]": "",
r"\[/magenta\]": "",
},
regex=True,
)
df = df.applymap(revert_lambda_long_number_format)
if exp_type.endswith("csv"):
df.to_csv(saved_path)
elif exp_type.endswith("json"):
df.reset_index(drop=True, inplace=True)
df.to_json(saved_path)
elif exp_type.endswith("xlsx"):
# since xlsx does not support datetimes with timezones we need to remove it
df = remove_timezone_from_dataframe(df)
if sheet_name is None: # noqa: SIM223
df.to_excel(
saved_path,
index=True,
header=True,
)
elif saved_path.exists():
with pd.ExcelWriter(
saved_path,
mode="a",
if_sheet_exists="new",
engine="openpyxl",
) as writer:
df.to_excel(
writer, sheet_name=sheet_name, index=True, header=True
)
else:
with pd.ExcelWriter(
saved_path,
engine="openpyxl",
) as writer:
df.to_excel(
writer, sheet_name=sheet_name, index=True, header=True
)
elif saved_path.suffix in [".jpg", ".pdf", ".png", ".svg"]:
figure.show(export_image=saved_path, margin=margin)
else:
console.print("Wrong export file specified.")
continue
console.print(f"Saved file: {saved_path}")
figure._exported = True # pylint: disable=protected-access
def get_rf() -> float:
"""Use the fiscaldata.gov API to get most recent T-Bill rate.
Returns
-------
rate : float
The current US T-Bill rate
"""
try:
base = "https://api.fiscaldata.treasury.gov/services/api/fiscal_service"
end = "/v2/accounting/od/avg_interest_rates"
filters = "?filter=security_desc:eq:Treasury Bills&sort=-record_date"
response = request(base + end + filters)
latest = response.json()["data"][0]
return round(float(latest["avg_interest_rate_amt"]) / 100, 8)
except Exception:
return 0.02
def system_clear():
"""Clear screen."""
os.system("cls||clear") # nosec
def excel_columns() -> List[str]:
"""Return potential columns for excel.
Returns
-------
letters : List[str]
Letters to be used as excel columns
"""
letters = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M"]
letters += ["N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]
opts = (
[f"{x}" for x in letters]
+ [f"{x}{y}" for x in letters for y in letters]
+ [f"{x}{y}{z}" for x in letters for y in letters for z in letters]
)
return opts
def handle_error_code(requests_obj, error_code_map):
"""Handle error code of HTTP requests.
Parameters
----------
requests_obj: Object
Request object
error_code_map: Dict
Dictionary mapping of HTTP error code and output message
"""
for error_code, error_msg in error_code_map.items():
if requests_obj.status_code == error_code:
console.print(error_msg)
def prefill_form(ticket_type, menu, path, command, message):
"""Pre-fill Google Form and open it in the browser."""
form_url = "https://my.openbb.co/app/terminal/support?"
params = {
"type": ticket_type,
"menu": menu,
"path": path,
"command": command,
"message": message,
}
url_params = urllib.parse.urlencode(params)
webbrowser.open(form_url + url_params)
def get_closing_price(ticker, days):
"""Get historical close price for n days in past for market asset.
Parameters
----------
ticker : str
Ticker to get data for
days : datetime
No. of days in past
Returns
-------
data : pd.DataFrame
Historic close prices for ticker for given days
"""
tick = yf.Ticker(ticker)
df = tick.history(
start=d.today() - timedelta(days=days),
interval="1d",
)["Close"]
df = df.to_frame().reset_index()
df = df.rename(columns={0: "Close"})
df.index.name = "index"
return df
def camel_case_split(string: str) -> str:
"""Convert a camel-case string to separate words.
Parameters
----------
string : str
The string to be converted
Returns
-------
new_string: str
The formatted string
"""
words = [[string[0]]]
for c in string[1:]:
if words[-1][-1].islower() and c.isupper():
words.append(list(c))
else:
words[-1].append(c)
results = ["".join(word) for word in words]
return " ".join(results).title()
def is_valid_axes_count(
axes: List[plt.Axes],
n: int,
custom_text: Optional[str] = None,
prefix_text: Optional[str] = None,
suffix_text: Optional[str] = None,
):
"""Check if axes list length is equal to n and log text if check result is false.
Parameters
----------
axes: List[plt.Axes]
External axes (2 axes are expected in the list)
n: int
number of expected axes
custom_text: Optional[str] = None
custom text to log
prefix_text: Optional[str] = None
prefix text to add before text to log
suffix_text: Optional[str] = None
suffix text to add after text to log
"""
if len(axes) == n:
return True
print_text = (
custom_text
if custom_text
else f"Expected list of {n} axis item{'s' if n > 1 else ''}."
)
if prefix_text:
print_text = f"{prefix_text} {print_text}"
if suffix_text:
print_text = f"{suffix_text} {print_text}"
logger.error(print_text)
console.print(f"[red]{print_text}\n[/red]")
return False
def support_message(s: str) -> str:
"""Argparse type to check string is in valid format for the support command."""
return s.replace('"', "")
def check_list_values(valid_values: List[str]):
"""Get valid values to test arguments given by user.
Parameters
----------
valid_values: List[str]
List of valid values to be checked
Returns
-------
check_list_values_from_valid_values_list:
Function that ensures that the valid values go through and notifies user when value is not valid.
"""
# Define the function with default arguments
def check_list_values_from_valid_values_list(given_values: str) -> List[str]:
"""Check if argparse argument is an str format.
Ensure that value1,value2,value3 and that the values value1, value2 and value3 are valid.
Parameters
----------
given_values: str
values provided by the user
Raises
------
argparse.ArgumentTypeError
Input number not between min and max values
"""
success_values = list()
values_found = (
[val.strip() for val in given_values.split(",")]
if "," in given_values
else [given_values]
)
for value in values_found:
# check if the value is valid
if value in valid_values:
success_values.append(value)
else:
console.print(f"[red]'{value}' is not valid.[/red]")
if not success_values:
log_and_raise(
argparse.ArgumentTypeError("No correct arguments have been found")
)
return success_values
# Return function handle to checking function
return check_list_values_from_valid_values_list
def search_wikipedia(expression: str) -> None:
"""Search wikipedia for a given expression.
Parameters
----------
expression: str
Expression to search for
"""
url = f"https://en.wikipedia.org/api/rest_v1/page/summary/{expression}"
response = requests.request("GET", url, headers={}, data={})
if response.status_code == 200:
response_json = json.loads(response.text)
res = {
"title": response_json["title"],
"url": f"{response_json['content_urls']['desktop']['page']}",
"summary": response_json["extract"],
}
else:
res = {
"title": "[red]Not Found[/red]",
}
df = pd.json_normalize(res)
print_rich_table(
df,
headers=list(df.columns),
show_index=False,
title=f"Wikipedia results for {expression}",
)
def screenshot() -> None:
"""Screenshot the terminal window or the plot window.
Parameters
----------
terminal_window_target: bool
Target the terminal window
"""
try:
if plt.get_fignums():
img_buf = io.BytesIO()
plt.savefig(img_buf, format="png")
shot = Image.open(img_buf)
screenshot_to_canvas(shot, plot_exists=True)
else:
console.print("No plots found.\n")
except Exception as err:
console.print(f"Cannot reach window - {err}\n")
def screenshot_to_canvas(shot, plot_exists: bool = False):
"""Frame image to OpenBB canvas.
Parameters
----------
shot
Image to frame with OpenBB Canvas
plot_exists: bool
Variable to say whether the image is a plot or screenshot of terminal
"""
WHITE_LINE_WIDTH = 3
OUTSIDE_CANVAS_WIDTH = shot.width + 4 * WHITE_LINE_WIDTH + 5
OUTSIDE_CANVAS_HEIGHT = shot.height + 4 * WHITE_LINE_WIDTH + 5
UPPER_SPACE = 40
BACKGROUND_WIDTH_SLACK = 150
BACKGROUND_HEIGHT_SLACK = 150
background = Image.open(
Path(os.path.abspath(__file__), "../../images/background.png")
)
logo = Image.open(
Path(os.path.abspath(__file__), "../../images/openbb_horizontal_logo.png")
)
try:
if plot_exists:
HEADER_HEIGHT = 0
RADIUS = 8
background = background.resize(
(
shot.width + BACKGROUND_WIDTH_SLACK,
shot.height + BACKGROUND_HEIGHT_SLACK,
)
)
x = int((background.width - OUTSIDE_CANVAS_WIDTH) / 2)
y = UPPER_SPACE
white_shape = (
(x, y),
(x + OUTSIDE_CANVAS_WIDTH, y + OUTSIDE_CANVAS_HEIGHT),
)
img = ImageDraw.Draw(background)
img.rounded_rectangle(
white_shape,
fill="black",
outline="white",
width=WHITE_LINE_WIDTH,
radius=RADIUS,
)
background.paste(shot, (x + WHITE_LINE_WIDTH + 5, y + WHITE_LINE_WIDTH + 5))
# Logo
background.paste(
logo,
(
int((background.width - logo.width) / 2),
UPPER_SPACE
+ OUTSIDE_CANVAS_HEIGHT
+ HEADER_HEIGHT
+ int(
(
background.height
- UPPER_SPACE
- OUTSIDE_CANVAS_HEIGHT
- HEADER_HEIGHT
- logo.height
)
/ 2
),
),
logo,
)
background.show(title="screenshot")
except Exception:
console.print("Shot failed.")
@lru_cache
def load_json(path: Path) -> Dict[str, str]:
"""Load a dictionary from a json file path.
Parameter
----------
path : Path
The path for the json file
Returns
-------
Dict[str, str]
The dictionary loaded from json
"""
try:
with open(path) as file:
return json.load(file)
except Exception as e:
console.print(
f"[red]Failed to load preferred source from file: "
f"{get_current_user().preferences.USER_DATA_SOURCES_FILE}[/red]"
)
console.print(f"[red]{e}[/red]")
return {}
def list_from_str(value: str) -> List[str]:
"""Convert a string to a list.
Parameter
---------
value : str
The string to convert
Returns
-------
new_value: List[str]
The list of strings
"""
if value:
return value.split(",")
return []
def str_date_to_timestamp(date: str) -> int:
"""Transform string date to timestamp
Parameters
----------
start_date : str
Initial date, format YYYY-MM-DD
Returns
-------
date_ts : int
Initial date timestamp (e.g., 1_614_556_800)
"""
date_ts = int(
datetime.strptime(date + " 00:00:00+0000", "%Y-%m-%d %H:%M:%S%z").timestamp()
)
return date_ts
def check_start_less_than_end(start_date: str, end_date: str) -> bool:
"""Check if start_date is equal to end_date.
Parameters
----------
start_date : str
Initial date, format YYYY-MM-DD
end_date : str
Final date, format YYYY-MM-DD
Returns
-------
bool
True if start_date is not equal to end_date, False otherwise
"""
if start_date is None or end_date is None:
return False
if start_date == end_date:
console.print("[red]Start date and end date cannot be the same.[/red]")
return True
if start_date > end_date:
console.print("[red]Start date cannot be greater than end date.[/red]")
return True
return False
# Write an abstract helper to make requests from a url with potential headers and params
def request(
url: str, method: str = "GET", timeout: int = 0, **kwargs
) -> requests.Response:
"""Abstract helper to make requests from a url with potential headers and params.
Parameters
----------
url : str
Url to make the request to
method : str, optional
HTTP method to use. Can be "GET" or "POST", by default "GET"
Returns
-------
requests.Response
Request response object
Raises
------
ValueError
If invalid method is passed
"""
current_user = get_current_user()
# We want to add a user agent to the request, so check if there are any headers
# If there are headers, check if there is a user agent, if not add one.
# Some requests seem to work only with a specific user agent, so we want to be able to override it.
headers = kwargs.pop("headers", {})
timeout = timeout or current_user.preferences.REQUEST_TIMEOUT
if "User-Agent" not in headers:
headers["User-Agent"] = get_user_agent()
if method.upper() == "GET":
return requests.get(
url,
headers=headers,
timeout=timeout,
**kwargs,
)
if method.upper() == "POST":
return requests.post(
url,
headers=headers,
timeout=timeout,
**kwargs,
)
raise ValueError("Method must be GET or POST")
def remove_timezone_from_dataframe(df: pd.DataFrame) -> pd.DataFrame:
"""
Remove timezone information from a dataframe.
Parameters
----------
df : pd.DataFrame
The dataframe to remove timezone information from
Returns
-------
pd.DataFrame
The dataframe with timezone information removed
"""
date_cols = []
index_is_date = False
# Find columns and index containing date data
if (
df.index.dtype.kind == "M"
and hasattr(df.index.dtype, "tz")
and df.index.dtype.tz is not None
):
index_is_date = True
for col, dtype in df.dtypes.items():
if dtype.kind == "M" and hasattr(df.index.dtype, "tz") and dtype.tz is not None:
date_cols.append(col)
# Remove the timezone information
for col in date_cols:
df[col] = df[col].dt.date
if index_is_date:
index_name = df.index.name
df.index = df.index.date
df.index.name = index_name
return df
@check_api_key(["API_OPENAI_KEY"])
def query_LLM(query_text, gpt_model):
current_user = get_current_user()
os.environ["OPENAI_API_KEY"] = current_user.credentials.API_OPENAI_KEY
# check if index exists
index_path = GPT_INDEX_DIRECTORY / f"index_{GPT_INDEX_VER}.json"
old_index_paths = [
str(x) for x in GPT_INDEX_DIRECTORY.glob("index_*.json") if x != index_path
]
# define LLM
llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=gpt_model))
# define prompt helper
prompt_helper = PromptHelper(max_input_size=4096, num_output=256)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor, prompt_helper=prompt_helper
)
if os.path.exists(index_path):
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir=index_path)
index = load_index_from_storage(
service_context=service_context, storage_context=storage_context
)
else:
# If the index file doesn't exist or is of incorrect version, generate a new one
# First, remove old version(s), if any
for path in old_index_paths:
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
# Then, generate and save new index
# import from print console and say generating index, this might take a while
console.print("Generating index, this might take a while....\n")
# read in documents
documents = SimpleDirectoryReader(GPT_INDEX_DIRECTORY / "data/").load_data()
index = GPTVectorStoreIndex.from_documents(
documents, service_context=service_context
)
# save to disk
console.print("Saving index to disk....\n")
index.storage_context.persist(index_path)
prompt_string = f"""From argparse help text above, provide the terminal
command for {query_text}.Provide the exact command along with the parent command
with a "/" separation to get that information,and nothing else including any
explanation. Don't add any other word such as 'Command to get', 'Answer' or the likes.
Remember, it is very important to provide the full path of the command. Pay
attention to the parent commands, and make sure that if you were to run a command that is located
in a submenu, that it will have the full path included as if you were running
from the root directory. If and only if there is no information in the argparse help text above,
then just provide information on how to find that answer through normal financial terms.
Only do what is asked and provide a single command string. Always use a comma to separate between countries but
never between full commands. Lower cap the country name.
"""
# try to get the response from the index
try:
query_engine = index.as_query_engine()
response = query_engine.query(prompt_string)
return response.response
except Exception as e:
# check if the error has the following "The model: `gpt-4` does not exist"
if "The model: `gpt-4` does not exist" in str(e):
console.print(
"[red]You do not have access to GPT4 model with your API key."
" Please try again with valid API Access.[/red]"
)
return None
console.print(f"[red]Something went wrong with the query. {e}[/red]")
return None
| [
"From argparse help text above, provide the terminal\n command for PLACEHOLDER.Provide the exact command along with the parent command\n with a \"/\" separation to get that information,and nothing else including any\n explanation. Don't add any other word such as 'Command to get', 'Answer' or the likes.\n Remember, it is very important to provide the full path of the command. Pay\n attention to the parent commands, and make sure that if you were to run a command that is located\n in a submenu, that it will have the full path included as if you were running\n from the root directory. If and only if there is no information in the argparse help text above,\n then just provide information on how to find that answer through normal financial terms.\n Only do what is asked and provide a single command string. Always use a comma to separate between countries but\n never between full commands. Lower cap the country name.\n "
] |
2024-01-10 | annaliuu/LIGN167-Final-Project | process_materials.py | import os
import openai
# Part 1: Data Processing
def read_markdown_file(file_path):
try:
with open(file_path, 'r', encoding='utf-8') as file:
content = file.read()
return content
except FileNotFoundError:
print(f"File not found: {file_path}")
return None
def process_lecture_materials(directory='lectures'):
topics = {
"What is Language": "2_what_is_language.mp4.wav.txt",
"Phonetics": "4_phonetics_1.mp4.wav.txt",
"Phonology": "6_phonology_1.mp4.wav.txt",
"Morphology": "8_morphology.mp4.wav.txt",
"Syntax": "10_syntax1.mov.wav.txt",
"Semantics/Pragmatics": "14_semantics_and_pragmatics.mp4.wav.txt",
"Language Families": "16_language_families.mp4.wav.txt"
}
processed_materials = {}
for topic, filename in topics.items():
file_path = os.path.join(directory, filename)
lecture_content = read_markdown_file(file_path)
if lecture_content is not None:
processed_materials[topic] = lecture_content
return processed_materials
# Part 2: Question Generation
class QuestionBank:
def __init__(self, api_key, lecture_materials):
self.api_key = api_key
self.lecture_materials = lecture_materials
# Code to summarize the entire lesson
# Not used since it heavily uses the API
def summarize_chunk(self, chunk):
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "This is a text summarization task. Summarize the following text."},
{"role": "user", "content": chunk}
],
max_tokens=150, # Adjust based on your needs for summary length
api_key=api_key
)
return response.choices[0].message.content.strip()
except Exception as e:
print(f"An error occurred: {e}")
return None
def generate_question(self, topic):
max_length = 3500
long_text = lecture_materials[topic]
# Alternative usage using the API summary; not being used
# chunks = [long_text[i:i + max_length]
# for i in range(0, len(long_text), max_length)]
# sum_chunks = [str(self.summarize_chunk(chunk)) for chunk in chunks]
# summary = ''.join(sum_chunks)
summary = long_text[:max_length]
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # Specify the engine you want to use
messages=[
{"role": "system", "content": "This is a question generation session. Create a good multiple choice question for an exam based on the provided summary. Do not reference the lecture at all."},
{"role": "user", "content": f"Create a multiple choice question with four answers about the topic:{topic} using this content: {summary}.\n This questions should be for an exam, and should not reference this specific lecture but rather the topic. Follow the format of this example:\nQ: How many letters in the word 'crazy'?\nA. 8\nB. 16\nC. 5\nD. 18"}
],
max_tokens=150, # The maximum number of tokens to generate in the response
api_key=api_key
)
question = response.choices[0].message.content.strip()
return question, summary
except Exception as e:
print(f"An error occurred: {e}")
return "Error generating question"
def generate_answer(self, question, summary):
answer = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # Specify the engine you want to use
messages=[{"role": "system", "content": "This is a question and answer session. Answer the question based on the provided summary."},
{"role": "user", "content": f"What is the correct answer to {question} based on {summary}? Respond with exactly one letter, and the explanation."}],
max_tokens=150, # The maximum number of tokens to generate in the response
api_key=api_key
)
return answer.choices[0].message.content.strip()
# Example usage
# Replace with your actual API key
api_key = "insert_api_key"
lecture_materials = process_lecture_materials()
question_bank = QuestionBank(api_key, lecture_materials)
# Generate a question based on performance
# next_question = question_bank.generate_question(
# 'Introducing Language and Dialect')
# print(next_question[0])
# ans = question_bank.generate_answer(next_question[0], next_question[1])
# print(ans)
| [
"This is a text summarization task. Summarize the following text.",
"What is the correct answer to PLACEHOLDER based on PLACEHOLDER? Respond with exactly one letter, and the explanation.",
"This is a question and answer session. Answer the question based on the provided summary.",
"Create a multiple choice question with four answers about the topic:PLACEHOLDER using this content: PLACEHOLDER.\n This questions should be for an exam, and should not reference this specific lecture but rather the topic. Follow the format of this example:\nQ: How many letters in the word 'crazy'?\nA. 8\nB. 16\nC. 5\nD. 18",
"This is a question generation session. Create a good multiple choice question for an exam based on the provided summary. Do not reference the lecture at all."
] |
2024-01-10 | aditya808324/summerproject | cg.py | #!/usr/bin/python3
import cgi
import json
import openai
import time
import cgitb
# Set the appropriate headers for a CGI script
print("Content-Type: text/html")
cgitb.enable()
print()
# Get the user's message from the request
form = cgi.FieldStorage()
user_message = form.getvalue('message')
openai.api_key = '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'
# Generate response using OpenAI GPT-3
response = openai.Completion.create(
engine='text-davinci-003',
prompt=user_message,
max_tokens=50,
temperature=1.2,
n=1,
stop=None
)
print(response.choices[0].text.strip()) | [] |
2024-01-10 | HumbleSituation164/exmol | exmol~exmol.py | from functools import reduce
import inspect
from typing import *
import io
import math
import requests # type: ignore
import numpy as np
import matplotlib.pyplot as plt # type: ignore
from matplotlib.patches import Rectangle, FancyBboxPatch # type: ignore
from matplotlib.offsetbox import AnnotationBbox # type: ignore
import matplotlib as mpl # type: ignore
import selfies as sf # type: ignore
import tqdm # type: ignore
import textwrap # type: ignore
import skunk # type: ignore
from ratelimit import limits, sleep_and_retry # type: ignore
from sklearn.cluster import DBSCAN # type: ignore
from sklearn.decomposition import PCA # type: ignore
import scipy.stats as ss # type: ignore
from rdkit.Chem import MolFromSmiles as smi2mol # type: ignore
from rdkit.Chem import MolFromSmarts # type: ignore
from rdkit.Chem import MolToSmiles as mol2smi # type: ignore
from rdkit.Chem import rdchem, MACCSkeys, AllChem # type: ignore
from rdkit.Chem.Draw import MolToImage as mol2img, DrawMorganBit # type: ignore
from rdkit.Chem import rdchem # type: ignore
from rdkit.DataStructs.cDataStructs import BulkTanimotoSimilarity, TanimotoSimilarity # type: ignore
from . import stoned
from .plot_utils import _mol_images, _image_scatter, _bit2atoms
from .data import *
def _fp_dist_matrix(smiles, fp_type, _pbar):
mols = [(smi2mol(s), _pbar.update(0.5))[0] for s in smiles]
# Sorry about the one-line. Just sneaky insertion of progressbar update
fp = [(stoned.get_fingerprint(m, fp_type), _pbar.update(0.5))[0] for m in mols]
M = np.array([BulkTanimotoSimilarity(f, fp) for f in fp])
# 1 - similarity because we want distance
return 1 - M
def _check_multiple_bases(examples):
return sum([e.is_origin for e in examples]) > 1
def _ecfp_names(examples, joint_bits):
# add names for given descriptor indices
multiple_bases = _check_multiple_bases(examples)
# need to get base molecule(s) for naming
bitInfo = {} # Type Dict[Any, Any]
base_mol = [smi2mol(e.smiles) for e in examples if e.is_origin == True]
if multiple_bases:
multiBitInfo = {} # type: Dict[int, Tuple[Any, int, int]]
for b in base_mol:
bitInfo = {}
AllChem.GetMorganFingerprint(b, 3, bitInfo=bitInfo)
for bit in bitInfo:
if bit not in multiBitInfo:
multiBitInfo[bit] = (b, bit, {bit: bitInfo[bit]})
else:
base_mol = smi2mol(examples[0].smiles)
bitInfo = {} # type: Dict[Any, Any]
AllChem.GetMorganFingerprint(base_mol, 3, bitInfo=bitInfo)
result = [] # type: List[str]
for i in range(len(joint_bits)):
k = joint_bits[i]
if multiple_bases:
m = multiBitInfo[k][0]
b = multiBitInfo[k][2]
name = name_morgan_bit(m, b, k)
else:
name = name_morgan_bit(base_mol, bitInfo, k)
result.append(name)
return tuple(result)
def _calculate_rdkit_descriptors(mol):
from rdkit.ML.Descriptors import MoleculeDescriptors # type: ignore
dlist = [
"NumHDonors",
"NumHAcceptors",
"MolLogP",
"NumHeteroatoms",
"RingCount",
"NumRotatableBonds",
] # , 'NumHeteroatoms']
c = MoleculeDescriptors.MolecularDescriptorCalculator(dlist)
d = c.CalcDescriptors(mol)
def calc_aromatic_bonds(mol):
return sum(1 for b in mol.GetBonds() if b.GetIsAromatic())
def _create_smarts(SMARTS):
s = ",".join("$(" + s + ")" for s in SMARTS)
_mol = MolFromSmarts("[" + s + "]")
return _mol
def calc_acid_groups(mol):
acid_smarts = (
"[O;H1]-[C,S,P]=O",
"[*;-;!$(*~[*;+])]",
"[NH](S(=O)=O)C(F)(F)F",
"n1nnnc1",
)
pat = _create_smarts(acid_smarts)
return len(mol.GetSubstructMatches(pat))
def calc_basic_groups(mol):
basic_smarts = (
"[NH2]-[CX4]",
"[NH](-[CX4])-[CX4]",
"N(-[CX4])(-[CX4])-[CX4]",
"[*;+;!$(*~[*;-])]",
"N=C-N",
"N-C=N",
)
pat = _create_smarts(basic_smarts)
return len(mol.GetSubstructMatches(pat))
def calc_apol(mol, includeImplicitHs=True):
# atomic polarizabilities available here:
# https://github.com/mordred-descriptor/mordred/blob/develop/mordred/data/polarizalibity78.txt
from importlib_resources import files # type: ignore
import exmol.lime_data # type: ignore
ap = files(exmol.lime_data).joinpath("atom_pols.txt")
with open(ap, "r") as f:
atom_pols = [float(x) for x in next(f).split(",")]
res = 0.0
for atom in mol.GetAtoms():
anum = atom.GetAtomicNum()
if anum <= len(atom_pols):
apol = atom_pols[anum]
if includeImplicitHs:
apol += atom_pols[1] * atom.GetTotalNumHs(includeNeighbors=False)
res += apol
else:
raise ValueError(f"atomic number {anum} not found")
return res
d = d + (
calc_aromatic_bonds(mol),
calc_acid_groups(mol),
calc_basic_groups(mol),
calc_apol(mol),
)
return d
def _get_joint_ecfp_descriptors(examples):
"""Create a union of ECFP bits from all base molecules"""
# get reference
bases = [smi2mol(e.smiles) for e in examples if e.is_origin]
ecfp_joint = set()
for m in bases:
# Get bitinfo and create a union
b = {} # type: Dict[Any, Any]
temp_fp = AllChem.GetMorganFingerprint(m, 3, bitInfo=b)
# add if radius greater than 0
ecfp_joint |= set([(k, v[0][1]) for k, v in b.items() if v[0][1] > 0])
# want to go in order of radius so when
# we drop non-unique names, we keep smaller fragments
ecfp_joint = list(ecfp_joint)
ecfp_joint.sort(key=lambda x: x[1])
ecfp_joint = [x[0] for x in ecfp_joint]
names = _ecfp_names(examples, ecfp_joint)
# downselect to only unique names
unique_names = set(names)
output_ecfp = []
output_names = []
for b, n in zip(ecfp_joint, names):
if n in unique_names and n is not None:
unique_names.remove(n)
output_ecfp.append(b)
output_names.append(n)
return output_ecfp, output_names
_SMARTS = None
def _load_smarts(path, rank_cutoff=500):
# we have a rank cut for SMARTS that match too often
smarts = {}
with open(path) as f:
for line in f.readlines():
if line[0] == "#":
continue
i1 = line.find(":")
i2 = line.find(":", i1 + 1)
m = MolFromSmarts(line[i2 + 1 :].strip())
rank = int(line[i1 + 1 : i2])
if rank > rank_cutoff:
continue
name = line[:i1]
if m is None:
print(f"Could not parse SMARTS: {line}")
print(line[i2:].strip())
smarts[name] = (m, rank)
return smarts
def name_morgan_bit(m: Any, bitInfo: Dict[Any, Any], key: int) -> str:
"""Get the name of a Morgan bit using a SMARTS dictionary
:param m: RDKit molecule
:param bitInfo: bitInfo dictionary from rdkit.Chem.AllChem.GetMorganFingerprint
:param key: bit key corresponding to the fingerprint you want to have named
"""
global _SMARTS
if _SMARTS is None:
from importlib_resources import files # type: ignore
import exmol.lime_data # type: ignore
sp = files(exmol.lime_data).joinpath("smarts.txt")
_SMARTS = _load_smarts(sp)
morgan_atoms = _bit2atoms(m, bitInfo, key)
heteroatoms = set()
for a in morgan_atoms:
if m.GetAtomWithIdx(a).GetAtomicNum() > 6:
heteroatoms.add(a)
names = []
for name, (sm, r) in _SMARTS.items():
matches = m.GetSubstructMatches(sm)
for match in matches:
# check if match is in morgan bit
match = set(match)
if match.issubset(morgan_atoms):
names.append((r, name, match))
names.sort(key=lambda x: x[0])
if len(names) == 0:
return None
umatch = names[0][2]
name = names[0][1][0].lower() + names[0][1][1:].replace("_", " ")
unique_names = set([names[0][1]])
for _, n, m in names:
if len(m.intersection(umatch)) == 0:
if n not in unique_names:
name += "/" + n[0].lower() + n[1:].replace("_", " ")
umatch |= m
unique_names.add(n)
if "/" in name and "fragment" not in name.split("/")[-1]:
name = name + " group"
# if we failed to match all heteroatoms, fail
if len(heteroatoms.difference(umatch)) > 0:
return None
return name
def clear_descriptors(
examples: List[Example],
) -> List[Example]:
"""Clears all descriptors from examples
:param examples: list of examples
:param descriptor_type: type of descriptor to clear, if None, all descriptors are cleared
"""
for e in examples:
e.descriptors = None # type: ignore
return examples
def add_descriptors(
examples: List[Example],
descriptor_type: str = "MACCS",
mols: List[Any] = None,
) -> List[Example]:
"""Add descriptors to passed examples
:param examples: List of example
:param descriptor_type: Kind of descriptors to return, choose between 'Classic', 'ECFP', or 'MACCS'. Default is 'MACCS'.
:param mols: Can be used if you already have rdkit Mols computed.
:return: List of examples with added descriptors
"""
from importlib_resources import files
import exmol.lime_data
if mols is None:
mols = [smi2mol(m.smiles) for m in examples]
if descriptor_type.lower() == "classic":
names = tuple(
[
"number of hydrogen bond donor",
"number of hydrogen bond acceptor",
"Wildman-Crippen LogP",
"number of heteroatoms",
"ring count",
"number of rotatable bonds",
"aromatic bonds count",
"acidic group count",
"basic group count",
"atomic polarizability",
]
)
for e, m in zip(examples, mols):
descriptors = _calculate_rdkit_descriptors(m)
descriptor_names = names
e.descriptors = Descriptors(
descriptor_type=descriptor_type,
descriptors=descriptors,
descriptor_names=descriptor_names,
plotting_names=descriptor_names,
)
return examples
elif descriptor_type.lower() == "maccs":
mk = files(exmol.lime_data).joinpath("MACCSkeys.txt")
with open(str(mk), "r") as f:
names = tuple([x.strip().split("\t")[-1] for x in f.readlines()[1:]])
for e, m in zip(examples, mols):
# rdkit sets fps[0] to 0 and starts keys at 1!
fps = list(MACCSkeys.GenMACCSKeys(m).ToBitString())
descriptors = tuple(int(i) for i in fps)
descriptor_names = names
e.descriptors = Descriptors(
descriptor_type=descriptor_type,
descriptors=descriptors,
descriptor_names=descriptor_names,
plotting_names=descriptor_names,
)
return examples
elif descriptor_type.lower() == "ecfp":
descriptor_bits, plotting_names = _get_joint_ecfp_descriptors(examples)
for e, m in zip(examples, mols):
bitInfo = {} # type: Dict[Any, Any]
AllChem.GetMorganFingerprint(m, 3, bitInfo=bitInfo)
descriptors = tuple(
[1 if x in bitInfo.keys() else 0 for x in descriptor_bits]
)
e.descriptors = Descriptors(
descriptor_type=descriptor_type,
descriptors=descriptors,
descriptor_names=descriptor_bits,
plotting_names=plotting_names,
)
return examples
else:
raise ValueError(
"Invalid descriptor string. Valid descriptor strings are 'Classic', 'ECFP', or 'MACCS'."
)
def get_basic_alphabet() -> Set[str]:
"""Returns set of interpretable SELFIES tokens
Generated by removing P and most ionization states from :func:`selfies.get_semantic_robust_alphabet`
:return: Set of interpretable SELFIES tokens
"""
a = sf.get_semantic_robust_alphabet()
# remove cations/anions except oxygen anion
to_remove = []
for ai in a:
if "+1" in ai:
to_remove.append(ai)
elif "-1" in ai:
to_remove.append(ai)
# remove [P],[#P],[=P]
to_remove.extend(["[P]", "[#P]", "[=P]"])
a -= set(to_remove)
a.add("[O-1]")
return a
def run_stoned(
start_smiles: str,
fp_type: Union[str, Callable] = "ECFP4",
num_samples: int = 2000,
max_mutations: int = 2,
min_mutations: int = 1,
alphabet: Union[List[str], Set[str]] = None,
return_selfies: bool = False,
_pbar: Any = None,
score_func: Optional[Callable] = None,
) -> Union[Tuple[List[str], List[float]], Tuple[List[str], List[str], List[float]]]:
"""Run ths STONED SELFIES algorithm. Typically not used, call :func:`sample_space` instead.
:param start_smiles: SMILES string to start from
:param fp_type: Fingerprint type
:param num_samples: Number of total molecules to generate
:param max_mutations: Maximum number of mutations
:param min_mutations: Minimum number of mutations
:param alphabet: Alphabet to use for mutations, typically from :func:`get_basic_alphabet()`
:param return_selfies: If SELFIES should be returned as well
:return: SELFIES, SMILES, and SCORES generated or SMILES and SCORES generated
"""
if alphabet is None:
alphabet = get_basic_alphabet()
if type(alphabet) == set:
alphabet = list(alphabet)
num_mutation_ls = list(range(min_mutations, max_mutations + 1))
start_mol = smi2mol(start_smiles)
if start_mol == None:
raise Exception("Invalid starting structure encountered")
# want it so after sampling have num_samples
randomized_smile_orderings = [
stoned.randomize_smiles(smi2mol(start_smiles))
for _ in range(num_samples // len(num_mutation_ls))
]
# Convert all the molecules to SELFIES
selfies_ls = [sf.encoder(x) for x in randomized_smile_orderings]
all_smiles_collect: List[str] = []
all_selfies_collect: List[str] = []
for num_mutations in num_mutation_ls:
# Mutate the SELFIES:
if _pbar:
_pbar.set_description(f"🥌STONED🥌 Mutations: {num_mutations}")
selfies_mut = stoned.get_mutated_SELFIES(
selfies_ls.copy(), num_mutations=num_mutations, alphabet=alphabet
)
# Convert back to SMILES:
smiles_back = [sf.decoder(x) for x in selfies_mut]
all_smiles_collect = all_smiles_collect + smiles_back
all_selfies_collect = all_selfies_collect + selfies_mut
if _pbar:
_pbar.update(len(smiles_back))
if _pbar:
_pbar.set_description(f"🥌STONED🥌 Filtering")
# filter out duplicates
all_mols = [smi2mol(s) for s in all_smiles_collect]
all_canon = [mol2smi(m, canonical=True) if m else None for m in all_mols]
seen = set()
to_keep = [False for _ in all_canon]
for i in range(len(all_canon)):
if all_canon[i] and all_canon[i] not in seen:
to_keep[i] = True
seen.add(all_canon[i])
# now do filter
filter_mols = [m for i, m in enumerate(all_mols) if to_keep[i]]
filter_selfies = [s for i, s in enumerate(all_selfies_collect) if to_keep[i]]
filter_smiles = [s for i, s in enumerate(all_smiles_collect) if to_keep[i]]
# compute similarity scores
if isinstance(fp_type, Callable):
if _pbar:
_pbar.set_description(f"Custom fingerprint/scoring function being used!")
base_fp = fp_type(start_smiles)
fps = [fp_type(m) for m in filter_smiles]
scores = [score_func(base_fp, fp) for fp in fps]
else:
if _pbar:
_pbar.set_description(f"RDKit fingerprint/Tanimoto being used!")
base_fp = stoned.get_fingerprint(start_mol, fp_type=fp_type)
fps = [stoned.get_fingerprint(m, fp_type) for m in filter_mols]
scores = BulkTanimotoSimilarity(base_fp, fps) # type: List[float]
if _pbar:
_pbar.set_description(f"🥌STONED🥌 Done")
if return_selfies:
return filter_selfies, filter_smiles, scores
else:
return filter_smiles, scores
@sleep_and_retry
@limits(calls=2, period=30)
def run_chemed(
origin_smiles: str,
num_samples: int,
similarity: float = 0.1,
fp_type: str = "ECFP4",
_pbar: Any = None,
) -> Tuple[List[str], List[float]]:
"""
This method is similar to STONED but works by quering PubChem
:param origin_smiles: Base SMILES
:param num_samples: Minimum number of returned molecules. May return less due to network timeout or exhausting tree
:param similarity: Tanimoto similarity to use in query (float between 0 to 1)
:param fp_type: Fingerprint type
:return: SMILES and SCORES
"""
if _pbar:
_pbar.set_description("⚡CHEMED⚡")
else:
print("⚡CHEMED⚡")
url = f"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/fastsimilarity_2d/smiles/{requests.utils.quote(origin_smiles)}/property/CanonicalSMILES/JSON"
try:
reply = requests.get(
url,
params={"Threshold": int(similarity * 100), "MaxRecords": num_samples},
headers={"accept": "text/json"},
timeout=10,
)
except requests.exceptions.Timeout:
print("Pubchem seems to be down right now ️☠️☠️")
return [], []
try:
data = reply.json()
except:
return [], []
smiles = [d["CanonicalSMILES"] for d in data["PropertyTable"]["Properties"]]
smiles = list(set(smiles))
if _pbar:
_pbar.set_description(f"Received {len(smiles)} similar molecules")
mol0 = smi2mol(origin_smiles)
mols = [smi2mol(s) for s in smiles]
fp0 = stoned.get_fingerprint(mol0, fp_type)
scores = []
# drop Nones
smiles = [s for s, m in zip(smiles, mols) if m is not None]
for m in mols:
if m is None:
continue
fp = stoned.get_fingerprint(m, fp_type)
scores.append(TanimotoSimilarity(fp0, fp))
if _pbar:
_pbar.update()
return smiles, scores
def run_custom(
origin_smiles: str,
data: List[Union[str, rdchem.Mol]],
fp_type: Union[str, Callable] = "ECFP4",
_pbar: Any = None,
**kwargs,
) -> Tuple[List[str], List[float]]:
"""
This method is similar to STONED but uses a custom dataset provided by the user
:param origin_smiles: Base SMILES
:param data: List of SMILES or RDKit molecules
:param fp_type: Fingerprint type
:return: SMILES and SCORES
"""
if _pbar:
_pbar.set_description("⚡CUSTOM⚡")
else:
print("⚡CUSTOM⚡")
mol0 = smi2mol(origin_smiles)
if isinstance(fp_type, Callable):
fp0 = fp_type(mol0)
score_func = kwargs.get("score_func", None)
if not score_func:
raise ValueError(
f"No scoring function provided in `kwargs`; please pass `score_func` as a kwarg."
)
assert isinstance(
score_func, Callable
), f"{score_func} is not a callable function."
if _pbar:
_pbar.set_description("⚡CUSTOM⚡ - Custom fingerprinting/scoring function")
else:
fp0 = stoned.get_fingerprint(mol0, fp_type)
scores = []
smiles = []
# drop invalid molecules
for d in data:
if isinstance(d, str):
m = smi2mol(d)
else:
m = d
if m is None:
continue
smiles.append(mol2smi(m))
# in the conventional stream, use Tanimoto similarity
if isinstance(fp_type, str):
fp = stoned.get_fingerprint(m, fp_type)
scores.append(TanimotoSimilarity(fp0, fp))
# otherwise, use our custom fingerprinting and scoring function
else:
fp = fp_type(m)
scores.append(score_func(fp0, fp))
if _pbar:
_pbar.update()
return smiles, scores
def sample_space(
origin_smiles: str,
f: Union[
Callable[[str, str], List[float]],
Callable[[str], List[float]],
Callable[[List[str], List[str]], List[float]],
Callable[[List[str]], List[float]],
],
batched: bool = True,
preset: str = "medium",
data: List[Union[str, rdchem.Mol]] = None,
method_kwargs: Dict = None,
num_samples: int = None,
stoned_kwargs: Dict = None,
quiet: bool = False,
use_selfies: bool = False,
sanitize_smiles: bool = True,
) -> List[Example]:
"""Sample chemical space around given SMILES
This will evaluate the given function and run the :func:`run_stoned` function over chemical space around molecule. ``num_samples`` will be
set to 3,000 by default if using STONED and 150 if using ``chemed``.
:param origin_smiles: starting SMILES
:param f: A function which takes in SMILES or SELFIES and returns predicted value. Assumed to work with lists of SMILES/SELFIES unless `batched = False`
:param batched: If `f` is batched
:param preset: Can be wide, medium, or narrow. Determines how far across chemical space is sampled. Try `"chemed"` preset to only sample commerically available compounds.
:param data: If not None and preset is `"custom"` will use this data instead of generating new ones.
:param method_kwargs: More control over STONED, CHEMED and CUSTOM can be set here. See :func:`run_stoned`, :func:`run_chemed` and :func:`run_custom`
:param num_samples: Number of desired samples. Can be set in `method_kwargs` (overrides) or here. `None` means default for preset
:param stoned_kwargs: Backwards compatible alias for `methods_kwargs`
:param quiet: If True, will not print progress bar
:param use_selfies: If True, will use SELFIES instead of SMILES for `f`
:param sanitize_smiles: If True, will sanitize all SMILES
:return: List of generated :obj:`Example`
"""
wrapped_f = f
# if f only takes in 1 arg, wrap it in a function that takes in 2
# count args with no default value. Looks fancy because of possible objects/partials
argcount = len(
[
i
for i in inspect.signature(f).parameters.values()
if i.default == inspect.Parameter.empty
]
)
if argcount == 1:
if use_selfies:
def wrapped_f(sm, sf):
return f(sf)
else:
def wrapped_f(sm, sf):
return f(sm)
batched_f: Any = wrapped_f
if not batched:
def batched_f(sm, se):
return np.array([wrapped_f(smi, sei) for smi, sei in zip(sm, se)])
if sanitize_smiles:
origin_smiles = stoned.sanitize_smiles(origin_smiles)[1]
if origin_smiles is None:
raise ValueError("Given SMILES does not appear to be valid")
smi_yhat = np.asarray(batched_f([origin_smiles], [sf.encoder(origin_smiles)]))
try:
iter(smi_yhat)
except TypeError:
raise ValueError("Your model function does not appear to be batched")
smi_yhat = np.squeeze(smi_yhat[0])
if stoned_kwargs is not None:
method_kwargs = stoned_kwargs
if method_kwargs is None:
method_kwargs = {}
if preset == "medium":
method_kwargs["num_samples"] = 3000 if num_samples is None else num_samples
method_kwargs["max_mutations"] = 2
method_kwargs["alphabet"] = get_basic_alphabet()
elif preset == "narrow":
method_kwargs["num_samples"] = 3000 if num_samples is None else num_samples
method_kwargs["max_mutations"] = 1
method_kwargs["alphabet"] = get_basic_alphabet()
elif preset == "wide":
method_kwargs["num_samples"] = 3000 if num_samples is None else num_samples
method_kwargs["max_mutations"] = 5
method_kwargs["alphabet"] = sf.get_semantic_robust_alphabet()
elif preset == "chemed":
method_kwargs["num_samples"] = 150 if num_samples is None else num_samples
elif preset == "custom" and data is not None:
method_kwargs["num_samples"] = len(data)
else:
raise ValueError(f'Unknown preset "{preset}"')
try:
num_samples = method_kwargs["num_samples"]
except KeyError as e:
if num_samples is None:
num_samples = 150
method_kwargs["num_samples"] = num_samples
pbar = tqdm.tqdm(total=num_samples, disable=quiet)
# STONED
if preset.startswith("chem"):
smiles, scores = run_chemed(origin_smiles, _pbar=pbar, **method_kwargs)
selfies = [sf.encoder(s) for s in smiles]
elif preset == "custom":
smiles, scores = run_custom(
origin_smiles, data=cast(Any, data), _pbar=pbar, **method_kwargs
)
selfies = [sf.encoder(s) for s in smiles]
else:
result = run_stoned(
origin_smiles, _pbar=pbar, return_selfies=True, **method_kwargs
)
selfies, smiles, scores = cast(Tuple[List[str], List[str], List[float]], result)
pbar.set_description("😀Calling your model function😀")
if sanitize_smiles:
smiles = [stoned.sanitize_smiles(s)[1] for s in smiles]
fxn_values = batched_f(smiles, selfies)
# pack them into data structure with filtering out identical
# and nan
exps = [
Example(
origin_smiles,
sf.encoder(origin_smiles),
1.0,
cast(Any, smi_yhat),
index=0,
is_origin=True,
)
] + [
Example(sm, se, s, cast(Any, np.squeeze(y)), index=0)
for i, (sm, se, s, y) in enumerate(zip(smiles, selfies, scores, fxn_values))
if s < 1.0 and np.isfinite(np.squeeze(y))
]
for i, e in enumerate(exps): # type: ignore
e.index = i # type: ignore
pbar.reset(len(exps))
pbar.set_description("🔭Projecting...🔭")
# compute distance matrix
if "fp_type" in method_kwargs:
fp_type = method_kwargs.get("fp_type")
if isinstance(fp_type, str):
full_dmat = _fp_dist_matrix(
[e.smiles for e in exps],
method_kwargs["fp_type"] if ("fp_type" in method_kwargs) else "ECFP4",
_pbar=pbar,
)
# if we specified a custom fp_type, embed and compute
# the distance with vectorized code
elif isinstance(fp_type, Callable):
fingerprints = np.array([fp_type(e.smiles) for e in exps])
from sklearn.metrics import pairwise_distances
full_dmat = pairwise_distances(fingerprints)
else:
raise NotImplementedError(f"Something is terribly wrong with sampled_space.")
pbar.set_description("🥰Finishing up🥰")
# compute PCA
pca = PCA(n_components=2)
proj_dmat = pca.fit_transform(full_dmat)
for e in exps: # type: ignore
e.position = proj_dmat[e.index, :] # type: ignore
# do clustering everywhere (maybe do counter/same separately?)
# clustering = AgglomerativeClustering(
# n_clusters=max_k, affinity='precomputed', linkage='complete').fit(full_dmat)
# Just do it on projected so it looks prettier.
clustering = DBSCAN(eps=0.15, min_samples=5).fit(proj_dmat)
for i, e in enumerate(exps): # type: ignore
e.cluster = clustering.labels_[i] # type: ignore
pbar.set_description("🤘Done🤘")
pbar.close()
return exps
def _select_examples(cond, examples, nmols):
result = []
# similarity filtered by if cluster/counter
def cluster_score(e, i):
return (e.cluster == i) * cond(e) * e.similarity
clusters = set([e.cluster for e in examples])
for i in clusters:
close_counter = max(examples, key=lambda e, i=i: cluster_score(e, i))
# check if actually is (since call could have been zero)
if cluster_score(close_counter, i):
result.append(close_counter)
# trim, in case we had too many cluster
result = sorted(result, key=lambda v: v.similarity * cond(v), reverse=True)[:nmols]
# fill in remaining
ncount = sum([cond(e) for e in result])
fill = max(0, nmols - ncount)
result.extend(
sorted(examples, key=lambda v: v.similarity * cond(v), reverse=True)[:fill]
)
return list(filter(cond, result))
def lime_explain(
examples: List[Example],
descriptor_type: str = "MACCS",
return_beta: bool = True,
):
"""From given :obj:`Examples<Example>`, find descriptor t-statistics (see
:doc: `index`)
:param examples: Output from :func: `sample_space`
:param descriptor_type: Desired descriptors, choose from 'Classic', 'ECFP' 'MACCS'
:return_beta: Whether or not the function should return regression coefficient values
"""
# add descriptors
examples = add_descriptors(examples, descriptor_type)
# weighted tanimoto similarities
w = np.array([1 / (1 + (1 / (e.similarity + 0.000001) - 1) ** 5) for e in examples])
# Only keep nonzero weights
non_zero = w > 10 ** (-6)
nonzero_w = w[non_zero]
# create a diagonal matrix of w
N = nonzero_w.shape[0]
diag_w = np.zeros((N, N))
np.fill_diagonal(diag_w, nonzero_w)
# get feature matrix
x_mat = np.array([list(e.descriptors.descriptors) for e in examples])[
non_zero
].reshape(N, -1)
# remove zero variance columns
y = (
np.array([e.yhat for e in examples])
.reshape(len(examples))[non_zero]
.astype(float)
)
# remove bias
y -= np.mean(y)
# compute least squares fit
xtinv = np.linalg.pinv(
(x_mat.T @ diag_w @ x_mat)
+ 0.001 * np.identity(len(examples[0].descriptors.descriptors))
)
beta = xtinv @ x_mat.T @ (y * nonzero_w)
# compute standard error in beta
yhat = x_mat @ beta
resids = yhat - y
SSR = np.sum(resids**2)
se2_epsilon = SSR / (len(examples) - len(beta))
se2_beta = se2_epsilon * xtinv
# now compute t-statistic for existence of coefficients
tstat = beta * np.sqrt(1 / np.diag(se2_beta))
# Set tstats for bases, to be used later
# TODO: Used to put them on examples[0] only,
# but now copy them to all examples
for e in examples:
e.descriptors.tstats = tstat
# Return beta (feature weights) which are the fits if asked for
if return_beta:
return beta
else:
return None
def cf_explain(examples: List[Example], nmols: int = 3) -> List[Example]:
"""From given :obj:`Examples<Example>`, find closest counterfactuals (see :doc:`index`)
:param examples: Output from :func:`sample_space`
:param nmols: Desired number of molecules
"""
def is_counter(e):
return e.yhat != examples[0].yhat
result = _select_examples(is_counter, examples[1:], nmols)
for i, r in enumerate(result):
r.label = f"Counterfactual {i+1}"
return examples[:1] + result
def rcf_explain(
examples: List[Example],
delta: Union[Any, Tuple[float, float]] = (-1, 1),
nmols: int = 4,
) -> List[Example]:
"""From given :obj:`Examples<Example>`, find closest counterfactuals (see :doc:`index`)
This version works with regression, so that a counterfactual is if the given example is higher or
lower than base.
:param examples: Output from :func:`sample_space`
:param delta: float or tuple of hi/lo indicating margin for what is counterfactual
:param nmols: Desired number of molecules
"""
if type(delta) is float:
delta = (-delta, delta)
def is_high(e):
return e.yhat + delta[0] >= examples[0].yhat
def is_low(e):
return e.yhat + delta[1] <= examples[0].yhat
hresult = (
[] if delta[0] is None else _select_examples(is_high, examples[1:], nmols // 2)
)
for i, h in enumerate(hresult):
h.label = f"Increase ({i+1})"
lresult = (
[] if delta[1] is None else _select_examples(is_low, examples[1:], nmols // 2)
)
for i, l in enumerate(lresult):
l.label = f"Decrease ({i+1})"
return examples[:1] + lresult + hresult
def plot_space(
examples: List[Example],
exps: List[Example],
figure_kwargs: Dict = None,
mol_size: Tuple[int, int] = (200, 200),
highlight_clusters: bool = False,
mol_fontsize: int = 8,
offset: int = 0,
ax: Any = None,
cartoon: bool = False,
rasterized: bool = False,
):
"""Plot chemical space around example and annotate given examples.
:param examples: Large list of :obj:Example which make-up points
:param exps: Small list of :obj:Example which will be annotated
:param figure_kwargs: kwargs to pass to :func:`plt.figure<matplotlib.pyplot.figure>`
:param mol_size: size of rdkit molecule rendering, in pixles
:param highlight_clusters: if `True`, cluster indices are rendered instead of :obj:Example.yhat
:param mol_fontsize: minimum font size passed to rdkit
:param offset: offset annotations to allow colorbar or other elements to fit into plot.
:param ax: axis onto which to plot
:param cartoon: do cartoon outline on points?
:param rasterized: raster the scatter?
"""
imgs = _mol_images(exps, mol_size, mol_fontsize) # , True)
if figure_kwargs is None:
figure_kwargs = {"figsize": (12, 8)}
base_color = "gray"
if ax is None:
ax = plt.figure(**figure_kwargs).gca()
if highlight_clusters:
colors = [e.cluster for e in examples]
def normalizer(x):
return x
cmap = "Accent"
else:
colors = cast(Any, [e.yhat for e in examples])
normalizer = plt.Normalize(min(colors), max(colors))
cmap = "viridis"
space_x = [e.position[0] for e in examples]
space_y = [e.position[1] for e in examples]
if cartoon:
# plot shading, lines, front
ax.scatter(space_x, space_y, 50, "0.0", lw=2, rasterized=rasterized)
ax.scatter(space_x, space_y, 50, "1.0", lw=0, rasterized=rasterized)
ax.scatter(
space_x,
space_y,
40,
c=normalizer(colors),
cmap=cmap,
lw=2,
alpha=0.1,
rasterized=rasterized,
)
else:
ax.scatter(
space_x,
space_y,
c=normalizer(colors),
cmap=cmap,
alpha=0.5,
edgecolors="none",
rasterized=rasterized,
)
# now plot cfs/annotated points
ax.scatter(
[e.position[0] for e in exps],
[e.position[1] for e in exps],
c=normalizer([e.cluster if highlight_clusters else e.yhat for e in exps]),
cmap=cmap,
edgecolors="black",
)
x = [e.position[0] for e in exps]
y = [e.position[1] for e in exps]
titles = []
colors = []
for e in exps:
if not e.is_origin:
titles.append(f"Similarity = {e.similarity:.2f}\n{e.label}")
colors.append(cast(Any, base_color))
else:
titles.append("Base")
colors.append(cast(Any, base_color))
_image_scatter(x, y, imgs, titles, colors, ax, offset=offset)
ax.axis("off")
ax.set_aspect("auto")
def plot_cf(
exps: List[Example],
fig: Any = None,
figure_kwargs: Dict = None,
mol_size: Tuple[int, int] = (200, 200),
mol_fontsize: int = 10,
nrows: int = None,
ncols: int = None,
):
"""Draw the given set of Examples in a grid
:param exps: Small list of :obj:`Example` which will be drawn
:param fig: Figure to plot onto
:param figure_kwargs: kwargs to pass to :func:`plt.figure<matplotlib.pyplot.figure>`
:param mol_size: size of rdkit molecule rendering, in pixles
:param mol_fontsize: minimum font size passed to rdkit
:param nrows: number of rows to draw in grid
:param ncols: number of columns to draw in grid
"""
imgs = _mol_images(exps, mol_size, mol_fontsize)
if nrows is not None:
R = nrows
else:
R = math.ceil(math.sqrt(len(imgs)))
if ncols is not None:
C = ncols
else:
C = math.ceil(len(imgs) / R)
if fig is None:
if figure_kwargs is None:
figure_kwargs = {"figsize": (12, 8)}
fig, axs = plt.subplots(R, C, **figure_kwargs)
else:
axs = fig.subplots(R, C)
if type(axs) != np.ndarray: # Happens if nrows=ncols=1
axs = np.array([[axs]])
axs = axs.flatten()
for i, (img, e) in enumerate(zip(imgs, exps)):
title = "Base" if e.is_origin else f"Similarity = {e.similarity:.2f}\n{e.label}"
title += f"\nf(x) = {e.yhat:.3f}"
axs[i].set_title(title)
axs[i].imshow(np.asarray(img), gid=f"rdkit-img-{i}")
axs[i].axis("off")
for j in range(i, C * R):
axs[j].axis("off")
axs[j].set_facecolor("white")
plt.tight_layout()
def plot_descriptors(
examples: List[Example],
output_file: str = None,
fig: Any = None,
figure_kwargs: Dict = None,
title: str = None,
return_svg: bool = False,
):
"""Plot descriptor attributions from given set of Examples.
:param examples: Output from :func:`sample_space`
:param output_file: Output file name to save the plot - optional except for ECFP
:param fig: Figure to plot on to
:param figure_kwargs: kwargs to pass to :func:`plt.figure<matplotlib.pyplot.figure>`
:param title: Title for the plot
:param return_svg: Whether to return svg for plot
"""
from importlib_resources import files
import exmol.lime_data
import pickle # type: ignore
# infer descriptor_type from examples
descriptor_type = examples[0].descriptors.descriptor_type.lower()
multiple_bases = _check_multiple_bases(examples)
if output_file is None and descriptor_type == "ecfp" and not return_svg:
raise ValueError("No filename provided to save the plot")
space_tstats = list(examples[0].descriptors.tstats)
if fig is None:
if figure_kwargs is None:
figure_kwargs = (
{"figsize": (5, 5)}
if descriptor_type.lower() == "classic"
else {"figsize": (8, 5)}
)
fig, ax = plt.subplots(nrows=1, ncols=1, dpi=180, **figure_kwargs)
# find important descriptors
d_importance = {
a: [b, i, n]
for i, (a, b, n) in enumerate(
zip(
examples[0].descriptors.descriptor_names,
space_tstats,
examples[0].descriptors.plotting_names,
)
)
if not np.isnan(b)
}
d_importance = dict(
sorted(d_importance.items(), key=lambda item: abs(item[1][0]), reverse=True)
)
t = [a[0] for a in list(d_importance.values())][:5]
key_ids = [a[1] for a in list(d_importance.values())][:5]
keys = [a for a in list(d_importance.keys())]
names = [a[2] for a in list(d_importance.values())][:5]
# set colors
colors = []
for ti in t:
if ti < 0:
colors.append("#F06060")
if ti > 0:
colors.append("#1BBC9B")
# plot the bars
bar1 = ax.barh(range(len(t)), t, color=colors, height=0.75)
new_patches = []
for patch in reversed(ax.patches):
bb = patch.get_bbox()
color = patch.get_facecolor()
p_bbox = FancyBboxPatch(
(bb.xmin, bb.ymin),
abs(bb.width),
abs(bb.height),
boxstyle="round,pad=-0.040,rounding_size=0.015",
ec="none",
fc=color,
mutation_aspect=4,
)
patch.remove()
new_patches.append(p_bbox)
for patch in new_patches:
ax.add_patch(patch)
count = 0
sk_dict, key_imgs = {}, {}
if descriptor_type == "maccs":
# Load svg/png images
mk = files(exmol.lime_data).joinpath("keys.pb")
with open(str(mk), "rb") as f:
key_imgs = pickle.load(f)
if descriptor_type == "ecfp":
# get reference for ECFP
if multiple_bases:
bases = [smi2mol(e.smiles) for e in examples if e.is_origin == True]
bi = {} # type: Dict[Any, Any]
for b in bases:
bit_info = {} # type: Dict[Any, Any]
fp = AllChem.GetMorganFingerprint(b, 3, bitInfo=bit_info)
for bit in bit_info:
if bit not in bi:
bi[bit] = (b, bit, bit_info)
else:
bi = {}
m = smi2mol(examples[0].smiles)
fp = AllChem.GetMorganFingerprint(m, 3, bitInfo=bi)
for rect, ti, k, ki, n in zip(bar1, t, keys, key_ids, names):
# account for Nones
if n is None:
n = ""
# annotate patches with text desciption
y = rect.get_y() + rect.get_height() / 2.0
n = textwrap.fill(str(n), 20)
if ti < 0:
x = 0.25
skx = (
np.max(np.absolute(t)) + 2
if descriptor_type == "maccs"
else np.max(np.absolute(t))
)
box_x = 0.98
ax.text(
x,
y,
n,
ha="left",
va="center",
wrap=True,
fontsize=12,
)
else:
x = -0.25
skx = (
-np.max(np.absolute(t)) - 2
if descriptor_type == "maccs"
else np.max(np.absolute(t))
)
box_x = 0.02
ax.text(
x,
y,
n,
ha="right",
va="center",
wrap=True,
fontsize=12,
)
# add SMARTS annotation where applicable
if descriptor_type == "maccs" or descriptor_type == "ecfp":
if descriptor_type == "maccs":
key_img = plt.imread(io.BytesIO(key_imgs[ki]["png"]))
box = skunk.ImageBox(f"sk{count}", key_img, zoom=1)
else:
box = skunk.Box(130, 50, f"sk{count}")
ab = AnnotationBbox(
box,
xy=(skx, count),
xybox=(box_x, (5 - count) * 0.2 - 0.1), # Invert axis
xycoords="data",
boxcoords="axes fraction",
bboxprops=dict(lw=0.5),
)
ax.add_artist(ab)
if descriptor_type == "maccs":
sk_dict[f"sk{count}"] = key_imgs[ki]["svg"]
if descriptor_type == "ecfp":
if multiple_bases:
m = bi[int(k)][0]
b = bi[int(k)][2]
else:
b = bi
svg = DrawMorganBit(
m,
int(k),
b,
molSize=(300, 200),
centerColor=None,
aromaticColor=None,
ringColor=None,
extraColor=(0.8, 0.8, 0.8),
useSVG=True,
)
# TODO: Why?
try:
svgdata = svg.data
except AttributeError:
svgdata = svg
sk_dict[f"sk{count}"] = svgdata
count += 1
ax.axvline(x=0, color="grey", linewidth=0.5)
# calculate significant T
w = np.array([1 / (1 + (1 / (e.similarity + 0.000001) - 1) ** 5) for e in examples])
effective_n = np.sum(w) ** 2 / np.sum(w**2)
T = ss.t.ppf(0.975, df=effective_n)
# plot T
ax.axvline(x=T, color="#f5ad4c", linewidth=0.75, linestyle="--", zorder=0)
ax.axvline(x=-T, color="#f5ad4c", linewidth=0.75, linestyle="--", zorder=0)
# set axis
ax.set_yticks([])
ax.invert_yaxis()
ax.set_xlabel("Descriptor t-statistics", fontsize=12)
if title is None:
ax.set_title(f"{descriptor_type} descriptors", fontsize=12)
else:
ax.set_title(f"{title}", fontsize=12)
# inset SMARTS svg images for MACCS descriptors
if descriptor_type == "maccs" or descriptor_type == "ecfp":
if descriptor_type == "maccs":
print(
"SMARTS annotations for MACCS descriptors were created using SMARTSviewer (smartsview.zbh.uni-hamburg.de, Copyright: ZBH, Center for Bioinformatics Hamburg) developed by K. Schomburg et. al. (J. Chem. Inf. Model. 2010, 50, 9, 1529–1535)"
)
xlim = np.max(np.absolute(t)) + 6
ax.set_xlim(-xlim, xlim)
svg = skunk.insert(sk_dict)
if output_file is not None:
plt.tight_layout()
with open(output_file, "w") as f: # type: ignore
f.write(svg)
if return_svg:
plt.close()
return svg
elif descriptor_type == "classic":
xlim = max(np.max(np.absolute(t)), T + 1)
ax.set_xlim(-xlim, xlim)
if output_file is not None:
plt.tight_layout()
plt.savefig(output_file, dpi=180, bbox_inches="tight")
def check_multiple_aromatic_rings(mol):
ri = mol.GetRingInfo()
count = 0
for bondRing in ri.BondRings():
flag = True
for id in bondRing:
if not mol.GetBondWithIdx(id).GetIsAromatic():
flag = False
continue
if flag:
count += 1
return True if count > 1 else False
def merge_text_explains(
*args: List[Tuple[str, float]], filter: Optional[float] = None
) -> List[Tuple[str, float]]:
"""Merge multiple text explanations into one and sort."""
# sort them by T value, putting negative examples at the end
joint = reduce(lambda x, y: x + y, args)
if len(joint) == 0:
return []
# get the highest (hopefully) positive
m = max([x[1] for x in joint if x[1] > 0])
pos = [x for x in joint if x[1] == m]
joint = [x for x in joint if x[1] != m]
joint = sorted(joint, key=lambda x: np.absolute(x[1]), reverse=True)
return pos + joint
_text_prompt = """
The following are a series of questions about molecules that connect their structure to a property, along with how important each question is for the molecular property. An answer of "Yes" means that the question was true and that attribute of structure contributed to the molecular property. An answer of "Counterfactual" means the lack of that attribute contributed to the molecular property. A summary paragraph is given below, which only summarizes on the most important structure-property relationships.
Property: [PROPERTY]
[TEXT]
Summary: The molecular property "[PROPERTY]" can be explained"""
def text_prompt(
text_explanations: List[Tuple[str, float]],
property_name: str,
open_ai_key: Optional[str] = None,
) -> str:
"""Insert text explanations into template, and optionally send to OpenAI."""
result = _text_prompt.replace("[PROPERTY]", property_name)
# want to have negative examples at the end
text_explanations.sort(key=lambda x: x[1], reverse=True)
result = result.replace("[TEXT]", "".join([f"{t[0]}" for t in text_explanations]))
if open_ai_key is not None:
import openai
openai.api_key = open_ai_key
response = openai.Completion.create(
model="text-davinci-003",
prompt=result,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
completion = response["choices"][0]["text"]
return (
'The molecular property "'
+ property_name
+ '" can be explained'
+ completion
)
return result
def text_explain(
examples: List[Example],
descriptor_type: str = "maccs",
count: int = 5,
presence_thresh: float = 0.2,
include_weak: Optional[bool] = None,
) -> List[Tuple[str, float]]:
"""Take an example and convert t-statistics into text explanations
:param examples: Output from :func:`sample_space`
:param descriptor_type: Type of descriptor, either "maccs", or "ecfp".
:param count: Number of text explanations to return
:param presence_thresh: Threshold for presence of descriptor in examples
:param include_weak: Include weak descriptors. If not set, the function
will be first have this set to False, and if no descriptors are found,
will be set to True and function will be re-run
"""
descriptor_type = descriptor_type.lower()
# populate lime explanation
if examples[-1].descriptors is None:
lime_explain(examples, descriptor_type=descriptor_type)
nbases = sum([1 for e in examples if e.is_origin])
# Take t-statistics, rank them
d_importance = [
(n, t, i) # name, t-stat, index
for i, (n, t) in enumerate(
zip(
examples[0].descriptors.plotting_names,
examples[0].descriptors.tstats,
)
)
# don't want NANs and want match (if not multiple bases)
if not np.isnan(t)
]
d_importance = sorted(d_importance, key=lambda x: abs(x[1]), reverse=True)
# get significance value - if >significance, then important else weakly important?
w = np.array([1 / (1 + (1 / (e.similarity + 0.000001) - 1) ** 5) for e in examples])
effective_n = np.sum(w) ** 2 / np.sum(w**2)
if np.isnan(effective_n):
effective_n = len(examples)
T = ss.t.ppf(0.975, df=effective_n)
pos_count = 0
neg_count = 0
result = []
existing_names = set()
for k, v, i in d_importance:
if pos_count + neg_count == count:
break
name = k
if name is None or name in existing_names:
continue
existing_names.add(name)
if abs(v) > 4:
imp = "This is very important for the property\n"
elif abs(v) >= T:
imp = "This is important for the property\n"
elif include_weak:
imp = "This could be relevent for the property\n"
else:
continue
# check if it's present in majority of base molecules
present = sum(
[1 for e in examples if e.descriptors.descriptors[i] != 0 and e.is_origin]
)
if present / nbases < (1 - presence_thresh) and v < 0:
if neg_count == count - 2:
# don't want to have only negative examples
continue
kind = "No (Counterfactual)."
neg_count += 1
elif present / nbases > presence_thresh and v > 0:
kind = "Yes."
pos_count += 1
else:
continue
# adjust name to be question
if name[-1] != "?":
name = "Is there " + name + "?"
s = f"{name} {kind} {imp}"
result.append((s, v))
if len(result) == 0 or pos_count == 0 and include_weak is None:
return text_explain(
examples,
descriptor_type=descriptor_type,
count=count,
presence_thresh=presence_thresh,
include_weak=True,
)
return result
| [
"\nThe following are a series of questions about molecules that connect their structure to a property, along with how important each question is for the molecular property. An answer of \"Yes\" means that the question was true and that attribute of structure contributed to the molecular property. An answer of \"Counterfactual\" means the lack of that attribute contributed to the molecular property. A summary paragraph is given below, which only summarizes on the most important structure-property relationships.\n\nProperty: [PROPERTY]\n[TEXT]\nSummary: The molecular property \"[PROPERTY]\" can be explained"
] |
2024-01-10 | Manuindukuri/Assignment-3 | fast_api~user_registration.py | from fastapi import FastAPI, Depends, HTTPException
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from pydantic import BaseModel
from passlib.context import CryptContext
from datetime import datetime, timedelta
import jwt
from sqlalchemy import create_engine, Column, Integer, String, DateTime, Boolean, MetaData, Table
from sqlalchemy.orm import Session
from databases import Database
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import os
import logging
from dotenv import load_dotenv
# Import SequenceMatcher for text similarity
from difflib import SequenceMatcher
#QA_Openai
from fastapi import FastAPI
from pydantic import BaseModel
import openai
import pinecone
# Load environment variables from the .env file
load_dotenv()
# Initialize a dictionary to store previous questions and their answers
previous_questions = {}
# Environment Variables
api_key = os.getenv("OPENAI_KEY")
pinecone_api_key = os.getenv("PINECONE_API")
postgres_user = os.getenv("POSTGRES_USER")
postgres_password = os.getenv("POSTGRES_PASSWORD")
host_ip_address = os.getenv("POSTGRES_HOST")
port = os.getenv("POSTGRES_PORT")
postgres_db = os.getenv("POSTGRES_DB")
# Log metrics
logging.basicConfig(level=logging.INFO)
# Fast API
app = FastAPI()
# Initialize Pinecone
pinecone.init(api_key=pinecone_api_key, environment="gcp-starter")
index = pinecone.Index(index_name='my-index')
class UserInput(BaseModel):
forms: list # Change from 'form' to 'forms' to accept a list of selected forms
question: str
def generate_answer(question: str):
try:
# Create embeddings for the given 'question' using the specified EMBEDDING_MODEL
openai.api_key = api_key
EMBEDDING_MODEL = "text-embedding-ada-002"
response = openai.Embedding.create(model=EMBEDDING_MODEL, input=question)
# Extract the embeddings from the API response
embeddings = response["data"][0]["embedding"]
return embeddings
except Exception as e:
return str(e)
# Database setup
DATABASE_URL = f"postgresql://{postgres_user}:{postgres_password}@{host_ip_address}:{port}/{postgres_db}"
database = Database(DATABASE_URL)
metadata = MetaData()
Base = declarative_base()
engine = create_engine(DATABASE_URL)
# Define the 'users' table
users = Table(
"users",
metadata,
Column("id", Integer, primary_key=True, index=True),
Column("username", String, unique=True, index=True),
Column("full_name", String),
Column("email", String, unique=True, index=True),
Column("hashed_password", String),
Column("active", Boolean, default=True),
Column("created_at", DateTime, default=datetime.utcnow),
)
# JWT
SECRET_KEY = "e41f6b654b3e3f41e3a030ef783cbe2fec5824d5543a0d408ee3ba0677e1750a"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
# Initialize the database
Base.metadata.create_all(bind=engine)
# Hashing password
password_hash = CryptContext(schemes=["bcrypt"], deprecated="auto")
# Initialize sessionmaker
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# User Pydantic model
class User(BaseModel):
username: str
full_name: str
email: str
# Token Pydantic model
class Token(BaseModel):
access_token: str
token_type: str
# User registration
class UserInDB(User):
password: str
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
# User model
class UserDB(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
username = Column(String, unique=True, index=True)
full_name = Column(String, index=True)
email = Column(String, unique=True, index=True)
hashed_password = Column(String)
active = Column(Boolean, default=True)
created_at = Column(DateTime, default=datetime.utcnow)
# Get user data
def get_user(db, username: str):
return db.query(UserDB).filter(UserDB.username == username).first()
def verify_password(plain_password, hashed_password):
return password_hash.verify(plain_password, hashed_password)
def create_access_token(data, expires_delta):
to_encode = data.copy()
expire = datetime.utcnow() + expires_delta
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
# OAuth2 password scheme for token generation
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
# Register a new user
@app.post("/register", response_model=User)
async def register(user: UserInDB, db: Session = Depends(get_db)):
existing_user = get_user(db, user.username)
if existing_user:
raise HTTPException(status_code=400, detail="Username already registered")
hashed_password = password_hash.hash(user.password)
new_user = UserDB(username=user.username, full_name=user.full_name, email=user.email, hashed_password=hashed_password)
db.add(new_user)
db.commit()
db.refresh(new_user)
return User(username=new_user.username, full_name=new_user.full_name, email=new_user.email)
# Login and get JWT token
@app.post("/token", response_model=Token)
async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):
db = SessionLocal() # Get the database session
user = get_user(db, form_data.username)
if user is None or not verify_password(form_data.password, user.hashed_password):
raise HTTPException(status_code=400, detail="Incorrect username or password")
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(data={"sub": user.username}, expires_delta=access_token_expires)
return {"access_token": access_token, "token_type": "bearer"}
# Protected route
@app.get("/protected")
async def get_protected_data(current_user: User = Depends(oauth2_scheme)):
return current_user
# QA Processing
# Function to find the most similar previous question
def find_similar_previous_question(question):
max_similarity = 0
best_match_question = None
for prev_question in previous_questions.keys():
similarity = SequenceMatcher(None, question, prev_question).ratio()
if similarity > max_similarity:
max_similarity = similarity
best_match_question = prev_question
if max_similarity >= 0.85:
return best_match_question
else:
return None
@app.post("/process_question")
async def process_question(input_data: UserInput, current_user: User = Depends(oauth2_scheme)):
try:
# Check for similar previous questions
similar_question = find_similar_previous_question(input_data.question)
if similar_question is not None:
# Retrieve the answer from the dictionary
similar_answer = previous_questions[similar_question]
return {"answer": similar_answer}
# If no similar previous question is found, continue with OpenAI processing
embeddings = generate_answer(input_data.question)
if isinstance(embeddings, str):
return {"error": embeddings}
filter_condition = {"form_title": {"$in": input_data.forms}}
results = index.query(embeddings, top_k=1, include_metadata=True, filter=filter_condition)
logging.info(f"User Question: {input_data.question}")
logging.info(f"Selected Forms: {input_data.forms}")
logging.info(f"Embeddings: {embeddings}")
logging.info(f"filter_condition: {filter_condition}")
logging.info(f"results: {results}")
if results['matches'][0]['score'] < 0.74:
return {"answer": "Your question is out of scope"}
best_match_question = results['matches'][0]['metadata']['content']
answer = openai.Completion.create(
engine="text-davinci-002",
temperature=0.3,
n=1,
prompt=f"Answer the following question: {best_match_question}",
max_tokens=100
)
logging.info(f"OpenAI Response: {answer}")
# Store the current question and its answer in the dictionary
previous_questions[input_data.question] = answer.choices[0].text
return {"answer": answer.choices[0].text}
except Exception as e:
return {"error": str(e)}
| [
"Answer the following question: PLACEHOLDER"
] |
2024-01-10 | martincooperbiz/mage-ai | mage_ai~ai~llm_pipeline_wizard.py | import ast
import asyncio
import json
import os
import re
from typing import Dict
import openai
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from mage_ai.data_cleaner.transformer_actions.constants import ActionType, Axis
from mage_ai.data_preparation.models.block import Block
from mage_ai.data_preparation.models.constants import (
NON_PIPELINE_EXECUTABLE_BLOCK_TYPES,
BlockLanguage,
BlockType,
PipelineType,
)
from mage_ai.data_preparation.models.pipeline import Pipeline
from mage_ai.data_preparation.repo_manager import get_repo_config, get_repo_path
from mage_ai.data_preparation.templates.template import fetch_template_source
from mage_ai.io.base import DataSource
from mage_ai.server.logger import Logger
logger = Logger().new_server_logger(__name__)
BLOCK_LANGUAGE_TO_FILE_TYPE_VARIABLE = {
BlockLanguage.MARKDOWN: 'markdown script',
BlockLanguage.PYTHON: 'python script',
BlockLanguage.R: 'R script',
BlockLanguage.SQL: 'sql script',
BlockLanguage.YAML: 'yaml configuration',
}
BLOCK_TYPE_TO_PURPOSE_VARIABLE = {
BlockType.DATA_LOADER: "load data from a specific data source.",
BlockType.DATA_EXPORTER: "export data to a specific data source.",
BlockType.TRANSFORMER: "transform data from one format into different format.",
}
DATA_LOADERS_FOLDER = 'data_loaders'
DATA_EXPORTERS_FOLDER = 'data_exporters'
PROMPT_FOR_BLOCK = """
The {file_type} delimited by triple backticks is used to {purpose}.
Write a documentation based on the {file_type}. {add_on_prompt}
Ignore the imported libraries and the @test decorator.
```{block_content}```"""
PROMPT_FOR_SUMMARIZE_BLOCK_DOC = """
A data pipeline reads data from source, transform the data and export data into another source.
The content delimited by triple backticks contains explains of all components in one data pipeline.
Write a detailed summarization of the data pipeline based on the content provided.
```{block_content}```"""
PROMPT_TO_SPLIT_BLOCKS = """
A BLOCK does one action either reading data from one data source, transforming the data from
one format to another or exporting data into a data source.
Based on the code description delimited by triple backticks, your task is to identify
how many BLOCKS required, function for each BLOCK and upstream blocks between BLOCKs.
Use the following format:
BLOCK 1: function: <block function>. upstream: <upstream blocks>
BLOCK 2: function: <block function>. upstream: <upstream blocks>
BLOCK 3: function: <block function>. upstream: <upstream blocks>
...
Example:
<code description>: ```
Read data from MySQL and Postgres, filter out rows with book_price > 100, and save data to BigQuery.
```
Answer:
BLOCK 1: function: load data from MySQL. upstream:
BLOCK 2: function: load data from Postgres. upstream:
BLOCK 3: function: filter out rows with book_price > 100. upstream: 1, 2
BLOCK 4: function: export data to BigQuery. upstream: 3
<code description>: ```{code_description}```"""
PROMPT_FOR_FUNCTION_COMMENT = """
The content within the triple backticks is a code block.
Your task is to write comments for each function inside.
```{block_content}```
The comment should follow Google Docstring format.
Return your response in JSON format with function name as key and the comment as value.
"""
BLOCK_SPLIT_PATTERN = r"BLOCK\s+(\w+):\s+function:\s+(.*?)\.\s+upstream:\s*(.*?)$"
TRANSFORMERS_FOLDER = 'transformers'
CLASSIFICATION_FUNCTION_NAME = "classify_description"
TEMPLATE_CLASSIFICATION_FUNCTION = [
{
"name": CLASSIFICATION_FUNCTION_NAME,
"description": "Classify the code description provided into following properties.",
"parameters": {
"type": "object",
"properties": {
BlockType.__name__: {
"type": "string",
"description": "Type of the code block. It either "
"loads data from a source, export data to a source "
"or transform data from one format to another.",
"enum": [f"{BlockType.__name__}__data_exporter",
f"{BlockType.__name__}__data_loader",
f"{BlockType.__name__}__transformer"]
},
BlockLanguage.__name__: {
"type": "string",
"description": "Programming language of the code block. "
f"Default value is {BlockLanguage.__name__}__python.",
"enum": [f"{BlockLanguage.__name__}__{type.name.lower()}"
for type in BlockLanguage]
},
PipelineType.__name__: {
"type": "string",
"description": "Type of pipeline to build. Default value is "
f"{PipelineType.__name__}__python if pipeline type "
"is not mentioned in the description.",
"enum": [f"{PipelineType.__name__}__{type.name.lower()}"
for type in PipelineType]
},
ActionType.__name__: {
"type": "string",
"description": f"If {BlockType.__name__} is transformer, "
f"{ActionType.__name__} specifies what kind "
"of action the code performs.",
"enum": [f"{ActionType.__name__}__{type.name.lower()}" for type in ActionType]
},
DataSource.__name__: {
"type": "string",
"description": f"If {BlockType.__name__} is data_loader or "
f"data_exporter, {DataSource.__name__} field specify "
"where the data loads from or exports to.",
"enum": [f"{DataSource.__name__}__{type.name.lower()}" for type in DataSource]
},
},
"required": [BlockType.__name__, BlockLanguage.__name__, PipelineType.__name__],
},
}
]
class LLMPipelineWizard:
def __init__(self):
repo_config = get_repo_config()
openai_api_key = repo_config.openai_api_key or os.getenv('OPENAI_API_KEY')
openai.api_key = openai_api_key
self.llm = OpenAI(openai_api_key=openai_api_key, temperature=0)
async def __async_llm_generate_documentation(
self,
block_content: str,
file_type: str,
purpose: str,
template: str,
add_on_prompt: str = '',
):
prompt_template = PromptTemplate(
input_variables=[
'block_content',
'file_type',
'purpose',
'add_on_prompt'
],
template=template,
)
chain = LLMChain(llm=self.llm, prompt=prompt_template)
return await chain.arun(block_content=block_content,
file_type=file_type,
purpose=purpose,
add_on_prompt=add_on_prompt)
def __parse_argument_value(self, value: str) -> str:
if value is None:
return None
return value.lower().split('__')[1]
def __load_template_params(self, function_args: json):
block_type = BlockType(self.__parse_argument_value(function_args[BlockType.__name__]))
block_language = BlockLanguage(
self.__parse_argument_value(
function_args.get(BlockLanguage.__name__)
) or "python")
pipeline_type = PipelineType(
self.__parse_argument_value(
function_args.get(PipelineType.__name__)
) or "python")
config = {}
config['action_type'] = self.__parse_argument_value(
function_args.get(ActionType.__name__))
if config['action_type']:
if config['action_type'] in [
ActionType.FILTER,
ActionType.DROP_DUPLICATE,
ActionType.REMOVE,
ActionType.SORT
]:
config['axis'] = Axis.ROW
else:
config['axis'] = Axis.COLUMN
config['data_source'] = self.__parse_argument_value(
function_args.get(DataSource.__name__))
return block_type, block_language, pipeline_type, config
async def async_generate_block_with_description(
self,
block_description: str,
upstream_blocks: [str]) -> dict:
messages = [{"role": "user", "content": block_description}]
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=TEMPLATE_CLASSIFICATION_FUNCTION,
function_call={"name": CLASSIFICATION_FUNCTION_NAME}, # explicitly set function call
)
response_message = response["choices"][0]["message"]
if response_message.get("function_call"):
function_args = json.loads(response_message["function_call"]["arguments"])
block_type, block_language, pipeline_type, config = self.__load_template_params(
function_args)
return dict(
block_type=block_type,
configuration=config,
content=fetch_template_source(
block_type=block_type,
config=config,
language=block_language,
pipeline_type=pipeline_type,
),
language=block_language,
upstream_blocks=upstream_blocks,
)
else:
logger.error("Failed to interpret the description as a block template.")
return None
async def __async_split_description_by_blocks(self, code_description: str) -> str:
prompt_template = PromptTemplate(
input_variables=[
'code_description',
],
template=PROMPT_TO_SPLIT_BLOCKS,
)
chain = LLMChain(llm=self.llm, prompt=prompt_template)
return await chain.arun(code_description=code_description)
async def __async_generate_blocks(self,
block_dict: dict,
block_id: int,
block_description: str,
upstream_blocks: [str]) -> dict:
block = await self.async_generate_block_with_description(block_description, upstream_blocks)
block_dict[block_id] = block
async def async_generate_pipeline_from_description(self, pipeline_description: str) -> dict:
splited_block_descriptions = await self.__async_split_description_by_blocks(
pipeline_description)
blocks = {}
block_tasks = []
for line in splited_block_descriptions.strip().split('\n'):
if line.startswith("BLOCK") and ":" in line:
# Extract the block_id and block_description from the line
match = re.search(BLOCK_SPLIT_PATTERN, line)
if match:
block_id = match.group(1)
block_description = match.group(2).strip()
upstream_blocks = match.group(3).split(", ")
block_tasks.append(
self.__async_generate_blocks(
blocks,
block_id,
block_description,
upstream_blocks))
await asyncio.gather(*block_tasks)
return blocks
def __insert_comments_in_functions(self, code: str, function_comments: Dict):
# Parse the input code into an abstract syntax tree (AST).
tree = ast.parse(code)
# Traverse the AST and find function definitions.
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
function_name = node.name
if function_comments.get(function_name):
comment_text = function_comments[function_name]
# Insert a comment node below a given node.
if isinstance(node.body[0], ast.Expr) and \
isinstance(node.body[0].value, ast.Constant):
# If there is existing doc string, combine the new comment with it.
existing_comment_node = node.body[0]
existing_comment_text = node.body[0].value.value
new_comment = ast.Expr(
value=ast.Str(s=f"{comment_text}\n{existing_comment_text}"))
node.body.remove(existing_comment_node)
else:
# Add newly generated doc string.
new_comment = ast.Expr(value=ast.Str(s=comment_text))
node.body.insert(0, new_comment)
return ast.unparse(tree)
async def async_generate_comment_for_block(self, block_content: str) -> str:
prompt_template = PromptTemplate(
input_variables=[
'block_content',
],
template=PROMPT_FOR_FUNCTION_COMMENT,
)
chain = LLMChain(llm=self.llm, prompt=prompt_template)
function_comments_json = await chain.arun(block_content=block_content)
function_comments = json.loads(function_comments_json)
return self.__insert_comments_in_functions(block_content, function_comments)
async def async_generate_pipeline_documentation(
self,
pipeline_uuid: str,
project_path: str = None,
print_block_doc: bool = False,
) -> dict:
pipeline = Pipeline.get(
uuid=pipeline_uuid,
repo_path=project_path or get_repo_path(),
)
async_block_docs = []
for block in pipeline.blocks_by_uuid.values():
if block.type in NON_PIPELINE_EXECUTABLE_BLOCK_TYPES:
continue
async_block_docs.append(self.__async_generate_block_documentation(block))
block_docs = await asyncio.gather(*async_block_docs)
block_docs_content = '\n'.join(block_docs)
if print_block_doc:
print(block_docs_content)
prompt_template = PromptTemplate(input_variables=['block_content'],
template=PROMPT_FOR_SUMMARIZE_BLOCK_DOC)
chain = LLMChain(llm=self.llm, prompt=prompt_template)
pipeline_doc = chain.run(block_content=block_docs_content)
return dict(
block_docs=block_docs,
pipeline_doc=pipeline_doc,
)
async def async_generate_block_documentation_with_name(
self,
pipeline_uuid: str,
block_uuid: str,
project_path: str = None,
) -> str:
pipeline = Pipeline.get(uuid=pipeline_uuid,
repo_path=project_path or get_repo_path())
return asyncio.run(
self.__async_generate_block_documentation(pipeline.get_block(block_uuid)))
async def __async_generate_block_documentation(
self,
block: Block,
) -> str:
add_on_prompt = ""
if block.type == BlockType.TRANSFORMER:
add_on_prompt = "Focus on the customized business logic in execute_transformer_action \
function."
return await self.__async_llm_generate_documentation(
block.content,
BLOCK_LANGUAGE_TO_FILE_TYPE_VARIABLE[block.language],
BLOCK_TYPE_TO_PURPOSE_VARIABLE.get(block.type, ""),
PROMPT_FOR_BLOCK,
add_on_prompt
)
| [
"loads data from a source, export data to a source ",
"is not mentioned in the description.",
"where the data loads from or exports to.",
"enum",
"add_on_prompt",
"or transform data from one format to another.",
"Classify the code description provided into following properties.",
"\nThe content within the triple backticks is a code block.\nYour task is to write comments for each function inside.\n\n```{block_content}```\n\nThe comment should follow Google Docstring format.\nReturn your response in JSON format with function name as key and the comment as value.\n",
"name",
"block_content",
"code_description",
"description",
"Type of pipeline to build. Default value is ",
"of action the code performs.",
"string",
"\nA data pipeline reads data from source, transform the data and export data into another source.\nThe content delimited by triple backticks contains explains of all components in one data pipeline.\nWrite a detailed summarization of the data pipeline based on the content provided.\n```{block_content}```",
"properties",
"Programming language of the code block. ",
"parameters",
"\nThe {file_type} delimited by triple backticks is used to {purpose}.\nWrite a documentation based on the {file_type}. {add_on_prompt}\nIgnore the imported libraries and the @test decorator.\n```{block_content}```",
"\nA BLOCK does one action either reading data from one data source, transforming the data from\none format to another or exporting data into a data source.\nBased on the code description delimited by triple backticks, your task is to identify\nhow many BLOCKS required, function for each BLOCK and upstream blocks between BLOCKs.\n\nUse the following format:\nBLOCK 1: function: <block function>. upstream: <upstream blocks>\nBLOCK 2: function: <block function>. upstream: <upstream blocks>\nBLOCK 3: function: <block function>. upstream: <upstream blocks>\n...\n\nExample:\n<code description>: ```\nRead data from MySQL and Postgres, filter out rows with book_price > 100, and save data to BigQuery.\n```\n\nAnswer:\nBLOCK 1: function: load data from MySQL. upstream:\nBLOCK 2: function: load data from Postgres. upstream:\nBLOCK 3: function: filter out rows with book_price > 100. upstream: 1, 2\nBLOCK 4: function: export data to BigQuery. upstream: 3\n\n<code description>: ```{code_description}```",
"Focus on the customized business logic in execute_transformer_action function.",
"Type of the code block. It either "
] |
2024-01-10 | a-nnurag/myfirstproject-desktopasst- | submitteddesktopassistant.py | import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import smtplib
import openai
from time import sleep
import pyautogui
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
#print(voices[1].id)
engine.setProperty('voices' , voices[0].id)
def speak (audio):
"with this jarvis speaks"
engine.say(audio)
engine.runAndWait()
def wishMe():
hour =int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("good morning")
elif hour>=12 and hour<=18:
speak("good afternoon")
else:
speak("good evening")
speak("i am Jarvis sir how may i help you")
def takeCommand():
"it takes microphone input from the user and gives string output"
r=sr.Recognizer()
with sr.Microphone() as source :
print("listening...")
r.pause_threshold=1
audio = r.listen(source)
try:
print("Recognizing..")
query = r.recognize_google(audio,language='en-in')
print(f"User said: {query}\n")
except Exception as e:
print(e)#to print the exception
print("say that again please")
return "NONE"
return query
def sendEmail(to,content):
server = smtplib.SMTP('smtp.gmail.com',587)#587 is the port
server.ehlo()
server.start.ls()
server.login('your gmail','your gmail password')
server.sendmail('receiver gmail',to,content)
server.close()
def GPT(query):
openai.api_key="open ai api key"
model_engine="text-davinci-003"#engine used is davinci may at your time it may be discarded
prompt=query
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5
)
response =completion.choices[0].text
print(response)
return (response)
if __name__ == "__main__":
wishMe()
while True :
#query ='GPT indigo'
query=takeCommand().lower()
#logic for executing talks based on query
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query=query.replace("wikipedia","")
results=wikipedia.summary(query,sentences=2,auto_suggest=False)##it was auto suggesting asia to as8a hence made
speak("According to wikipedia")
print(results)
speak(results)
#break
elif 'open youtube' in query:
webbrowser.open("youtube.com")#how to open it on chrome
#break
elif '.com' in query:
query=query.replace("jarvis"," ")
query=query.replace("open"," ")
query=query.strip()
print(query)
webbrowser.open(query)
#break
elif 'play offline music ' in query:#for offline music
music_dir='dir path'
songs=os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir,songs[0]))#try using random module to play a song
#break
elif 'the time' in query:
strTime=datetime.datetime.now().strftime("%H:%M%S")
speak(f"the time is {strTime}")
#break
elif 'open code' in query :
codePath = "set c drive path it is specifically for visual code studio it can be designed for others also"
os.startfile(codePath)
#break
elif 'email' in query:#make dictionary use try and except#enable less secure apps to use this function
try:
speak("what should i say!")
content = "hello send this trying to fugure out"
to = "[email protected]"
sendEmail(to,content)
speak("email has been sent!")
except Exception as e:
print (e)
speak("not able to send email")
#break
elif 'GPT' in query :
speak("According to open ai chatgpt")
query =query.replace('GPT','')
response=GPT(query)
speak(response)
#break
elif 'listen song spotify' in query or 'play music' in query:
speak("sir what song do you like to listen")
song=takeCommand().lower()
webbrowser.open(f'https://open.spotify.com/search/{song}')
sleep(13)
pyautogui.click(x=1055,y=617)
speak("playing"+song)
#break
| [] |
2024-01-10 | arnx813/parsec-chatgpt-twitter | script.py | import openai
import os
def chatbot_completion(message):
api_key = os.environ["OPENAI_API_KEY"]
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": message}
]
)
return completion.choices[0].message.content
| [] |
2024-01-10 | abhiyant-10/Denjiro-Financial_Ninja | GUI.py | from datetime import datetime
import os
import singlestoredb as s2
import pyaudio
import wave
import json
import pandas as pd
import numpy as np
import openai
import tkinter as tk
import tkinter.ttk as ttk
from tkinter import Canvas
from tkinter import Label
from sqlalchemy import create_engine
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.llms.openai import OpenAI
from langchain.agents import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
os.environ["OPENAI_API_KEY"] = "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"
openai.api_key = os.environ["OPENAI_API_KEY"]
# SS API Key : 6b3c9c2fa918cff89da47cdb95849ffecb909104aee2d464da03f7c06d7ccf40
user = 'admin'
password = 'Password123!'
host = 'svc-39644b79-076a-44e6-8b7f-d6415f18d4c8-dml.aws—virginia-6.svc.singlestore.com'
port = 3306
database = 'ai_demo'
table_name = 'embeddings'
model = 'text-embedding-ada-002'
# Create the agent executor
db = SQLDatabase.from_uri(f"mysql+pymysql://{user}:{password}@{host}:{port}/{database}", include_tables=['embeddings', 'stock_table'], sample_rows_in_table_info=1)
print(db)
llm = OpenAI(openai_api_key=os.environ["OPENAI_API_KEY"], temperature=0, verbose=True)
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
agent_executor = create_sql_agent(
llm=OpenAI(temperature=0),
toolkit=toolkit,
verbose=True,
prefix= '''
You are an agent designed to interact with a SQL database called SingleStore. This sometimes has Shard and Sort keys in the table schemas, which you can ignore.
\nGiven an input question, create a syntactically correct MySQL query to run, then look at the results of the query and return the answer.
\n If you are asked about similarity questions, you should use the DOT_PRODUCT function.
\nHere are a few examples of how to use the DOT_PRODUCT function:
\nExample 1:
Q: how similar are the questions and answers?
A: The query used to find this is:
select question, answer, dot_product(question_embedding, answer_embedding) as similarity from embeddings;
\nExample 2:
Q: What are the most similar questions in the embeddings table, not including itself?
A: The query used to find this answer is:
SELECT q1.question as question1, q2.question as question2, DOT_PRODUCT(q1.question_embedding, q2.question_embedding) :> float as score
FROM embeddings q1, embeddings q2
WHERE question1 != question2
ORDER BY score DESC LIMIT 5;
\nExample 3:
Q: In the embeddings table, which rows are from the chatbot?
A: The query used to find this answer is:
SELECT category, question, answer FROM embeddings
WHERE category = 'chatbot';
\nUnless the user specifies a specific number of examples they wish to obtain, always limit your query to at most {top_k} results.
\n The question embeddings and answer embeddings are very long, so do not show them unless specifically asked to.
\nYou can order the results by a relevant column to return the most interesting examples in the database.
\nNever query for all the columns from a specific table, only ask for the relevant columns given the question.
\nYou have access to tools for interacting with the database.\nOnly use the below tools.
Only use the information returned by the below tools to construct your final answer.
\nYou MUST double check your query before executing it. If you get an error while executing a query, rewrite the query and try again up to 3 times.
\n\nDO NOT make any DML statements (INSERT, UPDATE, DELETE, DROP etc.) to the database.
\n\nIf the question does not seem related to the database, just return "I don\'t know" as the answer.\n,
''',
format_instructions='''Use the following format:\n
\nQuestion: the input question you must answer
\nThought: you should always think about what to do
\nAction: the action to take, should be one of [{tool_names}]
\nAction Input: the input to the action
\nObservation: the result of the action
\n... (this Thought/Action/Action Input/Observation can repeat 10 times)
\nThought: I now know the final answer
\nFinal Answer: the final answer to the original input question\n
\n\nSQL Query used to get the Answer: the final sql query used for the final answer',
''',
top_k=5,
max_iterations=10
)
# User Interface Creation
root = tk.Tk()
root.geometry("750x570")
# root.geometry("500x380")
root.title("Denjiro(⚔️—⚔️)")
# Labels
name = Label(root, text="Query", font=("Arial", 22)).place(x=30,y=30)
response = Label(root, text="Chatbot response").place(x=30,y=290)
# name = Label(root, text="Question").place(x=20,y=20)
# response = Label(root, text="Chatbot response").place(x=20,y=160)
# Create the text entry widget
entry = ttk.Entry(root, font=("Arial", 22))
entry.pack(padx=30, pady=75, fill=tk.X)
# entry = ttk.Entry(root, font=("Arial", 14))
# entry.pack(padx=20, pady=50, fill=tk.X)
entry.insert(0, f"Enter your database {database} query here")
# get embedding functions
def get_embedding(text, model=model):
text = text.replace("\n", " ")
return openai.Embedding.create(input = [text], model=model)['data'][0]['embedding']
def insert_embedding(question):
category = 'chatbot'
question_embedding = get_embedding(question, model=model)
answer = agent_executor.run(question)
answer_embedding = get_embedding(answer, model=model)
created_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# add questions and answer embeddings to a dataframe
df = pd.DataFrame(columns=['category','question','question_embedding','answer', 'answer_embedding', 'created_at'])
new_row = {'category':category, 'question':question, 'question_embedding':question_embedding,'answer':answer, 'answer_embedding':answer_embedding, 'created_at':created_at }
df = df.append(new_row, ignore_index=True)
# print(df['answer'])
# send to SingleStore
mystmt = "INSERT INTO {} (category, question, question_embedding, answer, answer_embedding, created_at) VALUES ('{}',\n'{}', \njson_array_pack('{}'), \n'{}', \njson_array_pack('{}'), \n'{}')"
for i in range(len(df)):
stmt = mystmt.format(table_name, df['category'][i],df['question'][i].replace("'",""), df['question_embedding'][i], df['answer'][i].replace("'",""), df['answer_embedding'][i], df['created_at'][i])
# executable_stmt = text(stmt)
engine = s2.connect(host=host, port=port, user=user, password=password, database=database)
with engine:
with engine.cursor() as cur:
cur.execute(stmt)
for row in cur.fetchall():
print(row)
cur.close()
# Create the button callback
def on_click():
# Get the query text from the entry widget
query = entry.get()
# Run the query using the agent executor
result = agent_executor.run(query)
# Display the result in the text widget
text.delete("1.0", tk.END)
text.insert(tk.END, result)
# get result embedding
result_embedding = get_embedding(result)
insert_embedding(query)
# Create the clear button callback
def clear_text():
text.delete("1.0", tk.END)
# Clear the entry widget
entry.delete(0, tk.END)
entry.insert(0, f"Enter your quesion on database: {database}")
# Create noise gate
def apply_noise_gate(audio_data, threshold):
# Calculate the root mean square (RMS) of the audio data
valid_data = np.nan_to_num(audio_data, nan=0.0)
# valid_data = ... # Your valid data here
# Compute the square of valid_data
squared_data = np.square(valid_data)
# Check for negative or invalid values
invalid_indices = np.isnan(squared_data) | (squared_data < 0)
# Set negative or invalid values to 0
squared_data[invalid_indices] = 0
# Compute the mean of squared_data
mean_squared = np.mean(squared_data)
# Compute the root mean square (RMS)
rms = np.sqrt(mean_squared)
# Check if the RMS value is a valid number
if np.isnan(rms):
return audio_data
# If RMS is below the threshold, set all samples to zero
if rms < threshold:
audio_data = np.zeros_like(audio_data)
return audio_data
# Create the mic button callback
def record_audio(output_file, sample_rate=44100, chunk_size=1024, audio_format=pyaudio.paInt16, channels=1, threshold=0.01):
audio = pyaudio.PyAudio()
print('say something')
# replace with beep?
# Open the microphone stream
stream = audio.open(format=audio_format,
channels=channels,
rate=sample_rate,
input=True,
frames_per_buffer=chunk_size)
frames = []
silence_frames = 0
silence_threshold = 80.01 # Adjust this value according to your environment
# Record audio until there is 2 seconds of silence
while True:
data = stream.read(chunk_size)
frames.append(data)
# Convert data to numpy array for analysis
audio_data = np.frombuffer(data, dtype=np.int16)
# Apply noise gate to reduce background noise
audio_data = apply_noise_gate(audio_data, threshold)
# Check if the audio is silent (below the threshold)
if np.max(np.abs(audio_data)) < silence_threshold:
silence_frames += 1
else:
silence_frames = 0
# Break the loop if there is 2 seconds of silence
if silence_frames / (sample_rate / chunk_size) >= 2:
break
# Stop and close the stream
stream.stop_stream()
stream.close()
audio.terminate()
# Save the recorded audio as a WAV file
wave_file = wave.open(output_file, 'wb')
wave_file.setnchannels(channels)
wave_file.setsampwidth(audio.get_sample_size(audio_format))
wave_file.setframerate(sample_rate)
wave_file.writeframes(b''.join(frames))
wave_file.close()
def transcribe_mic():
# Usage
output_file = 'recording.wav'
record_audio(output_file)
print(f"Recording saved as {output_file}")
audio_file= open("recording.wav", "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
entry.delete(0,tk.END)
entry.insert(0, transcript["text"])
on_click()
def mic_button_actions():
ttk.Button(root, text="Mic", command=transcribe_mic).place(x=45, y=150)
# ttk.Button(root, text="Mic", command=transcribe_mic).place(x=30, y=100)
new_embedding = get_embedding(transcript)
print(new_embedding)
# Create the mic button widget
# Create a style with the desired font settings
style = ttk.Style()
style.configure("TButton", font=("Arial", 22))
mic_button = ttk.Button(root, text="Mic", command=transcribe_mic).place(x=45, y=150)
# mic_button = ttk.Button(root, text="Mic", command=transcribe_mic).place(x=30, y=100)
# Create the button widget
button = ttk.Button(root, text="Chat", command=on_click).place(x=225, y=150)
# button = ttk.Button(root, text="Chat", command=on_click).place(x=150, y=100)
# Create the clear button widget
clear_button = ttk.Button(root, text="Reset", command=clear_text).place(x=405, y=150)
# clear_button = ttk.Button(root, text="Reset", command=clear_text).place(x=270, y=100)
# Create the text widget to display the result
text = tk.Text(root, height=15, width=90, font=("Arial", 22))
# text = tk.Text(root, height=10, width=60, font=("Arial", 14))
text.pack(side=tk.BOTTOM, padx=20, pady=20)
# Start the UI event loop
root.mainloop() | [] |
2024-01-10 | JEF1056/Jv6 | interact.py | import argparse
from argparse import ArgumentParser
from itertools import chain
import torch.nn.functional as F
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from discord_webhook import DiscordWebhook, DiscordEmbed
import pickle, discord, re, random, os, dbl, warnings, torch
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import time, requests, datetime
from datetime import date
from discord.ext.tasks import loop
import json, build_versions
global client, config
client = discord.Client()
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
ATTR_TO_SPECIAL_TOKEN = {'bos_token': '<bos>', 'eos_token': '<eos>', 'pad_token': '<pad>',
'additional_special_tokens': ['<speaker1>', '<speaker2>']}
with open('config.json') as json_file:
config = json.load(json_file)
def build_input_from_segments(persona, history, reply, tokenizer, lm_labels=False, with_eos=True):
""" Build a sequence of input from 3 segments: persona, history and last reply. """
bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
sequence = [[bos] + list(chain(*persona))] + history + [reply + ([eos] if with_eos else [])]
sequence = [sequence[0]] + [[speaker2 if (len(sequence)-i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
instance = {}
instance["input_ids"] = list(chain(*sequence))
instance["token_type_ids"] = [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s]
instance["mc_token_ids"] = len(instance["input_ids"]) - 1
instance["lm_labels"] = [-100] * len(instance["input_ids"])
if lm_labels:
instance["lm_labels"] = ([-100] * sum(len(s) for s in sequence[:-1])) + [-100] + sequence[-1][1:]
return instance
def add_special_tokens_(model, tokenizer):
""" Add special tokens to the tokenizer and the model if they have not already been added. """
orig_num_tokens = len(tokenizer.encoder)
num_added_tokens = tokenizer.add_special_tokens(ATTR_TO_SPECIAL_TOKEN) # doesn't add if they are already there
if num_added_tokens > 0:
model.resize_token_embeddings(new_num_tokens=orig_num_tokens + num_added_tokens)
def top_filtering(logits, top_k=0., top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
"""
assert logits.dim() == 1 # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, int(top_k))[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(args.max_length):
instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
logits = model(input_ids, token_type_ids=token_type_ids)
if isinstance(logits, tuple): # for gpt2 and maybe others
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
if probs.max().item() == 1:
warnings.warn("Warning: model generating special token with probability 1.")
break # avoid infinitely looping over special token
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def similarity(X,Y):
X_list = word_tokenize(X)
Y_list = word_tokenize(Y)
sw = stopwords.words('english')
l1 =[];l2 =[]
X_set = {w for w in X_list if not w in sw}
Y_set = {w for w in Y_list if not w in sw}
rvector = X_set.union(Y_set)
for w in rvector:
if w in X_set: l1.append(1) # create a vector
else: l1.append(0)
if w in Y_set: l2.append(1)
else: l2.append(0)
c = 0
for i in range(len(rvector)):
c+= l1[i]*l2[i]
try:
cosine = c / float((sum(l1)*sum(l2))**0.5)
except:
cosine = 0
return cosine
def avg_similarity(max_history,h):
total = 0
h=h[:-1]
if len(h) < max_history:
h=h[1:]
for i,val in enumerate(h):
if i % 2 == 0:
try:
total += similarity(h[i],h[i+2])
except:
pass
total=total/(len(h)//2)
return total
parser = ArgumentParser()
parser.add_argument("--model", type=str, default="openai-gpt", help="Model type (openai-gpt or gpt2)", choices=['openai-gpt', 'gpt2']) # anything besides gpt2 will load openai-gpt
parser.add_argument("--model_checkpoint", type=str, default="run0", help="Path, url or short name of the model")
parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
parser.add_argument("--temperature", type=int, default=0.85, help="Sampling softmax temperature")
parser.add_argument("--top_k", type=int, default=40, help="Filter top-k tokens before sampling (<=0: no filtering)")
parser.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
parser.add_argument("--seed", type=int, default=random.randint(0,9999999999), help="Seed")
parser.add_argument("--auto-seed", type=str2bool, default=True, help="auto-seeding")
parser.add_argument("--max_history", type=int, default=4, help="Number of previous utterances to keep in history")
parser.add_argument("--no_sample", type=str2bool, default=False, help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=10, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
args = parser.parse_args()
args1 = parser.parse_args()
loaded=0
try:
if loaded != 1:
if args.model_checkpoint == "":
raise ValueError("Interacting requires passing a finetuned model_checkpoint")
if args.seed != 0:
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
print("Get pretrained model and tokenizer")
tokenizer_class, model_class = (GPT2Tokenizer, GPT2LMHeadModel) if args.model == 'gpt2' else (OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
add_special_tokens_(model, tokenizer)
personalities = pickle.load(open(os.path.join(args.model_checkpoint, "versions.p"), "rb"))
personality = random.choice(personalities)
print("Selected personality:", tokenizer.decode(chain(*personality)))
loaded=1
del loaded
except Exception as e:
print(e)
pass
def get_history(message):
try:
hist=""
history= pickle.load(open("hist/"+str(message.guild.id)+".p", "rb"))["history"]
for i, p in enumerate(history):
if i % 2 == 0:
hist+="> "+tokenizer.decode(p, skip_special_tokens=True)+"\n"
else:
hist+=tokenizer.decode(p, skip_special_tokens=True)+"\n"
if len(hist) == 0:
return "No History!"
else:
return hist
except Exception as e:
print(e)
return "No History!"
global dbli
dbli=dbl.DBLClient(client, config["dbltoken"])
@loop(seconds=1800)
async def update_guilds():
global dbli, client
print("Posting a guild count of " + str(len(client.guilds)))
await dbli.post_guild_count()
requests.post("https://bots.ondiscord.xyz/bot-api/bots/410253782828449802/guilds", data = 'Authorization: '+config["botsondiscord"]+'\nContent-Type: application/json\n{"guildCount": '+str(len(client.guilds))+'}')
requests.post("https://discord.bots.gg/api/v1/bots/410253782828449802/stats", data = 'Authorization: '+config["dbots.gg"]+'\nContent-Type: application/json\n{"guildCount": '+str(len(client.guilds))+'}')
@client.event
async def on_guild_join(guild):
try:
webhook = DiscordWebhook(url=config["logchannel"], avatar_url=str(guild.icon_url), username=str(guild.name))
embed = DiscordEmbed(title="Joined guild", description=str(guild.id), color=0xaaff88)
embed.set_author(name=str(guild),icon_url=str(guild.icon_url))
embed.set_footer(text=str(time.strftime('%X %x %Z')))
webhook.add_embed(embed)
webhook.execute()
except:
pass
@client.event
async def on_guild_remove(guild):
try:
os.remove("hist/"+str(guild.id)+".p")
except:
pass
try:
webhook = DiscordWebhook(url=config["logchannel"], avatar_url=str(guild.icon_url), username=str(guild.name))
embed = DiscordEmbed(title="Left guild", description=str(guild.id), color=0xff9988)
embed.set_author(name=str(guild),icon_url=str(guild.icon_url))
embed.set_footer(text=str(time.strftime('%X %x %Z')))
webhook.add_embed(embed)
webhook.execute()
except:
pass
@client.event
async def on_ready():
global personality, tokenizer, model, dbli
print('Logged in as '+client.user.name+' (ID:'+str(client.user.id)+') | Connected to '+str(len(client.guilds))+' servers | Connected to '+ str(len(set(client.get_all_members()))) +' users')
print('--------')
print("Discord.py verison: " + discord.__version__)
await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="everyone talk ✨✨"))
update_guilds.start()
prefix=config["prefix"]
current_version=1
global t1, settings, history,user_version
try:
udata=pickle.load(open("hist/user/users.p", "rb"))
except:
pickle.dump({"message_total":{},"message_rate":{},"users":{}}, open("hist/user/users.p", "wb"))
@client.event
async def on_message(message):
if message.guild==None and message.author.bot == False:
embed=discord.Embed(title="DM integration", url="https://www.notion.so/jadeai/1c0f1d42eb6345b58013a1be35e47793?v=d45f7f3d26e140c995f8a9021564bb99", description="Dms are not supported yet! when they are, they will require a upvote on top.gg and a confimed server referral to a server with 10+ non-bot members", color=0x80ff80)
await message.channel.send(embed=embed)
elif message.author.bot == False:
global personality, tokenizer, model, client, t1, settings, history,user_version
if message.content.lower().startswith(prefix):
history=[]
settings=args1
user_version=-1
try:
user_status=await dbli.get_user_vote(user_id=message.author.id)
except:
user_status=False
udata=pickle.load(open("hist/user/users.p", "rb"))
try:
user_data=udata["users"][message.author.id]
except:
user_data={"timestamp": time.time()-30, "message_count":0}
try:
for key, value in pickle.load(open("hist/"+str(message.guild.id)+".p", "rb")).items():
globals()[str(key)]=value
except Exception as e:
args.seed=random.randint(0,9999999999)
t1=time.time()-30
pickle.dump({"t1":t1,"settings":args,"history":[], "user_version":current_version}, open("hist/"+str(message.guild.id)+".p", "wb"))
if user_version != current_version:
for version_num in range(user_version+1, current_version+1):
try:
await message.channel.send(embed=build_versions.version_message(version_num, client, prefix))
except: pass
if message.content.lower() == prefix+"-h":
embed=await build_versions.make_help(dbli, client, prefix)
await message.channel.send(embed=embed, delete_after=150)
try:
await message.delete()
except:
pass
elif message.content.lower() == prefix+"-p":
embed=discord.Embed(title="User Profile: "+ str(message.author), url="https://jadeai.ml", color=0x80ff80)
embed.set_thumbnail(url=message.author.avatar_url)
embed.add_field(name="Last seen", value= str(datetime.datetime.fromtimestamp(user_data["timestamp"]).strftime('%X %x')) + time.strftime(" %Z"), inline=False)
embed.add_field(name="Number of Messages", value= str(user_data["message_count"]), inline=False)
embed.set_footer(text="Global Total: " + str(udata["message_total"][str(date.today())]))
await message.channel.send(embed=embed, delete_after=150)
try:
await message.delete()
except:
pass
elif message.content.lower() == prefix+"-v":
embed=discord.Embed(title="Voting Link", url="https://top.gg/bot/410253782828449802/vote", color=0x80ff80)
embed.set_image(url=await dbli.get_widget_large(client.user.id))
if await dbli.get_user_vote(user_id=message.author.id):
embed.set_footer(text="Thanks for supporting Jade!")
else:
embed.set_footer(text="You have yet to vote for Jade!")
embed.set_author(name=str(message.author), icon_url=message.author.avatar_url)
await message.channel.send(embed=embed, delete_after=100)
try:
await message.delete()
except:
pass
elif message.content.lower() == prefix+"-s":
history=get_history(message)
settings=vars(settings)
embed= discord.Embed(title="Settings", url="https://jadeai.ml", description="__ __\nServer-Side Settings 🔒", color=0x80ff80)
embed.add_field(name="model", value=str(settings["model"]), inline=True)
embed.add_field(name="model_checkpoint", value=str(settings["model_checkpoint"]), inline=True)
embed.add_field(name="device", value=str(settings["device"]), inline=True)
embed.add_field(name="__ __", value="User-Changable Settings 🔓", inline=False)
embed.add_field(name="temperature", value=str(settings["temperature"])+"/1", inline=True)
embed.add_field(name="top_k", value=str(settings["top_k"]), inline=True)
embed.add_field(name="top_p", value=str(settings["top_p"])+"/1", inline=True)
if user_status:
val="Supporter-Only Settings 🔓"
else:
val="Supporter-Only Settings 🔐 [vote for her here](https://top.gg/bot/410253782828449802/vote)"
embed.add_field(name="__ __", value=val, inline=False)
embed.add_field(name="seed", value=str(settings["seed"]), inline=True)
embed.add_field(name="auto_seed", value=str(settings["auto_seed"]), inline=True)
embed.add_field(name="max_history", value=str(settings["max_history"])+'/10', inline=True)
embed.add_field(name="max_length", value=str(settings["max_length"])+"/20", inline=True)
embed.add_field(name="no_sample", value=str(settings["no_sample"]), inline=True)
embed.add_field(name="", value="", inline=True)
embed.add_field(name="__ __", value="History", inline=False)
if len(get_history(message).replace("> ","").split("\n")) >=4:
embed.add_field(name="Jade similarity score: `"+ str(avg_similarity(settings["max_history"],get_history(message).replace("> ","").split("\n")))+"`", value=get_history(message), inline=False)
else:
embed.add_field(name="Jade similarity score: `NAN`", value=get_history(message), inline=False)
await message.channel.send(embed=embed, delete_after=300)
try:
await message.delete()
except:
pass
elif message.content.lower().startswith(prefix+"-s "):
parameter=message.content.lower()[len(prefix)+3:].split(" ")
any_changes=False
if len(parameter) == 2:
alt_settings=vars(settings)
server=["model", "model_checkpoint", "device"]
client_side=["temperature","top_k","top_p"]
privledged=["no_sample","seed", "auto_seed", "max_history", "max_length"]
limiters={"temperature":{"max": 1, "type":float}, "top_k":{"max": float("inf"), "type":int}, "top_p":{"max": 1, "type":float},
"no_sample":{"type":str2bool}, "seed":{"max": float("inf"), "type":int}, "auto_seed":{"type":str2bool},
"max_history":{"max": 10, "type":int}, "max_length":{"max": 20, "type":int}}
if parameter[0] in server:
embed=discord.Embed(title="Settings", description="`"+str(parameter[0])+"` is a server-side setting, and cannot be changed.", color=0x80ff80)
elif parameter[0] in privledged and user_status==False:
embed=discord.Embed(title="Settings", description="`"+str(parameter[0])+"` is a supporter-only setting. [vote for Jade on top.gg](https://top.gg/bot/410253782828449802/vote)", color=0x80ff80)
elif (parameter[0] in client_side) or parameter[0] in privledged and user_status==True:
ch=limiters[parameter[0]]["type"](parameter[1])
if limiters[parameter[0]]["type"] == float or limiters[parameter[0]]["type"] == int:
if limiters[parameter[0]]["max"] >= ch and ch >= 0:
embed=discord.Embed(title="Settings", description="`"+str(parameter[0])+"` changed from `"+str(alt_settings[parameter[0]])+"` to `"+str(parameter[1])+"`", color=0x80ff80)
embed.set_footer(text="Default setting: "+str(vars(args1)[parameter[0]]))
alt_settings[parameter[0]]=ch
any_changes=True
else:
embed=discord.Embed(title="Settings", description="`"+str(parameter[0])+"` could not be changed from `"+str(alt_settings[parameter[0]])+"` to `"+str(parameter[1])+"` becasue it is `<= 0` or `>= "+str(limiters[parameter[0]]["max"])+"`", color=0x80ff80)
embed.set_footer(text="Default setting: "+str(vars(args1)[parameter[0]]))
else:
embed=discord.Embed(title="Settings", description="`"+str(parameter[0])+"` changed from `"+str(alt_settings[parameter[0]])+"` to `"+str(ch)+"`", color=0x80ff80)
embed.set_footer(text="Default setting: "+str(vars(args1)[parameter[0]]))
alt_settings[parameter[0]]=ch
any_changes=True
else:
embed=discord.Embed(title="Settings", description="`"+str(parameter[0])+"` is not a valid setting.", color=0x80ff80)
pickle.dump({"t1":t1, "settings":settings,"history":history, "user_version":user_version}, open("hist/"+str(message.guild.id)+".p", "wb"))
else:
embed=discord.Embed(title="Settings", description="`"+str(parameter)+"` contains more than two parts.", color=0x80ff80)
await message.channel.send(embed=embed, delete_after=150)
try:
await message.delete()
except:
pass
if any_changes:
try:
settings=vars(settings)
webhook = DiscordWebhook(url=config["logchannel"], avatar_url=str(message.guild.icon_url), username=str(message.guild.name))
embed= DiscordEmbed(title="Settings", description="__ __", color=0x80ff80)
embed.add_embed_field(name="model", value=str(settings["model"]))
embed.add_embed_field(name="model_checkpoint", value=str(settings["model_checkpoint"]))
embed.add_embed_field(name="device", value=str(settings["device"]))
embed.add_embed_field(name="temperature", value=str(settings["temperature"])+"/1")
embed.add_embed_field(name="top_k", value=str(settings["top_k"]))
embed.add_embed_field(name="top_p", value=str(settings["top_p"])+"/1")
embed.add_embed_field(name="seed", value=str(settings["seed"]))
embed.add_embed_field(name="auto_seed", value=str(settings["auto_seed"]))
embed.add_embed_field(name="max_history", value=str(settings["max_history"])+'/10')
embed.add_embed_field(name="max_length", value=str(settings["max_length"])+"/20")
embed.add_embed_field(name="no_sample", value=str(settings["no_sample"]))
embed.add_embed_field(name="", value="")
embed.add_embed_field(name="__ __\n", value=time.strftime('%X %x %Z'))
webhook.add_embed(embed)
webhook.execute()
except:
pass
elif message.content.lower().startswith(prefix+"-r"):
parameter=message.content.lower()[len(prefix)+3:]
desc="Reset settings and history"
h=["-h","hist","history"]
s=["-s","settings"]
for s1 in s:
if s1 in parameter:
desc="Reset settings"
settings=args1
for h1 in h:
if h1 in parameter:
desc="Reset history"
history=[]
if parameter=="":
history=[]
settings=args1
pickle.dump({"t1":t1, "settings":settings,"history":history, "user_version":user_version}, open("hist/"+str(message.guild.id)+".p", "wb"))
embed=discord.Embed(title="Reset", description=desc, color=0x80ff80)
await message.channel.send(embed=embed, delete_after=100)
try:
await message.delete()
except:
pass
try:
webhook = DiscordWebhook(url=config["logchannel"], avatar_url=str(message.guild.icon_url), username=str(message.guild.name))
embed = DiscordEmbed(title="Server settings reset", description=time.strftime('%X %x %Z'), color=0x80ff80)
embed.set_author(name=str(message.author), icon_url=str(message.author.avatar_url))
webhook.add_embed(embed)
webhook.execute()
except:
pass
elif message.content.lower().startswith(prefix):
if user_status:
ratelimit=2
else:
ratelimit=8
if round(time.time())-user_data["timestamp"] > ratelimit:
await message.channel.trigger_typing()
raw_text = message.content[len(prefix):][:100].lower().strip()
raw_text = re.sub(r"([?.!,])", r" \1 ", raw_text)
raw_text = re.sub(r'[" "]+', " ", raw_text)
raw_text = re.sub(r"[^a-zA-Z0-9?.!,\'%\s\/#]+", "", raw_text)
raw_text = re.sub(r"(\s+){2,}", " ", raw_text)
raw_text = raw_text.strip()
history.append(tokenizer.encode(raw_text.replace("\n"," ")))
with torch.no_grad():
out_ids = sample_sequence(personality, history, tokenizer, model, settings)
history.append(out_ids)
history = history[-(2*args.max_history+1):]
if len(get_history(message).replace("> ","").split("\n")) >=4:
if avg_similarity(settings.max_history,get_history(message).replace("> ","").split("\n")) >= 0.3 and settings.auto_seed == True:
settings.seed=random.randint(0,9999999999)
pickle.dump({"t1":round(time.time()),"settings":settings,"history":history,"user_version":current_version}, open("hist/"+str(message.guild.id)+".p", "wb"))
user_data["message_count"]+=1
user_data["timestamp"]=round(time.time())
udata["users"][message.author.id]=user_data
new_data=udata["message_rate"]
new_total=udata["message_total"]
try:
new_data[str(date.today())]=udata["message_rate"][str(date.today())]+1
new_total[str(date.today())]=udata["message_total"][str(date.today())]+1
except:
new_data[str(date.today())]=1
new_total[str(date.today())]=udata["message_total"][str(date.today()-datetime.timedelta(days = 1))]+1
pickle.dump({"message_total":new_total,"message_rate":new_data,"users":udata["users"]}, open("hist/user/users.p", "wb"))
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
await message.channel.send(out_text)
try:
webhook = DiscordWebhook(url=config["logchannel"], avatar_url=str(message.guild.icon_url), username=str(message.guild.name))
embed = DiscordEmbed(title="__ __\n\n"+str(message.author)+": "+raw_text.replace("\n"," "), description="Jade: "+out_text, color=0x80ff80)
embed.add_embed_field(name="__ __", value=time.strftime('%X %x %Z'))
embed.set_author(name=str(message.author), icon_url=str(message.author.avatar_url))
webhook.add_embed(embed)
webhook.execute()
except:
pass
else:
embed = discord.Embed(title="Ratelimit", description="Calm down or [upvote Jade](https://top.gg/bot/410253782828449802/vote) before trying again!", color=0x80ff80)
embed.set_footer(text="Try again in "+str(round(time.time()-t1))+" seconds")
embed.set_author(name=str(message.author), icon_url=str(message.author.avatar_url))
await message.channel.send(embed=embed, delete_after=50)
try:
await message.delete()
except:
pass
client.run(config["token"]) | [] |
2024-01-10 | tmdgusya/semantic-search-framework | src~storage~qdrant~QdrantClient.py | import os
import uuid
from typing import List
from src.embedd.EmbeddedModel import EmbeddedModel
from src.storage.QueryResultModel import QueryResultModel
from src.storage.StorageInterface import StorageInterface
from src.embedd.EmbeddingInteface import EmbeddingInterface
from src.embedd.openai.OpenAIClient import OpenAIClient
from qdrant_client import QdrantClient
from qdrant_client import models
class QdrantClientStorage(StorageInterface):
def __init__(self, embedding_client: EmbeddingInterface) -> None:
self.collection_name = os.getenv("QDRANT_COLLECTION_NAME")
self.qdrant_client = QdrantClient(
host=os.getenv("QDRANT_HOST"),
port=os.getenv("QDRANT_PORT"),
)
if (os.getenv("QDRANT_COLLECTION_ALWAYS_REFRES") == True):
print("QDRANT_COLLECTION_ALWAYS_REFRES is set. So, collection will be refreshed.")
self.qdrant_client.recreate_collection(
collection_name=self.collection_name,
vectors_config=models.VectorParams(size=embedding_client.get_vector_size(), distance=models.Distanc.COSINE),
)
else:
print("QDRANT_COLLECTION_ALWAYS_REFRES is not set. So, collection will not be refreshed.")
if (self.qdrant_client.get_collection(collection_name=self.collection_name) == None):
print("Collection does not exist. So, collection will be created.")
self.qdrant_client.create_collection(
collection_name=self.collection_name,
vectors_config=models.VectorParams(size=embedding_client.get_vector_size(), distance=models.Distance.COSINE),
)
self.model = embedding_client
def convert_single_data_to_point_struct(self, data: EmbeddedModel) -> models.PointStruct:
return models.PointStruct(
id=uuid.uuid4().hex,
payload={
"original_text": data.original_text,
"ref": data.ref,
},
vector=data.embedded_text
)
def convert_data_to_point_structs(self, datas: List[EmbeddedModel]) -> List[models.PointStruct]:
return [self.convert_single_data_to_point_struct(data) for data in datas]
def save(self, data: EmbeddedModel) -> bool:
datas = self.convert_data_to_point_structs([data])
result = self.qdrant_client.upsert(
collection_name=self.collection_name, points=datas
)
return result
def query(self, query: str) -> QueryResultModel:
# Convert text query into vector
vector = self.model.embed_simple_text(query)
# Use `vector` for search for closest vectors in the collection
search_result = self.qdrant_client.search(
collection_name=self.collection_name,
query_vector=vector,
score_threshold=0.8,
query_filter=None, # If you don't want any filters for now
)
# `search_result` contains found vector ids with similarity scores along with the stored payload
# In this function you are interested in payload only
payloads = [hit.payload for hit in search_result]
return payloads
if __name__ == "__main__":
client = QdrantClientStorage(
embedding_client=OpenAIClient()
)
result = client.save(
data=EmbeddedModel(
embedded_text=[0.0015061837621033192, 0.0034185831900686026, -0.012801367789506912, -0.03333835303783417, -0.009449690580368042, 0.00477263517677784, -0.015369287692010403, 0.0016710595227777958, -0.00296298461034894, -0.0249782782047987, 0.029922956600785255, 0.007123906631022692, -0.016822105273604393, -0.018007298931479454, 0.010405492037534714, -0.0028004981577396393, 0.02519492618739605, -0.015139894559979439, 0.011272084899246693, 0.01073046401143074, -0.00816254410892725, -0.001796906697563827, 0.017115218564867973, 0.006072525400668383, -0.014247813262045383, -0.007346927188336849, 0.003482303349301219, -0.015930024906992912, 0.037161558866500854, -0.025806639343500137, 0.009902102872729301, -0.006690610200166702, -0.004791751503944397, -0.01386549323797226, 0.011769101954996586, -0.018963100388646126, 0.0050052134320139885, -0.011335805058479309, 0.01901407726109028, -0.011781846173107624, 0.004823611117899418, 0.005607368424534798, 0.0030346696730703115, -0.006270057521760464, -0.026813415810465813, 0.008334588259458542, 0.005448068492114544, -0.009233041666448116, -0.006117129232734442, 0.020517870783805847, 0.02041591890156269, -0.011807333678007126, -0.013648844324052334, 0.007671899627894163, -0.01856803707778454, -0.00045878469245508313, -0.038359496742486954, 0.023971499875187874, 0.030916990712285042, -0.022161848843097687, 0.01974048651754856, 0.008028732612729073, -0.022939234972000122, 0.010035915300250053, -0.010341771878302097, 0.005132653750479221, 0.014260557480156422, 0.013954700902104378, -0.02227654494345188, 0.024111684411764145, 0.020097319036722183, 0.001941869966685772, -0.0056487866677343845, -0.0048140534199774265, 0.020161038264632225, 0.00029331157566048205, -0.02050512656569481, 0.005916411057114601, 0.00041497714119032025, -0.008742397651076317, 0.02869953215122223, -0.035963620990514755, -0.013253780081868172, 0.014630134217441082, 0.02228928916156292, 0.003644789569079876, -0.005855876952409744, 0.022748075425624847, -0.016936801373958588, -0.015356543473899364, -0.009889358654618263, 0.005846318788826466, 0.015522215515375137, 0.013852749019861221, -0.024047965183854103, 0.01289694756269455, 0.002927938476204872, 0.03106991946697235, -0.0002837535575963557, -0.035963620990514755, -0.013241035863757133, -0.0037276255898177624, -0.010354516096413136, -0.014107629656791687, -0.019434629008173943, 0.0072067431174218655, -0.011049064807593822, -0.0020294850692152977, 0.027476105839014053, 0.0013524590758606791, -0.023270579054951668, 0.00890806969255209, -0.0004918394843116403, -0.03323640301823616, -0.0026491631288081408, -0.008468400686979294, -0.0009215518948622048, 0.010647628456354141, 0.0013293605297803879, -0.02033945545554161, 0.01864450052380562, 0.021257024258375168, 0.0064771478064358234, -0.023385275155305862, 0.0021441811695694923, 0.004094016272574663, -0.034612756222486496, -0.021601112559437752, -0.0077292476780712605, -0.012508255429565907, 0.025309622287750244, -0.001983287977054715, -0.004125876352190971, 0.00026045588310807943, -0.033363841474056244, 0.02734866552054882, -0.011240225285291672, 0.01944737322628498, -0.02198343351483345, -0.009392342530190945, 0.000893674383405596, 0.027934890240430832, 0.011864681728184223, -0.001941869966685772, -0.007066558580845594, 0.020772751420736313, 0.0040175518952310085, -0.013610612601041794, 0.013355731964111328, -0.0068753985688090324, 0.005709320772439241, 0.018542548641562462, 0.011017204262316227, 0.0038359498139470816, 0.012412674725055695, 0.03489312529563904, -0.0016885825898498297, 0.020747262984514236, 0.004326594527810812, -0.0173573549836874, 0.0037021376192569733, -0.008729653432965279, 0.0065026357769966125, -0.025437062606215477, 0.01744656264781952, 0.01909054070711136, 0.01989341340959072, 0.011010833084583282, -0.00995307881385088, -0.020237503573298454, -0.012081330642104149, 0.003178039798513055, -0.04294734448194504, 0.02281179465353489, -0.01464287843555212, 0.015343799255788326, 0.004014366306364536, 0.01610844023525715, -0.014349766075611115, -0.0294386837631464, -0.02765452116727829, 0.008850721642374992, 0.003533279290422797, 0.029719052836298943, 0.009188437834382057, 8.258721936726943e-05, 0.012922435998916626, -0.011495104990899563, 0.0049000754952430725, -0.013890980742871761, 0.008697792887687683, 0.03764583170413971, 0.024430284276604652, 0.017395585775375366, -0.6867496967315674, -0.01188379805535078, 0.019421884790062904, 0.018797429278492928, 0.00616810517385602, 0.00987024325877428, 0.007684643846005201, 0.03731448948383331, 0.00032815849408507347, 0.029489660635590553, -0.02093842439353466, 0.022760817781090736, -0.014222325757145882, -0.0067224702797830105, -0.007353299297392368, -0.014808550477027893, 0.004046225920319557, -0.015483983792364597, -0.0049383076839149, 0.007009210530668497, -0.01285234373062849, 0.029030876234173775, -0.023601923137903214, -0.01684759370982647, 0.011756357736885548, 0.00872328132390976, 0.006400683894753456, -0.015420263633131981, -0.014489949680864811, 0.0323188342154026, -0.00395064614713192, 0.0131135955452919, 0.00553409056738019, 0.006203151308000088, 0.04970167577266693, 0.015840815380215645, 0.0021569253876805305, 0.001507776789367199, 0.007971384562551975, 0.02974454127252102, -0.019995367154479027, -0.019778717309236526, 0.010622140020132065, -0.016452528536319733, 0.008857093751430511, 0.004998841788619757, 0.003101575654000044, -0.00577941257506609, 0.0038964839186519384, 0.007123906631022692, 0.01640155352652073, -0.0007168510928750038, 0.014859526418149471, 0.018886636942625046, -0.000741542608011514, 0.007761107757687569, 0.01578984037041664, 0.014528182335197926, -5.650180537486449e-05, 0.005400278139859438, 0.0014329056721180677, 0.02109135128557682, -0.0018032786902040243, 0.006072525400668383, -0.019358165562152863, 0.01856803707778454, -0.01818571612238884, 0.01491050235927105, -0.0038805538788437843, -0.012164166197180748, -0.012495511211454868, -0.0067415861412882805, -0.014935990795493126, -0.01840236410498619, 0.0063720098696649075, 0.025819383561611176, 0.02570468746125698, -0.0034472572151571512, -0.005263280123472214, 0.017497537657618523, 0.017306378111243248, 0.017178937792778015, -0.006544054020196199, -0.023359786719083786, 0.016567224636673927, -0.023130394518375397, -0.031044431030750275, -0.017867114394903183, 0.0019801019225269556, 0.004444476682692766, 0.037085097283124924, 0.007079302798956633, -0.010335399769246578, -0.029795518144965172, 0.02429010160267353, 0.006279615685343742, -0.012622951529920101, -0.00197532307356596, 0.017548514530062675, -0.00890806969255209, 0.010176099836826324, 0.0006256516790017486, 0.007907664403319359, -0.0011302352650091052, 0.01618490368127823, 0.012508255429565907, -0.001260064891539514, 0.04167294502258301, 0.021409953013062477, -0.02547529526054859, -0.005174071993678808, 0.005655158776789904, -0.0374419279396534, 0.01505068689584732, 0.01170538179576397, -0.024646934121847153, 0.001405028160661459, 0.022034408524632454, 0.029897470027208328, -0.014668365940451622, 0.022862771525979042, -0.00853849295526743, 0.009647222235798836, 0.007869431748986244, 0.0026013730093836784, 0.015343799255788326, -0.01345768477767706, 0.006330591626465321, -0.016286857426166534, 0.005479928106069565, 0.01826217956840992, 0.007378787267953157, 0.0011477582156658173, -0.0034791172947734594, 0.004947865381836891, 0.007143022958189249, 0.015458495356142521, -0.011323060840368271, 0.0032847709953784943, 0.02086195908486843, -0.008009616285562515, 0.0035046052653342485, 0.0004094016330782324, -0.0016726525500416756, 0.009621734730899334, -0.01907779648900032, -0.0017602676525712013, 0.01781613938510418, 0.005683832801878452, -0.006098013371229172, 0.014413486234843731, -0.008933557197451591, -0.028113307431340218, 0.02847013995051384, -0.011374037712812424, -0.0023719805758446455, -0.007550831418484449, -0.025806639343500137, -0.026252679526805878, -0.031018942594528198, 0.010233447887003422, 0.017382841557264328, -0.011151016689836979, 0.003982506226748228, -0.011730870231986046, -0.027297688648104668, -0.020619822666049004, 0.02304118685424328, 0.0046356371603906155, -0.022837283089756966, 0.005352488253265619, -0.011412269435822964, -0.024239124730229378, -0.0038869259878993034, -0.0027144760824739933, 0.010947112925350666, -0.012387187220156193, -0.022544169798493385, -0.0011605023173615336, -0.020390430465340614, 0.004450848791748285, -0.007990499958395958, -0.002359236590564251, 0.017918091267347336, 0.02691536955535412, -0.002802091185003519, 0.002314632525667548, 0.022939234972000122, -0.024646934121847153, -0.010832416824996471, 0.017268145456910133, 0.011947518214583397, -0.019205236807465553, -0.004594218917191029, 0.002136216266080737, -0.01824943535029888, -0.0026427910197526217, -0.0017682326724752784, 0.008787001483142376, 0.019613046199083328, 0.020658055320382118, 0.007761107757687569, 0.020301222801208496, -0.02602328732609749, -0.01981694996356964, -0.026966344565153122, -0.0008442912949249148, -0.031095407903194427, 0.005473556462675333, 0.016363320872187614, 0.011437756940722466, 0.001302279531955719, -0.0003339331306051463, 0.016439784318208694, 0.018071020022034645, 0.02408619597554207, -0.0013309535570442677, -0.022391242906451225, -0.022684354335069656, 0.00596101488918066, 0.0036670914851129055, 0.01427330169826746, -0.016146672889590263, -0.006773446220904589, 0.00853849295526743, 0.031426750123500824, 0.008232636377215385, 0.04024561494588852, 0.010634884238243103, -0.026940856128931046, -0.019268957898020744, 0.01773967407643795, 0.011820077896118164, 0.014349766075611115, -0.008054220117628574, -0.0025010136887431145, 0.001906823948957026, -0.013024387881159782, 0.03524995595216751, 0.0127057870849967, -0.007461623288691044, 0.013585124164819717, 0.0266095120459795, -0.017472051084041595, -0.0032130859326571226, 0.029973933473229408, 0.02064531110227108, -0.0065886578522622585, 0.003115912666544318, 0.028419163078069687, -0.01069860439747572, -0.004450848791748285, -0.0013269709888845682, -0.014528182335197926, 0.01468111015856266, -0.030866015702486038, 0.008602213114500046, -0.015114407055079937, 0.021193305030465126, 0.032752130180597305, 0.02079823985695839, 0.008621329441666603, 0.003415397135540843, -0.00652175210416317, 0.00419596815481782, -0.012763135135173798, -0.005479928106069565, -0.013954700902104378, 0.005244163796305656, -0.0009661559597589076, -0.02227654494345188, -0.01147598959505558, 0.027603546157479286, -0.03777327388525009, 0.0059737591072916985, 0.00316688884049654, -0.0046356371603906155, -0.010284423828125, -0.007391531020402908, 0.0005495858495123684, -0.0083537045866251, -0.02676244080066681, 0.0013245815644040704, 0.006582286208868027, 0.004829983226954937, -0.012839599512517452, -0.029795518144965172, -0.00954527035355568, -0.00557232229039073, -0.003638417460024357, -0.0001859830372268334, 0.008978161960840225, 0.0073596709407866, -0.02630365639925003, 0.011896542273461819, -0.003737183753401041, 0.025666454806923866, 0.0030282975640147924, -0.0015037943376228213, -0.015968255698680878, 0.014783062040805817, 0.022709842771291733, -0.006524937693029642, -0.018274923786520958, 0.005358860362321138, 0.0001771219540387392, -0.006639634259045124, -0.02228928916156292, 0.007085674908012152, -0.015420263633131981, 0.0061712912283837795, 0.002818021224811673, -0.0007514989119954407, 0.004039854276925325, 0.010571164079010487, 0.0012106818612664938, -0.0009573944262228906, 0.023793084546923637, 0.02974454127252102, 0.0063242195174098015, 0.006684238091111183, -0.0013755576219409704, -0.021792273968458176, -0.008506633341312408, 0.07620923221111298, 0.000902435858733952, 0.002529687946662307, 0.016197647899389267, -0.015891792252659798, -0.0086786774918437, -0.026176216080784798, -0.021664833649992943, -0.02109135128557682, -0.011151016689836979, -0.0012584718642756343, 0.002308260416612029, -0.017637722194194794, 0.008143428713083267, 0.008047848008573055, 0.008213520050048828, 0.004489080980420113, -0.015458495356142521, -0.0077101318165659904, 0.009825638495385647, -0.009003649465739727, 0.009513410739600658, -0.002639604965224862, 0.01087702065706253, 0.024876326322555542, 0.0008251752587966621, 0.006862654350697994, 0.012680299580097198, -0.008200776763260365, -0.009832010604441166, 0.00458466075360775, -0.009679082781076431, 0.018529804423451424, 0.0019179749069735408, 0.013546892441809177, 0.014362509362399578, -0.009194809943437576, 0.029183804988861084, 0.010558419860899448, -0.010947112925350666, 0.006690610200166702, 0.0032911431044340134, -0.009035510011017323, -0.0029438685160130262, 0.01013149507343769, 0.0023066673893481493, -0.008959045633673668, 0.02796037867665291, -0.002451630774885416, -0.02719573676586151, 0.02205989696085453, 0.0015969849191606045, -0.00815617199987173, -0.014337021857500076, -0.014005676843225956, 0.01566240005195141, 0.0038741817697882652, -0.016006488353013992, -0.014744830317795277, -0.017828883603215218, -0.0070920465514063835, -0.018440596759319305, -0.016681920737028122, -0.009430574253201485, -0.022110873833298683, -0.026660488918423653, -0.02265886589884758, -0.004903261549770832, -0.011488733813166618, -0.005798528902232647, -0.010622140020132065, -0.04521578177809715, -0.016452528536319733, -0.004062156192958355, 0.019268957898020744, 0.0019100098870694637, 0.005884550977498293, -0.006107571069151163, 0.0051422119140625, 0.0020740891341120005, -0.014222325757145882, -0.02706829644739628, 0.01303713209927082, -0.01073683612048626, 0.014426229521632195, 0.004007994197309017, -0.0042565022595226765, 0.00946243479847908, -0.022008921951055527, 0.03122284822165966, 0.00207727518863976, 0.024175405502319336, 0.022161848843097687, 0.001949834986589849, 0.007659155409783125, 0.007665527518838644, -0.000774995656684041, -0.0011111191706731915, -0.018211204558610916, -0.0005583473248407245, -0.005540462210774422, -0.008627701550722122, -0.001166874310001731, -0.010335399769246578, -0.00024372937332373112, 0.008857093751430511, 0.010902508161962032, 0.024111684411764145, -0.016197647899389267, 4.126871863263659e-05, 0.03489312529563904, -0.02295197919011116, 0.018134739249944687, 0.0022971094585955143, 0.002666685963049531, 0.007334182970225811, 0.02765452116727829, 0.0314522385597229, 0.0005483910790644586, -0.032242368906736374, -0.0022190522868186235, -0.0225824024528265, 0.006582286208868027, 0.01524184737354517, 0.006381567567586899, -0.00037455468554981053, -0.0023225974291563034, -0.015636911615729332, -0.011055436916649342, -0.0007522953674197197, -0.006432543974369764, 0.016528993844985962, -0.0025567689444869757, -0.006760702468454838, -0.00557232229039073, -0.008398308418691158, -0.02056884765625, 0.010806928388774395, -0.004211898427456617, -0.015687888488173485, -0.017421074211597443, -0.00637519545853138, -0.0022015294525772333, -0.014222325757145882, -0.04399235546588898, -0.04264148697257042, -0.007448879536241293, -0.0024054336827248335, -0.007684643846005201, 0.01856803707778454, -0.002743150107562542, 0.0008912848425097764, -0.019065052270889282, -0.014120373874902725, -0.0014161791186779737, -0.020288478583097458, -0.003600185504183173, -0.003753113793209195, 0.026940856128931046, 0.025284133851528168, 0.023385275155305862, 0.014744830317795277, 0.01638880930840969, 0.0006180849159136415, -0.0007391531253233552, -0.009736430831253529, 0.00936048198491335, -0.004970167763531208, -0.012170538306236267, 0.007678271736949682, 0.017765162512660027, 0.018198460340499878, -0.013572380878031254, 0.017089730128645897, 0.0013213955098763108, -0.008047848008573055, -0.015496727079153061, 0.01069860439747572, -0.01767595484852791, -0.017561258748173714, -0.02824074774980545, -0.0018016857793554664, 0.003976134117692709, 0.004122690297663212, 0.0039060418494045734, -0.011730870231986046, 0.016274113208055496, 0.012278862297534943, 0.019511094316840172, 0.0038741817697882652, 0.033797141164541245, -0.024787116795778275, 0.024417541921138763, 0.008876209147274494, 0.012992527335882187, -0.017102474346756935, -0.0026284540072083473, -0.012164166197180748, 0.022990209981799126, 0.004368012771010399, 0.017981810495257378, 0.023283323273062706, -0.006798934191465378, -0.009264902211725712, 0.0015149452956393361, 0.027527082711458206, -0.00573799479752779, 0.002829172182828188, 0.005868620704859495, -0.006088455207645893, 0.002034264151006937, -0.021142328158020973, -0.00036220892798155546, -0.013572380878031254, 0.004281990695744753, -0.01578984037041664, -0.019116029143333435, 0.011947518214583397, -0.017472051084041595, -0.03481665998697281, -0.019804205745458603, 0.0008570352802053094, 0.045725543051958084, -0.004020737949758768, 0.00936048198491335, 0.022646121680736542, 0.0025089788250625134, 0.0012393558863550425, 0.011170133017003536, -0.01675838604569435, -0.004317036364227533, 0.038130104541778564, 0.007984127849340439, -0.010794184170663357, -0.013139083981513977, -0.0004460406780708581, 0.00556913623586297, -0.013470428064465523, -0.01826217956840992, -0.0007961029768921435, 0.023359786719083786, 0.002255691448226571, -0.01617216132581234, 0.006212709471583366, -0.02824074774980545, 0.021677576005458832, -0.002214273437857628, -0.021193305030465126, -0.02005908638238907, 0.007302322890609503, -0.023372530937194824, 0.0163250882178545, -0.02033945545554161, 0.03323640301823616, 0.02304118685424328, -0.014859526418149471, -0.0064771478064358234, 0.004100388381630182, 0.0008152190130203962, -0.009812895208597183, -0.000826768227852881, 0.025360599160194397, -0.0035269074141979218, 0.0004623689455911517, 0.013030759990215302, -0.0017220355803146958, -0.0167074091732502, -0.008137056604027748, -0.0070538148283958435, 0.023793084546923637, -0.02370387688279152, 0.007429763209074736, -0.005451254080981016, 0.009309506043791771, 0.00020131567725911736, 0.014362509362399578, -0.014744830317795277, -0.015675144270062447, 0.0033930952195078135, -0.00064755545463413, 0.03280310705304146, 0.0027734171599149704, 0.0009565979707986116, -0.018083764240145683, -0.013164572417736053, -0.027170248329639435, -0.0512564443051815, -0.0005344523233361542, -0.005492672324180603, -0.0335167720913887, -0.013699821196496487, -0.003883739933371544, 0.009475178085267544, 0.018657244741916656, 0.01787985861301422, -0.009965823031961918, 0.013572380878031254, 0.02369113266468048, -0.016044721007347107, -0.005591438617557287, 0.02683890424668789, 0.014795806258916855, -0.028342699632048607, 0.01840236410498619, -0.003329375060275197, -0.020454151555895805, 0.0013317500706762075, -0.00811794027686119, -0.010596652515232563, 0.0069136302918195724, 0.01166077796369791, 0.008812488988041878, -0.009991311468183994, 0.008417424745857716, 0.029234779998660088, 0.028648555278778076, 0.018669988960027695, -0.00754445930942893, -0.010176099836826324, 0.04779007285833359, -0.009456062689423561, -8.965616871137172e-05, -0.0047280313447117805, -0.012412674725055695, 0.02407345175743103, -0.007493483368307352, -0.016376065090298653, 0.007996872067451477, -0.037365466356277466, -0.004998841788619757, -0.005795342847704887, -0.006301917601376772, 0.009698199108242989, -0.025092974305152893, -0.02256965823471546, -0.007015582639724016, -0.017905347049236298, 0.002376759657636285, 0.008442913182079792, -0.016414297744631767, -0.0030171466059982777, 0.0171407051384449, 0.01893761195242405, -0.004199154209345579, -0.0006443694583140314, -0.011361293494701385, -0.0175230260938406, -0.014617389999330044, -0.02212361805140972, -0.014885014854371548, -0.004027110058814287, 0.04159647971391678, 0.012368070892989635, -0.011214736849069595, -0.00987024325877428, 0.0020804612431675196, -0.026660488918423653, -0.033363841474056244, -0.0016853965353220701, -0.005871806759387255, 0.0017411516746506095, 0.012986156158149242, -0.002861032262444496, 0.012272490188479424, 0.004657939076423645, 0.000997219467535615, -0.013368476182222366, -0.016745641827583313, 0.021346231922507286, -0.0031971558928489685, 0.01767595484852791, 0.005362045951187611, -0.030152350664138794, -0.001881335861980915, 0.0019769161008298397, 0.014553669840097427, -0.011501477099955082, 0.012552859261631966, -0.004626078996807337, -0.012399930506944656, -0.011004460975527763, -0.010679488070309162, 0.00015133523265831172, 0.008965417742729187, 0.021805016323924065, -0.012845971621572971, -0.004992469679564238, 0.00042652638512663543, -0.0074233911000192165, 0.005588252563029528, -0.009328622370958328, 0.014413486234843731, -0.018211204558610916, 0.013202804140746593, -0.021601112559437752, -0.005221861880272627, -0.007193998899310827, -0.01678387261927128, 0.00693274661898613, -0.017841627821326256, 0.007920407690107822, 0.024481261149048805, 0.0008873023325577378, 0.00912471767514944, -0.004629265051335096, 0.0002491057675797492, -0.006544054020196199, 0.00932225026190281, 0.00016049499390646815, -0.013151828199625015, 0.002727220067754388, 0.0034600012004375458, -0.018669988960027695, -0.025526270270347595, 0.005355674307793379, -0.03609106317162514, -0.009003649465739727, -0.0065886578522622585, 0.0338481143116951, 0.015840815380215645, -0.020364942029118538, -0.008589468896389008, -0.019778717309236526, 0.00012465243344195187, 0.010564791969954967, -0.017994554713368416, 0.0077292476780712605, -0.007633667439222336, 0.006177663337439299, 0.02525864541530609, -0.016286857426166534, 0.002666685963049531, 0.01759949140250683, 0.013393964618444443, -0.006311475764960051, 0.02691536955535412, 0.2322470098733902, -0.011679893359541893, 0.010443723760545254, 0.02436656504869461, 0.013648844324052334, 0.017178937792778015, 0.010615767911076546, -0.0009215518948622048, 0.001506980275735259, 0.01423506997525692, -0.009405085816979408, -0.00476944912225008, -0.0008411052986048162, 0.0075380876660346985, 0.002367201494053006, 0.0015284857945516706, -0.018338643014431, -0.015522215515375137, -0.029158316552639008, -0.02773098647594452, -0.004855471197515726, -0.030203325673937798, -0.019651276990771294, -0.017905347049236298, 0.017344610765576363, -0.009366854093968868, -0.006805306300520897, 0.0028928923420608044, 0.03328737989068031, 0.0061330595053732395, -0.006970978807657957, -0.01483403891324997, 0.010787812061607838, 0.006607774179428816, -0.006939118728041649, -0.023907780647277832, 0.032242368906736374, 0.005604182370007038, 0.03756937012076378, 0.007461623288691044, -0.010195215232670307, -0.01677113026380539, -0.005059375893324614, -0.0147193418815732, 0.01110641285777092, 0.009634478949010372, 0.002249319339171052, -0.0262271910905838, 0.008417424745857716, 0.03448531776666641, 0.0006324219866655767, 0.009558014571666718, 0.012603835202753544, 0.03369518741965294, 0.0004894500016234815, -0.013980189338326454, 0.011775474064052105, 0.013572380878031254, 0.0019769161008298397, -0.003848693799227476, 5.092629726277664e-05, 0.02796037867665291, -0.0041513643227517605, 0.027985867112874985, -0.022008921951055527, 0.007512599229812622, -0.030738575384020805, -0.0009048253996297717, 0.008340960368514061, -0.009819267317652702, -0.015343799255788326, -0.004887331277132034, 0.013190059922635555, -0.0059737591072916985, -0.028979899361729622, -0.0038518798537552357, 0.014515438117086887, -0.004313850775361061, 0.025322366505861282, 0.01780339516699314, -0.0029836934991180897, 0.005632856395095587, -0.005699762608855963, -0.018886636942625046, -0.03214041516184807, -0.02877599559724331, 0.005008399486541748, -0.002819614252075553, -0.015509471297264099, -0.023665644228458405, -0.009583503007888794, -0.01152059342712164, -0.00011698611342580989, -0.0019307188922539353, 0.026787929236888885, 0.02392052486538887, -0.0021425881423056126, 0.014362509362399578, -0.01367433276027441, 0.008289984427392483, -0.007327811326831579, 0.002682616002857685, -0.0054034641943871975, 0.01285871583968401, -0.0043297805823385715, 0.0027973123360425234, -0.009647222235798836, 2.237048420283827e-06, -0.0009940335294231772, -0.0237803403288126, -0.029617100954055786, -0.0067224702797830105, 0.004371198825538158, -0.0017411516746506095, -0.008780629374086857, 0.0018542548641562462, -0.00032835762249305844, 0.005744366906583309, -0.0051390258595347404, -0.011941146105527878, -0.0023624226450920105, -0.012215142138302326, -0.013521404936909676, -0.00019444586359895766, -0.004845913499593735, -0.02183050476014614, -0.007015582639724016, -0.003893297864124179, 0.013597868382930756, -0.029387708753347397, 0.015904536470770836, -0.0010927997063845396, 0.027705498039722443, 0.014247813262045383, -0.006945490371435881, 0.010978972539305687, 0.014158605597913265, -0.00592596922069788, -0.015993744134902954, -0.0010235040681436658, 0.002794126281514764, -0.022340266034007072, 0.016286857426166534, -0.006703353952616453, -0.01427330169826746, -0.01091525238007307, 0.03496959060430527, -0.026558537036180496, -0.00010483946971362457, 0.006658750120550394, -0.03524995595216751, -0.011934773996472359, -0.008455656468868256, -0.0114823617041111, 0.026813415810465813, -0.017013266682624817, -0.022671610116958618, -0.026584023609757423, -0.0022859585005789995, 0.0030346696730703115, -0.026354631409049034, 0.012278862297534943, 0.036193013191223145, -0.0017650467343628407, -0.025908591225743294, -0.013253780081868172, -0.16342930495738983, 0.015496727079153061, 0.01601923257112503, -0.020747262984514236, 0.027170248329639435, 0.010558419860899448, 0.030534669756889343, 0.0027351852040737867, -0.005890923086553812, 0.021142328158020973, 0.008009616285562515, -0.018759196624159813, -0.01427330169826746, -0.007843944244086742, 0.0043042926117777824, -0.009271274320781231, -0.006531309802085161, 0.017714187502861023, 0.03191102296113968, 0.027680009603500366, 0.026584023609757423, -0.019256213679909706, 0.010309911333024502, -0.003139807842671871, 0.011533337645232677, -0.009468805976212025, -0.0038901118095964193, 0.037238024175167084, -0.01691131293773651, -0.007984127849340439, -0.016605457291007042, -0.02281179465353489, 0.022913746535778046, 0.011635289527475834, 0.01743381842970848, 0.00025229176389984787, 0.012756763026118279, -0.04399235546588898, -0.016044721007347107, 0.02004634216427803, 0.02519492618739605, 0.011953890323638916, 0.02384405955672264, -0.016452528536319733, -0.01531831081956625, -0.008360076695680618, 0.020912935957312584, 0.01930718868970871, 0.02467242069542408, -0.009309506043791771, 0.010023171082139015, -0.004759891424328089, 0.003638417460024357, -0.017344610765576363, 0.02765452116727829, 0.028521114960312843, -0.0009096043650060892, 0.017714187502861023, 0.00023018884530756623, -0.004944679327309132, -0.0003016748232766986, -0.01661820150911808, -0.005467184353619814, 0.002614116994664073, 0.01106818113476038, -0.008047848008573055, 0.004664311185479164, 0.013623356819152832, -0.0005651176325045526, 0.012807739898562431, -0.008659561164677143, -0.024532238021492958, -0.011074553243815899, -0.031171871349215508, 0.0046738688834011555, 0.006018362939357758, -0.03369518741965294, 0.018593523651361465, -0.014069397002458572, -0.026252679526805878, -0.009965823031961918, 0.02831721119582653, 0.00012166555825388059, 0.0036989515647292137, -0.018287668004631996, 0.0001601963012944907, 0.004970167763531208, 0.018427852541208267, -0.031018942594528198, -0.040500491857528687, 0.014349766075611115, -0.012495511211454868, -0.007952268235385418, -0.021346231922507286, 0.0004105963744223118, 0.01743381842970848, 0.018236691132187843, 0.010660371743142605, -0.017051497474312782, -0.013151828199625015, 0.002110728295519948, 0.00839193630963564, -0.018517060205340385, 0.013738052919507027, 0.03948097303509712, -0.005196373909711838, 0.007850316353142262, 0.004403058905154467, 0.024685164913535118, -0.026176216080784798, -0.03257371485233307, 0.0070729306899011135, 0.015534959733486176, 0.02706829644739628, 0.01363610103726387, 0.02384405955672264, -0.003141400869935751, -0.017701443284749985, 0.003415397135540843, -0.014808550477027893, 0.05327000096440315, -0.00473121739923954, -0.04330417886376381, 0.0006280412198975682, 0.00308405258692801, -0.010571164079010487, -0.08701616525650024, -0.026278167963027954, -0.0003847100888378918, 0.00950703863054514, 0.006626890040934086, 0.04779007285833359, 0.005795342847704887, 0.005645600613206625, -0.0005627280916087329, 0.00969182699918747, 0.004520941060036421, -0.030075885355472565, -0.024799861013889313, -0.0118901701644063, 0.042233679443597794, -0.03219139203429222, 0.013100852258503437, 0.0011087296297773719, -0.02816428244113922, 0.013967445120215416, -0.005687018856406212, -0.0005587455816566944, 0.0008737618336454034, -0.008137056604027748, -0.017344610765576363, 0.012559231370687485, -0.020734518766403198, 0.030916990712285042, 0.003338932991027832, -0.012132306583225727, -0.021435441449284554, -0.02734866552054882, -0.011839194223284721, -0.008245380595326424, 0.0033739791251719, 0.011832822114229202, -0.04055146872997284, 0.031630657613277435, 0.012693042866885662, -0.04011817276477814, 0.025844871997833252, 0.011692637577652931, -0.006442101672291756, -0.06407693028450012, -0.0039028560277074575, 0.01826217956840992, -0.0017570817144587636, 0.03535190969705582, 0.011654405854642391, -0.010048659518361092, -0.026405608281493187, -0.025322366505861282, -0.026201704517006874, 0.00395064614713192, 0.0185552928596735, -0.006308289710432291, 0.008748768828809261, 0.03078955039381981, 0.0037658577784895897, 0.01106180902570486, 0.011297573335468769, 0.004301106557250023, -0.020084574818611145, 0.015420263633131981, -0.007448879536241293, 0.01824943535029888, -0.015420263633131981, 0.02481260523200035, 0.005776226986199617, 0.00114138622302562, -0.019804205745458603, 0.021728552877902985, 0.007238603197038174, 0.014413486234843731, -0.033210914582014084, -0.00635289354249835, -0.019613046199083328, -0.010481955483555794, 0.010647628456354141, -0.0009056218550540507, -0.0342814102768898, -0.017790650948882103, 0.018083764240145683, -0.0009223484084941447, 0.017459306865930557, 0.010125122964382172, -0.013444940559566021, 0.002598186954855919, -0.008245380595326424, -0.03420494869351387, -0.02839367464184761, 0.002290737582370639, 0.011233853176236153, -0.00653768191114068, 0.010080519132316113, 0.006862654350697994, -0.006387939676642418, -0.00026762441848404706, 0.018580779433250427, 0.012826855294406414, -0.03244627267122269, 0.002904043300077319, -0.0654023066163063, 0.02974454127252102, -0.014222325757145882, -0.03157968074083328, -0.000293709832476452, -0.014783062040805817, 0.008850721642374992, -0.015063431113958359, 0.000749507627915591, -0.012884203344583511, -0.014757574535906315, -0.006225453224033117, 0.009264902211725712, 0.014808550477027893, -0.030916990712285042, 0.005929154809564352, 0.007843944244086742, 0.012597463093698025, 0.009832010604441166, 0.00612668739631772, 0.010564791969954967, -0.035887159407138824, -0.008748768828809261, -0.0049383076839149, -0.024188147857785225, 0.007805712055414915, -0.03346579521894455, -0.002195157343521714, -0.0033102589659392834, -0.010265307500958443, 0.013992933556437492, -0.04123964533209801, 0.009787406772375107, 0.021779529750347137, 0.010080519132316113, -0.008876209147274494, 0.004581475164741278, 0.022556914016604424, -0.0007893326692283154, 0.02451949380338192, -0.02824074774980545, -0.027603546157479286, 0.010290795937180519, -0.016643689945340157, -0.007009210530668497, 0.004320222418755293, -0.0047216592356562614, 0.0013301570434123278, 0.03423043712973595, 0.021409953013062477, -0.009080113843083382, 0.033873602747917175, -0.0318855382502079, -0.015917280688881874, -0.014935990795493126, -0.014056653715670109, -0.005448068492114544, 0.004275618586689234, -0.0025838499423116446, -0.03810461610555649, 0.029260268434882164, 0.006677865982055664, 0.02719573676586151, -0.005983317270874977, 0.006824422162026167, 0.0017156635876744986, -0.023270579054951668, 0.0013859120663255453, -0.017905347049236298, -0.02256965823471546, -0.00973005872219801, -0.012699414975941181, 0.005033887457102537, -0.0015985779464244843, 0.018466083332896233, 0.020632566884160042, -0.0072449748404324055, 0.013126339763402939, -0.04368649795651436, 0.03637143224477768, 0.014387997798621655, 0.016796616837382317, -0.029617100954055786, -0.0015252998564392328, 0.039226092398166656, 0.012597463093698025, -0.008277240209281445, -0.010947112925350666, -0.0015284857945516706, -0.006368823815137148, -0.027858426794409752, -0.017051497474312782, 0.007487111259251833, 0.027323177084326744, 0.0034345132298767567, 0.010577536188066006, 0.021588368341326714, 0.011004460975527763, 0.021269768476486206, 0.023818572983145714, -0.005448068492114544, 0.016681920737028122, -0.020326711237430573, -0.0067479582503438, -0.01640155352652073, 0.010456467978656292, -0.006646005902439356, -0.04044951871037483, 0.007678271736949682, 0.010685860179364681, 0.010724091902375221, 0.03550483658909798, -0.0008681862964294851, 0.011641661636531353, -0.026584023609757423, -0.010195215232670307, 0.024124428629875183, 0.004916005302220583, -0.026048775762319565, 0.04350808262825012, -0.013585124164819717, 0.005699762608855963, 0.022901002317667007, -0.02070903219282627, 0.004227828234434128, -0.000404423481086269, -0.012374443002045155, -0.011405897326767445, 0.012202398851513863, -0.0010824451455846429, 0.018007298931479454, -0.005103979725390673, -0.023130394518375397, -0.0040653422474861145, -3.1685813155490905e-05, 0.02072177454829216, -0.007155766710639, 0.025653710588812828, -0.014158605597913265, 0.052352432161569595, -0.007856687530875206, -0.0017602676525712013, 0.02281179465353489, 0.02199617773294449, 0.013967445120215416, 0.01884840428829193, 0.015573191456496716, -0.009921219199895859, -0.01587904803454876, -0.006422985810786486, 0.010481955483555794, 0.006515379995107651, -0.02773098647594452, -0.025526270270347595, 0.0024643747601658106, -0.021703064441680908, -0.0009645629907026887, -0.015331055037677288, 0.01870821975171566, 0.01878468506038189, 0.013597868382930756, 0.028801484033465385, 0.006117129232734442, -0.024723397567868233, -0.014260557480156422, 0.003354863030835986, 0.00734055507928133, -0.0072449748404324055, -0.023742107674479485, 0.016146672889590263, 0.009194809943437576, -0.010609395802021027, -0.012234258465468884, -5.660136594087817e-05, 0.0027144760824739933, -0.013419452123343945, -0.0038295777048915625, -0.0007789781666360795, 0.005750738549977541, 0.02430284582078457, 0.004638823214918375, -0.025615479797124863, -0.026431096717715263, 0.004106760025024414, 0.005186815746128559, -0.030585646629333496, -0.002537652850151062, -0.01923072524368763],
original_text="Hello, world!",
ref="https://www.google.com"
)
)
query_result = client.query(
query="Find Hello",
)
print(query_result) | [] |
2024-01-10 | pythonistadeepak/LLM-UI-streamlit-with-user_feedback | Chat_with_user_feedback.py | from openai import OpenAI
import streamlit as st
from streamlit_feedback import streamlit_feedback
import trubrics
with st.sidebar:
openai_api_key = st.text_input("Optional Key", key="feedback_api_key", type="password")
"This is only for testing"
"Same Can be Replicated in controlled Env"
"Apache License 2.0"
"[ Connect Deepak ](https://www.linkedin.com/in/pyhonistadeepak/)"
st.title("📝 Chat via LLM with User feedback")
"""
In this application, we're using a LLM to answer user's queries and ask human users about their feedback on generated response.The collected feedback cab be stored
for LLM improvement.
"""
if "messages" not in st.session_state:
st.session_state.messages = [
{"role": "assistant", "content": "How can I help you? Leave feedback to help me improve!"}
]
if "response" not in st.session_state:
st.session_state["response"] = None
messages = st.session_state.messages
for msg in messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Tell me a joke about sharks"):
messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
client = OpenAI(api_key=openai_api_key)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
st.session_state["response"] = response.choices[0].message.content
with st.chat_message("assistant"):
messages.append({"role": "assistant", "content": st.session_state["response"]})
st.write(st.session_state["response"])
if st.session_state["response"]:
feedback = streamlit_feedback(
feedback_type="thumbs",
optional_text_label="[Optional] Please provide an explanation",
key=f"feedback_{len(messages)}",
)
# This app is logging feedback to Trubrics backend, but you can send it anywhere.
# The return value of streamlit_feedback() is just a dict.
# Configure your own account at https://trubrics.streamlit.app/
if feedback and "TRUBRICS_EMAIL" in st.secrets:
config = trubrics.init(
email=st.secrets.TRUBRICS_EMAIL,
password=st.secrets.TRUBRICS_PASSWORD,
)
collection = trubrics.collect(
component_name="default",
model="gpt",
response=feedback,
metadata={"chat": messages},
)
trubrics.save(config, collection)
st.toast("Feedback recorded!", icon="📝")
| [
"How can I help you? Leave feedback to help me improve!",
"response"
] |
2024-01-10 | Lightning-Dev/vector-admin | document-processor~scripts~parsers~as_pdf.py | import os
from langchain.document_loaders import PyPDFLoader
from slugify import slugify
from ..utils import guid, file_creation_time, write_to_server_documents, move_source, tokenize
# Process all text-related documents.
def as_pdf(**kwargs):
parent_dir = kwargs.get('directory', 'hotdir')
filename = kwargs.get('filename')
ext = kwargs.get('ext', '.txt')
remove = kwargs.get('remove_on_complete', False)
fullpath = f"{parent_dir}/{filename}{ext}"
loader = PyPDFLoader(fullpath)
pages = loader.load_and_split()
print(f"-- Working {fullpath} --")
metadata = []
for page in pages:
pg_num = page.metadata.get('page')
print(f"-- Working page {pg_num} --")
content = page.page_content
data = {
'id': guid(),
'url': "file://"+os.path.abspath(f"{parent_dir}/processed/{filename}{ext}"),
'title': f"{filename}_pg{pg_num}{ext}",
'description': "a custom file uploaded by the user.",
'published': file_creation_time(fullpath),
'wordCount': len(content),
'pageContent': content,
'token_count_estimate': len(tokenize(content))
}
metadata.append(data)
move_source(parent_dir, f"{filename}{ext}", remove=remove)
print(f"[SUCCESS]: {filename}{ext} converted & ready for embedding.\n")
return metadata | [] |
2024-01-10 | royce-mathew/Hackville2023 | backend~utils~gpt.py | import cohere
import config as config
co = cohere.Client(config.COHERE_API_KEY);
def evaluate_input(input_sentence: str):
response = co.generate(
model='xlarge',
prompt = input_sentence,
max_tokens=200,
temperature=0.8,
)
print(response.generations)
if (response.generations[0]):
return response.generations[0].text
else:
return "An Exception Occured"
| [] |
2024-01-10 | hyeonsangjeon/AWS-LLM-SageMaker | common_code~inference_lib.py | import boto3
import time
import json
"""
A dedicated helper to manage templates and prompt building.
"""
import json
import os.path as osp
from typing import Union
import os
import pathlib
class Prompter(object):
__slots__ = ("template", "_verbose")
def __init__(self, template_name: str = "", verbose: bool = False):
self._verbose = verbose
if not template_name:
# Enforce the default here, so the constructor can be called with '' and will not break.
template_name = "alpaca"
#file_name = osp.join("templates", f"{template_name}.json")
# file_name = str(pathlib.Path().home()) + '/Kor-LLM-On-SageMaker/common_code' # + f"{template_name}.json"
file_name = osp.join("/root/Kor-LLM-On-SageMaker/common_code", f"{template_name}.json")
# path = pathlib.Path.cwd()
# print("Pathlib: ", path)
# print(pathlib.Path().home())
# path = pathlib.Path().home() + '/Kor-LLM-On-SageMaker/common_code'
# print("pwd: ", os.getcwd())
# file_name = f"{template_name}.json"
if not osp.exists(file_name):
raise ValueError(f"Can't read {file_name}")
with open(file_name) as fp:
self.template = json.load(fp)
if self._verbose:
print(
f"Using prompt template {template_name}: {self.template['description']}"
)
def generate_prompt(
self,
instruction: str,
input: Union[None, str] = None,
label: Union[None, str] = None,
) -> str:
# returns the full prompt from instruction and optional input
# if a label (=response, =output) is provided, it's also appended.
if input:
res = self.template["prompt_input"].format(
instruction=instruction, input=input
)
else:
res = self.template["prompt_no_input"].format(
instruction=instruction
)
if label:
res = f"{res}{label}"
if self._verbose:
print(res)
return res
def get_response(self, output: str) -> str:
return output.split(self.template["response_split"])[1].strip()
def describe_endpoint(endpoint_name):
'''
엔드폰인트 생성 유무를 확인. 생성 중이면 기다림.
'''
sm_client = boto3.client("sagemaker")
while(True):
response = sm_client.describe_endpoint(
EndpointName= endpoint_name
)
status = response['EndpointStatus']
if status == 'Creating':
print("Endpoint is ", status)
time.sleep(60)
else:
print("Endpoint is ", status)
break
def invoke_inference(endpoint_name, prompt):
'''
KoAlpaca 프롬프트를 제공하여 엔드포인트 호출
'''
client = boto3.client("sagemaker-runtime")
content_type = "text/plain"
response = client.invoke_endpoint(
EndpointName=endpoint_name, ContentType=content_type, Body=prompt
)
#print(response["Body"].read())
res = response["Body"].read().decode()
print (eval(res)[0]['generated_text'])
def invoke_inference_DJ(endpoint_name, prompt):
'''
invoke_inference 변형,
곤수님께서 기존에 invoke_inference를 사용하는 부분이 있어 우선 이름을 달리 함
추후 invoke_inference과 하나로 합칠 예정
'''
'''
KoAlpaca 프롬프트를 제공하여 엔드포인트 호출
'''
client = boto3.client("sagemaker-runtime")
content_type = "application/json"
response = client.invoke_endpoint(
EndpointName=endpoint_name,
ContentType=content_type,
Body=json.dumps(prompt)
)
res = response["Body"].read().decode()
# print (res)
return res
def query_endpoint_with_text_payload(plain_text, endpoint_name, content_type="text/plain"):
'''
content_type 이 text/plain 인 경우 사용
'''
client = boto3.client("runtime.sagemaker")
response = client.invoke_endpoint(
EndpointName=endpoint_name, ContentType=content_type, Body=plain_text
)
return response
def parse_response_text_model(query_response):
'''
content_type 이 text/plain 인 경우 사용
'''
model_predictions = json.loads(query_response["Body"].read())
# print("model_predictions: \n", model_predictions)
generated_text = model_predictions[0]["generated_text"]
return generated_text
def parse_response(query_response):
def traverse(o, tree_types=(list, tuple)):
if isinstance(o, tree_types):
for value in o:
for subvalue in traverse(value, tree_types):
yield subvalue
else:
yield o
data = eval(query_response)
listRes = []
for value in traverse(data):
listRes.append(value["generated_text"])
if len(listRes) >= 2: return listRes
else: return listRes[0].strip()
################################################
# Embedding Handler
################################################
# from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
# from langchain.embeddings import SagemakerEndpointEmbeddings
# from langchain.llms.sagemaker_endpoint import ContentHandlerBase
# from typing import Any, Dict, List, Optional
# class SagemakerEndpointEmbeddingsJumpStart(SagemakerEndpointEmbeddings):
# def embed_documents(self, texts: List[str], chunk_size: int = 5) -> List[List[float]]:
# """Compute doc embeddings using a SageMaker Inference Endpoint.
# Args:
# texts: The list of texts to embed.
# chunk_size: The chunk size defines how many input texts will
# be grouped together as request. If None, will use the
# chunk size specified by the class.
# Returns:
# List of embeddings, one for each text.
# """
# results = []
# _chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
# # print("text size: ", len(texts))
# # print("_chunk_size: ", _chunk_size)
# for i in range(0, len(texts), _chunk_size):
# response = self._embedding_func(texts[i : i + _chunk_size])
# print
# results.extend(response)
# return results
# import numpy as np
# class KoSimCSERobertaContentHandler(EmbeddingsContentHandler):
# content_type = "application/json"
# accepts = "application/json"
# def transform_input(self, prompt: str, model_kwargs={}) -> bytes:
# input_str = json.dumps({"inputs": prompt, **model_kwargs})
# return input_str.encode("utf-8")
# def transform_output(self, output: bytes) -> str:
# response_json = json.loads(output.read().decode("utf-8"))
# ndim = np.array(response_json).ndim
# # print("response_json ndim: \n", ndim)
# # print("response_json shape: \n", np.array(response_json).shape)
# if ndim == 4:
# # Original shape (1, 1, n, 768)
# emb = response_json[0][0][0]
# emb = np.expand_dims(emb, axis=0).tolist()
# # print("emb shape: ", np.array(emb).shape)
# # print("emb TYPE: ", type(emb))
# elif ndim == 2:
# # Original shape (n, 1)
# # print(response_json[0])
# emb = []
# for ele in response_json:
# # print(np.array(response_json[0]).shape)
# e = ele[0][0]
# #emb = np.expand_dims(emb, axis=0).tolist()
# # print("emb shape: ", np.array(emb).shape)
# # print("emb TYPE: ", type(emb))
# emb.append(e)
# # print("emb_list shape: ", np.array(emb).shape)
# # print("emb_list TYPE: ", type(emb))
# else:
# print(f"Other # of dimension: {ndim}")
# emb = None
# return emb
# ################################################
# # LLM Handler
# ################################################
# from langchain.llms.sagemaker_endpoint import LLMContentHandler
# import json
# class KoAlpacaContentHandler(LLMContentHandler):
# content_type = "application/json"
# accepts = "application/json"
# def transform_input(self, prompt: str, model_kwargs={}) -> bytes:
# input_str = json.dumps({"text_inputs": prompt, **model_kwargs})
# return input_str.encode("utf-8")
# def transform_output(self, output: bytes) -> str:
# print("In KoAlpacaContentHandler")
# # print("output: ", output)
# response_json = json.loads(output.read().decode("utf-8"))
# print("response_json: ", response_json)
# # return response_json["generated_texts"][0]
# doc = response_json[0]['generated_text']
# doc = json.loads(doc)
# doc = doc['text_inputs']
# return doc
| [
"alpaca"
] |
2024-01-10 | hyeonsangjeon/AWS-LLM-SageMaker | RAG-SageMaker~utils~streamlit_util.py | import json
import boto3
import numpy as np
from inference_utils import Prompter
from typing import Any, Dict, List, Optional
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.llms.sagemaker_endpoint import LLMContentHandler, SagemakerEndpoint
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
prompter = Prompter("kullm")
class KullmContentHandler(LLMContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs={}) -> bytes:
'''
입력 데이터 전처리 후에 리턴
'''
context, question = prompt.split("||SPEPERATOR||")
prompt = prompter.generate_prompt(question, context)
# print ("prompt", prompt)
payload = {
'inputs': [prompt],
'parameters': model_kwargs
}
input_str = json.dumps(payload)
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
generated_text = response_json[0][0]["generated_text"]
return generated_text
class SagemakerEndpointEmbeddingsJumpStart(SagemakerEndpointEmbeddings):
def embed_documents(self, texts: List[str], chunk_size: int = 1) -> List[List[float]]:
"""Compute doc embeddings using a SageMaker Inference Endpoint.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size defines how many input texts will
be grouped together as request. If None, will use the
chunk size specified by the class.
Returns:
List of embeddings, one for each text.
"""
results = []
_chunk_size = len(texts) if chunk_size > len(texts) else chunk_size
print("text size: ", len(texts))
print("_chunk_size: ", _chunk_size)
for i in range(0, len(texts), _chunk_size):
# print (i, texts[i : i + _chunk_size])
response = self._embedding_func(texts[i: i + _chunk_size])
# print (i, response, len(response[0].shape))
results.extend(response)
return results
class KoSimCSERobertaContentHandler(EmbeddingsContentHandler):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs={}) -> bytes:
input_str = json.dumps({"inputs": prompt, **model_kwargs})
return input_str.encode("utf-8")
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
ndim = np.array(response_json).ndim
if ndim == 4:
# Original shape (1, 1, n, 768)
emb = response_json[0][0][0]
emb = np.expand_dims(emb, axis=0).tolist()
elif ndim == 2:
# Original shape (n, 1)
emb = []
for ele in response_json:
e = ele[0][0]
emb.append(e)
else:
print(f"Other # of dimension: {ndim}")
emb = None
return emb
| [] |
2024-01-10 | hyeonsangjeon/AWS-LLM-SageMaker | RAG-SageMaker~rag-fsi-data-workshop~TASK-5_OpenSearch_LLM_RAG_Streamlit_Chatbot_Example.py | import streamlit as st
import sys
import json
import boto3
import numpy as np
from typing import Any, Dict, List, Optional
from langchain.embeddings import SagemakerEndpointEmbeddings
from langchain.llms.sagemaker_endpoint import LLMContentHandler, SagemakerEndpoint
from langchain.embeddings.sagemaker_endpoint import EmbeddingsContentHandler
from langchain import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from streamlit_chat import message
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import Chroma, AtlasDB, FAISS
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
import csv
from langchain.vectorstores import OpenSearchVectorSearch
import os
import copy
import sys
sys.path.append('../utils') # src 폴더 경로 설정
from streamlit_util import KoSimCSERobertaContentHandler, KullmContentHandler, SagemakerEndpointEmbeddingsJumpStart, KoSimCSERobertaContentHandler
##########################################################################################################################################################################
# pip install -r ./requirements.txt in the system terminal
# Studio의 Stramlit URL은 domain의 lab이 proxy/8501/webapp로 대체됩니다.
# ex > https://d-l2kk7xvxmnbl.studio.us-east-1.sagemaker.aws/jupyter/default/proxy/8501/
# 참고 : https://aws.amazon.com/ko/blogs/tech/build-a-powerful-question-answering-bot-with-amazon-sagemaker-amazon-opensearch-service-streamlit-and-langchain/
#########################################################################################################################################################################
######## AWS Setting
aws_region = 'us-east-1'
region ='us-east-1'
service ='es'
######## For SageMaker
# LLM Endpoint Name :
llm_endpoint_name = 'kullm-polyglot-5-8b-v2-2023-08-23-15-47-39-450-endpoint'
# Embedding Vector Model Endpoint Name :
embvec_endpoint_name= 'KoSimCSE-roberta-2023-08-23-14-07-12'
######## For OpenSearch
# Opensearch index name :
index_name = 'fsi-sample'
# Opensearch domain_endpoin name :
opensearch_domain_endpoint = "https://search-ragopensearch-2pz3fgitugmvrz7vbngitqljzu.us-east-1.es.amazonaws.com"
# Opensearch master user auth
username = 'raguser'
password = 'MarsEarth1!'
#aws_access_key = os.environ['AWS_ACCESS_KEY']
#aws_secret_key =os.environ['AWS_SECRET_KEY']
##########################################################################################################################################################################
# 검색 rank 개수
faiss_k =3
# Kullum LLM 파라미터 설정
params = {
'do_sample': False,
'max_new_tokens': 512, #128
'temperature': 1.0, # 0.5 ~ 1.0 default = 1.0 높으면 랜덤하게 자유도. 다음 생성 문장 토큰의 자유도
'top_k': 0,
'top_p': 0.9,
'return_full_text': False,
'repetition_penalty': 1.1,
'presence_penalty': None,
'eos_token_id': 2
}
##########################################################################################################################################################################
def load_chain(llm_endpoint_name):
# KULLUM LLM 로드
LLMTextContentHandler = KullmContentHandler()
endpoint_name_text = llm_endpoint_name
seperator = "||SPEPERATOR||"
llm_text = SagemakerEndpoint(
endpoint_name=endpoint_name_text,
region_name=aws_region,
model_kwargs=params,
content_handler=LLMTextContentHandler,
)
prompt_template = ''.join(["{context}", seperator, "{question}"])
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
chain = load_qa_chain(llm=llm_text, chain_type="stuff", prompt=PROMPT, verbose=True)
return chain
##################################################################################################
# FAISS VectorStore - OpenSearch
##################################################################################################
def load_emb_vec(embvec_endpoint_name):
LLMEmbHandler = KoSimCSERobertaContentHandler()
emb_vec = SagemakerEndpointEmbeddingsJumpStart(
endpoint_name=embvec_endpoint_name,
region_name=aws_region,
content_handler=LLMEmbHandler,
)
return emb_vec
# opensearch score seems like ranking
def filter_and_remove_score_opensearch_vector_score(res, cutoff_score = 0.006, variance=0.95):
# Get the lowest score
highest_score = max(score for doc, score in res)
print('highest_score : ', highest_score)
# If the lowest score is over 200, return an empty list
if highest_score < cutoff_score:
return []
# Calculate the upper bound for scores
lower_bound = highest_score * variance
print('lower_bound : ', lower_bound)
# Filter the list and remove the score
res = [doc for doc, score in res if score >= lower_bound]
return res
def get_similiar_docs(query, k=5, fetch_k=300, score=True, bank="신한은행"):
print("bank : ", bank)
#query = f'{bank}, {query}'
print("query : ",query)
if score:
pre_similar_doc = vectro_db.similarity_search_with_score(
query,
k=k,
fetch_k=fetch_k,
search_type="approximate_search", # approximate_search, script_scoring, painless_scripting
space_type="l2", # "l2", "l1", "linf", "cosinesimil", "innerproduct", "hammingbit";
pre_filter={"bool": {"filter": {"term": {"text": bank}}}},
boolean_filter={"bool": {"filter": {"term": {"text": bank}}}}
# filter=dict(source=bank)
)
print('jhs : ', pre_similar_doc)
pretty_print_documents(pre_similar_doc)
similar_docs = filter_and_remove_score_opensearch_vector_score(pre_similar_doc)
else:
similar_docs = vectro_db.similarity_search(
query,
k=k,
search_type="approximate_search", # approximate_search, script_scoring, painless_scripting
space_type="12", # "l2", "l1", "linf", "cosinesimil", "innerproduct", "hammingbit";
pre_filter={"bool": {"filter": {"term": {"text": bank}}}},
boolean_filter={"bool": {"filter": {"term": {"text": bank}}}}
)
similar_docs_copy = copy.deepcopy(similar_docs)
# print('similar_docs_copy : \n', similar_docs_copy)
return similar_docs_copy
# 임베딩 벡터 로드
emb_vec = load_emb_vec(embvec_endpoint_name)
# LLM 로드
chain = load_chain(llm_endpoint_name)
http_auth = (username, password) # opensearch user
#OpenSearch Vector Indexer
vectro_db = OpenSearchVectorSearch(
index_name=index_name,
opensearch_url=opensearch_domain_endpoint,
embedding_function=emb_vec,
http_auth=http_auth,
is_aoss = False,
engine="faiss",
space_type="12"
)
##################################################################################################
def pretty_print_documents(response):
for doc, score in response:
print(f'\nScore: {score}')
print(f'Document Number: {doc.metadata["row"]}')
print(f'Source: {doc.metadata["source"]}')
# Split the page content into lines
lines = doc.page_content.split("\n")
# Extract and print each piece of information if it exists
for line in lines:
split_line = line.split(": ")
if len(split_line) > 1:
print(f'{split_line[0]}: {split_line[1]}')
print('-' * 50)
def get_answer(query):
k = 3
search_query = query
similar_docs = get_similiar_docs(search_query, k=k, bank='신한은행')
llm_query = ''+query+' Category에 대한 Information을 찾아서 설명해주세요.'
if not similar_docs:
llm_query = query
answer = chain.run(input_documents=similar_docs, question=llm_query)
return answer
##################################################################################################
# Streamlit UI
# From here down is all the StreamLit UI.
##################################################################################################
st.set_page_config(page_title="FSI RAG FAQ Demo vectorstore mode", page_icon="🦜", layout="wide")
st.header("🦜 FSI RAG Demo - Opensearch vectorstore with LLM mode")
def get_text():
input_text = st.text_input("You: ", "", key="input")
return input_text
# 사용자로부터 입력을 받습니다.
# user_input = get_text()
# if "generated" not in st.session_state:
# st.session_state["generated"] = []
#
# if "past" not in st.session_state:
# st.session_state["past"] = []
#
# # 사용자가 입력을 제공했는지 확인합니다.
# if user_input:
# output = get_answer(user_input)
# print("OUTPUT : ", output)
# st.session_state.past.append(user_input)
# st.session_state.generated.append(output)
#
#
#
#
# if st.session_state["generated"]:
#
# for i in range(len(st.session_state["generated"]) - 1, -1, -1):
# message(st.session_state["generated"][i], key=str(i))
# message(st.session_state["past"][i], is_user=True, key=str(i) + "_user")
from langchain.callbacks import StreamlitCallbackHandler
if "messages" not in st.session_state:
st.session_state["messages"] = []
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="여기에 금융 FAQ 질문해주세요"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = get_answer(prompt)
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response) | [
"context",
"question",
"{context}||SPEPERATOR||{question}"
] |
2024-01-10 | xinh3ng/rnd | rnd~ai~calendar~utils~chat_utils.py | from openai import OpenAI
import os
from tenacity import retry, stop_after_attempt, wait_random_exponential
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def chat_with_backoff(**kwargs):
"""Backoff to combat with rate limits"""
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
if kwargs.get("session_id") is None:
response = client.chat.completions.create(model=kwargs["model"], messages=kwargs["messages"])
else:
response = client.chat.completions.create(
model=kwargs["model"], messages=kwargs["messages"], session_id=kwargs.get("session_id")
)
return response
| [] |
2024-01-10 | xinh3ng/rnd | rnd~ai~iac~utils~chat_utils.py | from openai import OpenAI
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
import os
from tenacity import retry, stop_after_attempt, wait_random_exponential
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def chat_with_backoff(**kwargs):
"""Backoff to combat with rate limits"""
if kwargs.get("session_id") is None:
response = client.chat.completions.create(model=kwargs["model"], messages=kwargs["messages"])
else:
response = client.chat.completions.create(
model=kwargs["model"], messages=kwargs["messages"], session_id=kwargs.get("session_id")
)
return response
| [] |
2024-01-10 | xinh3ng/rnd | rnd~ai~chatgpt~video_summary.py | """
# Usage
export video_filepath="$HOME/dev/data/openai/video1327392268.mp4"
python ai/chatgpt/video_summary.py --video_filepath=$video_filepath
"""
from bs4 import BeautifulSoup
import json
from moviepy import editor as E
import openai
from openai import OpenAI
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
import os
from pytube import YouTube
import requests
from totepy.generic import create_logger
logger = create_logger(__name__, level="info")
# TODO: The 'openai.organization_id' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(organization_id="org-bSEDQevVUvT2CCljpD3DNAEA")'
# openai.organization_id = "org-bSEDQevVUvT2CCljpD3DNAEA"
def youtube_download(url):
youtube = YouTube(url)
youtube = youtube.streams.get_highest_resolution()
logger.info(f"Starting download from {url}")
try:
youtube.download()
logger.info("Successfully completed the download")
except:
logger.error("An error has occurred")
def download(url, video_filepath):
response = requests.get(url, stream=True)
with open(video_filepath, "wb") as f:
for chunk in response.iter_content(chunk_size=256):
f.write(chunk)
def main(
video_filepath: str = "*.mp4",
verbose: int = 1,
):
if 0 == 1:
# Download from youtube
download_url = "https://www.youtube.com/watch?v=GPOv72Awo68&ab_channel=CrashCourse"
video_filepath = "How it Happened - The 2008 Financial Crisis Crash Course Economics 12.mp4"
youtube_download(download_url)
if 0 == 1:
download_url = "https://v-cf.caffeine.tv/v1/E3E72733E4C84D8F91DF91D37E346398/abd85fae-8e1c-400a-8385-f4df54db045c/primary/chunk-stream_0-00041.m4s"
video_filepath = "demo.mp4"
download(download_url, video_filepath)
logger.info("Converting mp4 to mp3 with moviepy...")
audio_filepath = video_filepath.replace("mp4", "mp3") # mp4 becomes mp3
video = E.VideoFileClip(video_filepath)
video.audio.write_audiofile(audio_filepath)
if 1 == 1:
logger.info("Using whisper-1 to get the transcription from a mp3 file ...")
transcript = client.audio.transcribe("whisper-1", open(audio_filepath, "rb"))
text = transcript["text"]
# logger.info(text)
logger.info("Calling chatGPT...")
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.",
},
{"role": "user", "content": f"Can you summarize the following text in about 100 words: {text}"},
],
)
logger.info("Reply: \n%s" % response["choices"][0]["message"]["content"])
return
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--video_filepath")
parser.add_argument("--verbose", type=int, default=1)
args = vars(parser.parse_args())
logger.info("Cmd line args:\n{}".format(json.dumps(args, sort_keys=True, indent=4)))
main(**args)
logger.info("ALL DONE!\n")
| [
"You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.",
"Can you summarize the following text in about 100 words: PLACEHOLDER"
] |
2024-01-10 | xinh3ng/rnd | rnd~ai~iac~serving~bee_qa.py | """Select bee questions and answers using chatgpt in a RAG way
# Usage Example
gpt_model="gpt-3.5-turbo-16k"
prompt_template="My bee questions are stored inside a sqlite3 table: qa. It has the following columns and short descriptions: (1) 'qustion_no' is the question number; (2) 'question' is the question; (3) 'answer' is the answer. When I make a request below, I want you to write the sql query and also run the sql and get me the final output."
prompt="${prompt_template}. Now can you select 3 random questions and anssers?"
verbose=3
python rnd/ai/iac/serving/bee_qa.py --gpt_model=$gpt_model --prompt="$prompt" --verbose=$verbose
"""
from cafpyutils.generic import create_logger
import json
from openai import OpenAI
import os
import pandas as pd
import re
from tenacity import retry, stop_after_attempt, wait_random_exponential
from typing import List
from rnd.ai.calendar.utils.chat_utils import chat_with_backoff
from rnd.ai.iac.serving.save_pdfs import DbOperator
logger = create_logger(__name__)
pd.set_option("display.max_columns", 100)
pd.set_option("display.width", 120)
pd.set_option("display.max_colwidth", None) # No truncation
def parse_sql_query(text: str) -> str:
text = text.lower()
# Grab the first ```sql and the immediate next ';'
pattern = r"```.*?```"
match = re.search(pattern, text, re.DOTALL)
sql = text[match.start() : match.end()].replace("```", "").replace("sql", "")
assert ("select" in sql) and ("from" in sql)
return sql
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(3))
def chat_with_backoff(client, model: str, messages: List[dict]):
"""Backoff to combat with rate limits"""
response = client.chat.completions.create(model=model, messages=messages)
return response
def main(
gpt_model: str,
prompt: str,
openai_api_key: str = os.environ.get("OPENAI_API_KEY"),
verbose: int = 1,
) -> dict:
openai_client = OpenAI(api_key=openai_api_key)
response = chat_with_backoff(client=openai_client, model=gpt_model, messages=[{"role": "user", "content": prompt}])
reply = response.choices[0].message.content
logger.info("ChatGPT's reply: %s" % reply)
logger.info("Parsing ChatGPT's sql query suggestion and getting the final result")
sql_query = parse_sql_query(reply)
op = DbOperator(db="bees.db")
result = op.read_as_pandas(sql_query=sql_query)
result = {
"reply": reply,
"result": result.to_dict("records"),
}
if verbose >= 3:
logger.info("Final result:\n%s" % json.dumps(result, indent=4))
return result
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--gpt_model")
parser.add_argument("--prompt")
parser.add_argument("--verbose", type=int, default=1)
args = vars(parser.parse_args())
logger.info("Command line args:\n%s" % json.dumps(args, indent=4))
main(**args)
logger.info("ALL DONE!\n")
| [] |
2024-01-10 | DudeFr0mMars/CustomGPT | st_main.py | import os
import sys
import toml
import requests
import openai
import streamlit as st
import concurrent.futures
from googlesearch import search
import time
if not os.path.exists("secrets.toml"):
# Set API keys and model
open_ai_api_key = st.secrets.api_keys["OPENAI_API_KEY"]
browserless_api_key = st.secrets.api_keys["BROWSERLESS_API_KEY"]
else:
secrets = toml.load("secrets.toml")["api_keys"]
open_ai_api_key = secrets["OPENAI_API_KEY"]
browserless_api_key = secrets["BROWSERLESS_API_KEY"]
openai_model = "gpt-3.5-turbo"
openai.api_key = open_ai_api_key
headers = {'Cache-Control': 'no-cache', 'Content-Type': 'application/json'}
params = {'token': browserless_api_key}
@st.cache_data
def scrape(link):
"""Scrape the content of a webpage."""
json_data = {
'url': link,
'elements': [{'selector': 'body'}],
}
response = requests.post('https://chrome.browserless.io/scrape', params=params, headers=headers, json=json_data)
if response.status_code == 200:
webpage_text = response.json()['data'][0]['results'][0]['text']
return webpage_text
else:
print(f"Error: Unable to fetch content from {link}. Status code: {response.status_code}")
return ""
def summarize(question, webpage_text):
"""Summarize the relevant information from a body of text related to a question."""
prompt = f"""You are an intelligent summarization engine. Extract and summarize the
most relevant information from a body of text related to a question.
Question: {question}
Body of text to extract and summarize information from:
{webpage_text[0:2500]}
Relevant information:"""
while True:
try:
response = openai.ChatCompletion.create(
model=openai_model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],temperature = 0.2
)
return response.choices[0].message.content
except openai.error.RateLimitError:
print("Rate limit exceeded. Sleeping for 3 seconds.")
time.sleep(3)
def final_summary(question, summaries):
"""Construct a final summary from a list of summaries."""
num_summaries = len(summaries)
prompt = f"You are an intelligent summarization engine. Extract and summarize relevant information from the {num_summaries} points below to construct an answer to a question.\n\nQuestion: {question}\n\nRelevant Information:"
for i, summary in enumerate(summaries):
prompt += f"\n{i + 1}. {summary}"
while True:
try:
response = openai.ChatCompletion.create(
model=openai_model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
]
)
return response.choices[0].message.content
except openai.error.RateLimitError:
print("Rate limit exceeded. Sleeping for 3 seconds.")
time.sleep(3)
def link(r):
"""Extract the link from a search result."""
return r['link']
@st.cache_data
def search_results(question):
"""Get search results for a question."""
organic_results = []
for link in search(question,tld="co.in", num=10, stop=10, pause=2):
organic_results.append(link)
return organic_results
def print_citations(links, summaries):
"""Print citations for the summaries."""
st.write("CITATIONS")
num_citations = min(len(links), len(summaries))
for i in range(num_citations):
st.write(f"[{i + 1}] {links[i]}\n{summaries[i]}\n")
def scrape_and_summarize(link, question):
"""Scrape the content of a webpage and summarize it."""
webpage_text = scrape(link)
summary = summarize(question, webpage_text)
return summary
def main():
st.title("CustomGPT")
question = st.text_input("What would you like me to search?")
if st.button("Search"):
links = search_results(question)[:7] # Limit the number of search results
summaries = []
step_placeholder = st.empty()
with st.spinner("Learning..."):
with concurrent.futures.ThreadPoolExecutor() as executor:
future_to_summary = {executor.submit(scrape_and_summarize, link, question): link for link in links}
for i, future in enumerate(concurrent.futures.as_completed(future_to_summary)):
summaries.append(future.result())
step_placeholder.text(f"Step {i+1}: Scraping and summarizing link {i+1}")
step_placeholder.text("Step 9: Generating final summary")
answer = final_summary(question, summaries)
step_placeholder.empty()
st.write("HERE IS THE ANSWER")
st.write(answer)
print_citations(links, summaries)
if __name__ == "__main__":
main()
| [
"You are an intelligent summarization engine. Extract and summarize the\n most relevant information from a body of text related to a question.\n\n Question: PLACEHOLDER\n\n Body of text to extract and summarize information from:\n PLACEHOLDER\n\n Relevant information:",
"You are an intelligent summarization engine. Extract and summarize relevant information from the PLACEHOLDER points below to construct an answer to a question.\n\nQuestion: PLACEHOLDER\n\nRelevant Information:",
"You are a helpful assistant."
] |
2024-01-10 | ubc-vision/vivid123 | vivid123~pipelines~vivid123_pipeline.py | # Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Dict, List, Optional, Union
from dataclasses import dataclass
import numpy as np
import torch
from einops import rearrange
import PIL
from diffusers.pipelines import TextToVideoSDPipeline
from diffusers.pipelines.text_to_video_synthesis import TextToVideoSDPipelineOutput
from diffusers.models import AutoencoderKL, UNet3DConditionModel, UNet2DConditionModel
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
is_accelerate_available,
is_accelerate_version,
logging,
replace_example_docstring,
BaseOutput,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
from transformers import CLIPVisionModelWithProjection, CLIPTextModel, CLIPTokenizer
from ..models import CLIPCameraProjection
import kornia
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import TextToVideoSDPipeline
>>> from diffusers.utils import export_to_video
>>> pipe = TextToVideoSDPipeline.from_pretrained(
... "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
... )
>>> pipe.enable_model_cpu_offload()
>>> prompt = "Spiderman is surfing"
>>> video_frames = pipe(prompt).frames
>>> video_path = export_to_video(video_frames)
>>> video_path
```
"""
def tensor2vid(video: torch.Tensor, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]) -> List[np.ndarray]:
# This code is copied from https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/pipelines/multi_modal/text_to_video_synthesis_pipeline.py#L78
# reshape to ncfhw
mean = torch.tensor(mean, device=video.device).reshape(1, -1, 1, 1, 1)
std = torch.tensor(std, device=video.device).reshape(1, -1, 1, 1, 1)
# unnormalize back to [0,1]
video = video.mul_(std).add_(mean)
video.clamp_(0, 1)
# prepare the final outputs
i, c, f, h, w = video.shape
images = video.permute(2, 3, 0, 4, 1).reshape(
f, h, i * w, c
) # 1st (frames, h, batch_size, w, c) 2nd (frames, h, batch_size * w, c)
images = images.unbind(dim=0) # prepare a list of indvidual (consecutive frames)
images = [(image.cpu().numpy() * 255).astype("uint8") for image in images] # f h w c
return images
class ViVid123Pipeline(TextToVideoSDPipeline):
r"""
Pipeline for text-to-video generation.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
tokenizer (`CLIPTokenizer`):
A [`~transformers.CLIPTokenizer`] to tokenize text.
unet ([`UNet3DConditionModel`]):
A [`UNet3DConditionModel`] to denoise the encoded video latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet3DConditionModel,
scheduler: KarrasDiffusionSchedulers,
novel_view_unet: UNet2DConditionModel,
image_encoder: CLIPVisionModelWithProjection,
cc_projection: CLIPCameraProjection,
):
super().__init__(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
self.register_modules(
novel_view_unet=novel_view_unet,
image_encoder=image_encoder,
cc_projection=cc_projection,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
self.image_processor = VaeImageProcessor(
vae_scale_factor=self.vae_scale_factor,
do_convert_rgb=True,
do_normalize=True,
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
num_inference_steps=50,
fusion_schedule=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if fusion_schedule is None:
raise ValueError(
"Fusion schedule is not provided."
)
if len(fusion_schedule[0]) != num_inference_steps or len(fusion_schedule[1]) != num_inference_steps:
raise ValueError(
"Fusion schedule length does not match the number of timesteps."
)
def prepare_latents(
self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None, noise_identical_accross_frames=False
):
shape = (
batch_size,
num_channels_latents,
num_frames if not noise_identical_accross_frames else 1,
height // self.vae_scale_factor,
width // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
if latents.shape != shape:
raise ValueError(
f"User-prepared `latents` must have shape {shape}, when noise_identical_accross_frames={noise_identical_accross_frames} but got {latents.shape}."
)
latents = latents.to(device)
if noise_identical_accross_frames:
latents = latents.repeat(1, 1, num_frames, 1, 1)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def prepare_img_latents(
self, image, batch_size, dtype, device, generator=None, do_zero123_classifier_free_guidance=False
):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
if isinstance(image, torch.Tensor):
# Batch single image
if image.ndim == 3:
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
image = image.unsqueeze(0)
assert image.ndim == 4, "Image must have 4 dimensions"
# Check image is in [-1, 1]
if image.min() < -1 or image.max() > 1:
raise ValueError("Image should be in [-1, 1] range")
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
image = image.to(device=device, dtype=dtype)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if isinstance(generator, list):
init_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.mode(generator[i]) for i in range(batch_size) # sample
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.vae.encode(image).latent_dist.mode()
# init_latents = self.vae.config.scaling_factor * init_latents # todo in original zero123's inference gradio_new.py, model.encode_first_stage() is not scaled by scaling_factor
if batch_size > init_latents.shape[0]:
# init_latents = init_latents.repeat(batch_size // init_latents.shape[0], 1, 1, 1)
num_images_per_prompt = batch_size // init_latents.shape[0]
# duplicate image latents for each generation per prompt, using mps friendly method
bs_embed, emb_c, emb_h, emb_w = init_latents.shape
init_latents = init_latents.unsqueeze(1)
init_latents = init_latents.repeat(1, num_images_per_prompt, 1, 1, 1)
init_latents = init_latents.view(bs_embed * num_images_per_prompt, emb_c, emb_h, emb_w)
# init_latents = torch.cat([init_latents]*2) if do_zero123_classifier_free_guidance else init_latents # follow zero123
init_latents = (
torch.cat([torch.zeros_like(init_latents), init_latents])
if do_zero123_classifier_free_guidance
else init_latents
)
init_latents = init_latents.to(device=device, dtype=dtype)
return init_latents
def CLIP_preprocess(self, x):
dtype = x.dtype
# following openai's implementation
# TODO HF OpenAI CLIP preprocessing issue https://github.com/huggingface/transformers/issues/22505#issuecomment-1650170741
# follow openai preprocessing to keep exact same, input tensor [-1, 1], otherwise the preprocessing will be different, https://github.com/huggingface/transformers/pull/22608
if isinstance(x, torch.Tensor):
if x.min() < -1.0 or x.max() > 1.0:
raise ValueError("Expected input tensor to have values in the range [-1, 1]")
x = kornia.geometry.resize(
x.to(torch.float32), (224, 224), interpolation="bicubic", align_corners=True, antialias=False
).to(dtype=dtype)
x = (x + 1.0) / 2.0
# renormalize according to clip
x = kornia.enhance.normalize(
x, torch.Tensor([0.48145466, 0.4578275, 0.40821073]), torch.Tensor([0.26862954, 0.26130258, 0.27577711])
)
return x
# from stable_diffusion_image_variation
def _encode_image(self, image, device, num_images_per_prompt, do_video_classifier_free_guidance):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
if isinstance(image, torch.Tensor):
# Batch single image
if image.ndim == 3:
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
image = image.unsqueeze(0)
assert image.ndim == 4, "Image must have 4 dimensions"
# Check image is in [-1, 1]
if image.min() < -1 or image.max() > 1:
raise ValueError("Image should be in [-1, 1] range")
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
image = image.to(device=device, dtype=dtype)
image = self.CLIP_preprocess(image)
# if not isinstance(image, torch.Tensor):
# # 0-255
# print("Warning: image is processed by hf's preprocess, which is different from openai original's.")
# image = self.feature_extractor(images=image, return_tensors="pt").pixel_values
image_embeddings = self.image_encoder(image).image_embeds.to(dtype=dtype)
image_embeddings = image_embeddings.unsqueeze(1)
# duplicate image embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = image_embeddings.shape
image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1)
image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_video_classifier_free_guidance:
negative_prompt_embeds = torch.zeros_like(image_embeddings)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
image_embeddings = torch.cat([negative_prompt_embeds, image_embeddings])
return image_embeddings
def _encode_pose(self, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):
dtype = next(self.cc_projection.parameters()).dtype
if isinstance(pose, torch.Tensor):
pose_embeddings = pose.unsqueeze(1).to(device=device, dtype=dtype)
else:
if isinstance(pose[0], list):
pose = torch.Tensor(pose)
else:
pose = torch.Tensor([pose])
x, y, z = pose[:, 0].unsqueeze(1), pose[:, 1].unsqueeze(1), pose[:, 2].unsqueeze(1)
pose_embeddings = (
torch.cat([torch.deg2rad(x), torch.sin(torch.deg2rad(y)), torch.cos(torch.deg2rad(y)), z], dim=-1)
.unsqueeze(1)
.to(device=device, dtype=dtype)
) # B, 1, 4
# duplicate pose embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = pose_embeddings.shape
pose_embeddings = pose_embeddings.repeat(1, num_images_per_prompt, 1)
pose_embeddings = pose_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
if do_video_classifier_free_guidance:
negative_prompt_embeds = torch.zeros_like(pose_embeddings)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
pose_embeddings = torch.cat([negative_prompt_embeds, pose_embeddings])
return pose_embeddings
def _encode_image_with_pose(self, image, pose, device, num_images_per_prompt, do_video_classifier_free_guidance):
img_prompt_embeds = self._encode_image(image, device, num_images_per_prompt, False)
pose_prompt_embeds = self._encode_pose(pose, device, num_images_per_prompt, False)
prompt_embeds = torch.cat([img_prompt_embeds, pose_prompt_embeds], dim=-1)
prompt_embeds = self.cc_projection(prompt_embeds)
# prompt_embeds = img_prompt_embeds
# follow 0123, add negative prompt, after projection
if do_video_classifier_free_guidance:
negative_prompt = torch.zeros_like(prompt_embeds)
prompt_embeds = torch.cat([negative_prompt, prompt_embeds])
return prompt_embeds
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_frames: int = 16,
num_inference_steps: int = 50,
guidance_scale_video: float = 9.0,
negative_prompt: Optional[Union[str, List[str]]] = None,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.FloatTensor] = None,
prompt_embeds: Optional[torch.FloatTensor] = None,
negative_prompt_embeds: Optional[torch.FloatTensor] = None,
output_type: Optional[str] = "np",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
# vivid123 params below
image: Optional[
Union[
torch.FloatTensor,
PIL.Image.Image,
np.ndarray,
List[torch.FloatTensor],
List[PIL.Image.Image],
List[np.ndarray],
]
] = None,
cam_pose_torch: Optional[torch.FloatTensor] = None,
fusion_schedule: Optional[tuple[float]] = None,
ddim_eta_0123: float = 1.0,
guidance_scale_zero123: float = 3.0,
noise_identical_accross_frames: bool = False,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated video.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated video.
num_frames (`int`, *optional*, defaults to 16):
The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds
amounts to 2 seconds of video.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.FloatTensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
`(batch_size, num_channel, num_frames, height, width)`.
prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.FloatTensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
output_type (`str`, *optional*, defaults to `"np"`):
The output format of the generated video. Choose between `torch.FloatTensor` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
of a plain tuple.
callback (`Callable`, *optional*):
A function that calls every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
guidance_scale_zero123 (`float`, *optional*, defaults to 3.0):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
cam_pose_torch: (`torch.FloatTensor`, *optional*):
Camera pose in torch tensor, shape (4,). The elements mean (el, sin(az), cos(az), radius)
fusion_schedule (`tuple[float]`, *optional*):
Fusion schedule for video diffusion and zero123. The first element is the schedule for video diffusion, and the
second element is the schedule for zero123. The length of each schedule should be the same as the number
of timesteps.
ddim_eta_0123 (`float`, *optional*, defaults to 1.0):
The eta value for the 0123 diffusion steps. Only applies to the [`~schedulers.DDIMScheduler`], and is
ignored in other schedulers.
Example:
Returns:
[`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] is
returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
num_videos_per_image_prompt = 1
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
num_inference_steps,
fusion_schedule
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_video_classifier_free_guidance = guidance_scale_video > 1.0
do_zero123_classifier_free_guidance = guidance_scale_zero123 > 1.0
# 3.1 Encode input prompt for video diffusion
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds, negative_prompt_embeds = self.encode_prompt(
prompt=prompt,
device=device,
# by diffusers v0.23.1, the naming of diffusers.pipelines.TextToVideoSDPipeline is still "num_images_per_prompt",
# where it should be "num_videos_per_prompt"
num_images_per_prompt=num_videos_per_image_prompt,
do_classifier_free_guidance=do_video_classifier_free_guidance,
negative_prompt=negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if do_video_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
# 3.2 Encode input image for zero123
zero123_cond_images = [image for _ in range(num_frames)]
zero123_embeds = self._encode_image_with_pose(
zero123_cond_images,
cam_pose_torch,
device,
num_videos_per_image_prompt,
do_zero123_classifier_free_guidance,
) # (2xF) x 1 x 768
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_videos_per_image_prompt,
num_channels_latents,
num_frames,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
noise_identical_accross_frames,
)
# 6. Prepare Zero123 image latents
img_latents = self.prepare_img_latents(
zero123_cond_images,
batch_size=num_frames,
dtype=zero123_embeds.dtype,
device=device,
generator=generator,
do_zero123_classifier_free_guidance=True,
)
# 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 8. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_video_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual with video diffusion
noise_pred_video = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
return_dict=False,
)[0]
# perform classifier-free guidance for video diffusion
if do_video_classifier_free_guidance:
noise_pred_video_uncond, noise_pred_video_text = noise_pred_video.chunk(2)
noise_pred_video = noise_pred_video_uncond + guidance_scale_video * (
noise_pred_video_text - noise_pred_video_uncond
)
# if do_video_classifier_free_guidance and guidance_rescale > 0.0:
# # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
# noise_pred_video = rescale_noise_cfg(
# noise_pred_video, noise_pred_video_text, guidance_rescale=guidance_rescale
# )
# zero123 denoising
latent_model_input_zero123 = torch.cat([latents] * 2) if do_zero123_classifier_free_guidance else latents
augmented_latent_model_input_zero123 = torch.cat(
[rearrange(latent_model_input_zero123, "B C F H W -> (B F) C H W"), img_latents],
dim=1,
).to(self.novel_view_unet.dtype)
noise_pred_zero123 = self.novel_view_unet(
augmented_latent_model_input_zero123,
t,
encoder_hidden_states=zero123_embeds,
return_dict=True,
).sample
noise_pred_zero123 = rearrange(noise_pred_zero123, "(B F) C H W -> B C F H W", F=num_frames)
if do_zero123_classifier_free_guidance:
noise_pred_zero123_uncond, noise_pred_zero123_text = noise_pred_zero123.chunk(2)
noise_pred_zero123 = noise_pred_zero123_uncond + guidance_scale_zero123 * (
noise_pred_zero123_text - noise_pred_zero123_uncond
)
# fusing video diffusion with zero123
noise_pred = fusion_schedule[0][i] * noise_pred_video + fusion_schedule[1][i] * noise_pred_zero123
# reshape latents
bsz, channel, frames, width, height = latents.shape
latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# reshape latents back
latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if output_type == "latent":
return TextToVideoSDPipelineOutput(frames=latents)
video_tensor = self.decode_latents(latents)
if output_type == "pt":
video = video_tensor
else:
video = tensor2vid(video_tensor)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (video,)
return TextToVideoSDPipelineOutput(frames=video)
| [
"1"
] |
2024-01-10 | subh05sus/Python-Voice-Assistant | jarvis.py | import wmi # windows management information for any kind for information regarding system
import os # provides functions for interacting with the operating system
import requests # for making HTTP requests to a specified URL
from time import strftime
import pyttsx3 # text-to-speech conversion library
import sys
import datetime
import speech_recognition as sr
import wikipedia # ********* to improve wikipedia searching
import webbrowser
import random
import pyautogui # used to take ss
import psutil # used to track resource utilization in the system
import subprocess # used to run other programs
import speedtest as speedtest
from ecapture import ecapture as ec
import pyautogui # to take screenshot
from time import sleep
import screen_brightness_control as sbc
import pyjokes
import pywhatkit # to send whatsapp msg
import googletrans
from bs4 import BeautifulSoup # to pull data out of html or XML files
import openai
import time
from playsound import playsound
from pywikihow import search_wikihow
from PyDictionary import PyDictionary
import turtle
import smtplib #library to send email
import PyPDF2
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voices', voices[0].id)
list_of_jokes = ["The three most well known languages in India are English, Hindi, and... JavaScript",
"Interviewer... Where were you born?Me in India... Interviewer:.. oh, which part?... Me: What ‘which part’ ..? Whole body was born in India",
"how many Indians does it take to fix a lightbulb?Two. One to do the task and other to explain how lightbulbs were actually invented in ancient India",
"What do you call bread from India? It's Naan of your business",
"Britain: Drive on the left side... Europe and America: Drive on the right side...India: lol what's a 'traffic law'?"]
jokes = len(list_of_jokes) - 1
ran_joke = random.randint(0, jokes)
global name
def speak(audio): # speak audio
print(audio)
engine.say(audio)
engine.runAndWait()
def bytes_to_mb(bytes):
KB = 1024 # One Kilobyte is 1024 bytes
MB = KB * 1024 # One MB is 1024 KB
return int(bytes / MB)
def wishMe(): # wishes me
speak("Hey Jarvis here,Whats your name?")
name = takeCommand().lower()
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour <= 3:
speak("I am Your Personal assistant, Jarvis! version 1.0!")
speak(f"As its too late {name}, better if you sleep early ...")
elif hour >= 4 and hour < 12:
speak(f"Good Morning {name}!")
speak("I am Your Personal assistant, Jarvis! version 1.0!")
elif hour >= 12 and hour < 17:
speak(f"Good Afternoon {name} !")
speak("I am Your Personal assistant, Jarvis! version 1.0!")
elif hour >= 17 and hour < 19:
speak(f"Good Evening {name}!")
speak("I am Your Personal assistant, Jarvis! version 1.0!")
elif hour >= 19 and hour < 24:
speak(f"Hello {name} ,I am Your Personal assistant, Jarvis! version 1.0!")
# good night will be greeted after the task is performed and exit command is given
return name
def takeCommand(): # takes microphone inout and returns output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
# Using google for voice recognition
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n") # User query will be printed
except Exception as e:
# Say that again will be printed in case of improper voice
speak("Say that again please...")
return "None" # None string will be returned
return query
with open('profile.txt', 'r') as f:
email = f.readline().strip()
password = f.readline().strip()
def sendemail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(email, password)
server.sendmail(email, to, content)
server.close()
def readBooks():
speak("Enter the path of the file including it's name.")
filePath = input("Enter the path of the file (including it's name): ")
try:
os.startfile(filePath)
book = open(filePath, 'rb')
pdfreader = PyPDF2.PdfReader(book)
pages = len(pdfreader.pages)
speak(f"Number of pages in this books are {pages}")
speak("From Which Page I Have To Start Reading ?")
try:
Page = takeCommand()
numPage = int(Page)
except:
speak("Sorry Sir, Please Write The Page Number.")
numPage = int(input("Enter The Page Number: "))
page = pdfreader.pages[numPage-1]
text = page.extract_text()
speak(text)
except:
speak("This Book is not Present!")
if __name__ == "__main__":
name = wishMe()
speak("How May I Help You?")
while True:
query = takeCommand().lower()
if 'wikipedia' in query:
speak('What you wanna search on it?')
lookfor = takeCommand()
results = wikipedia.summary(lookfor, sentences=5)
source = wikipedia.page(lookfor).url
speak("According to Wikipedia")
speak(results)
speak("You may refer to this url for more info")
print(source)
elif 'read books' in query:
readBooks()
elif 'internet speed' in query:
st = speedtest.Speedtest()
dl = bytes_to_mb(st.download())
up = bytes_to_mb(st.upload())
speak(
f'{name} we have {dl} MB per second of DOWNLOAD SPEED and {up} MB per second of UPLOAD SPEED')
elif 'stop' in query or 'shut up' in query or 'sleep' in query:
speak('Alright Sir! Ping me up when you need me again')
sys.exit(0)
elif 'thank you' in query or 'appreciate' in query:
speak("It's my duty to assist you anytime sir")
elif 'open youtube' in query:
speak("Here We Go")
webbrowser.open("youtube.com")
elif 'youtube' in query and 'search' in query:
speak(f"What Should I Search {name}?")
search_yt = takeCommand()
search_yt = search_yt.replace(" ", "+")
speak("Here We Go")
webbrowser.open(
f"https://www.youtube.com/results?search_query={search_yt}")
elif 'open google' in query:
speak("Here We Go")
webbrowser.open("google.com")
elif 'google' in query and 'search' in query:
speak(f"What Should I Search {name} ?")
search_go = takeCommand()
search_go = search_go.replace(" ", "+")
speak("Here We Go")
webbrowser.open(f"https://www.google.com/search?q={search_go}")
elif 'open instagram' in query:
speak("Here We Go")
webbrowser.open("instagram.com")
elif 'relax' in query:
speak("Relaxing........................")
w = 500
h = 500
food_size = 10
delay = 100
offsets = {
"up": (0, 20),
"down": (0, -20),
"left": (-20, 0),
"right": (20, 0)
}
def reset():
global snake, snake_dir, food_position, pen
snake = [[0, 0], [0, 20], [0, 40], [0, 60], [0, 80]]
snake_dir = "up"
food_position = get_random_food_position()
food.goto(food_position)
move_snake()
def move_snake():
global snake_dir
new_head = snake[-1].copy()
new_head[0] = snake[-1][0] + offsets[snake_dir][0]
new_head[1] = snake[-1][1] + offsets[snake_dir][1]
if new_head in snake[:-1]:
reset()
else:
snake.append(new_head)
if not food_collision():
snake.pop(0)
if snake[-1][0] > w / 2:
snake[-1][0] -= w
elif snake[-1][0] < - w / 2:
snake[-1][0] += w
elif snake[-1][1] > h / 2:
snake[-1][1] -= h
elif snake[-1][1] < -h / 2:
snake[-1][1] += h
pen.clearstamps()
for segment in snake:
pen.goto(segment[0], segment[1])
pen.stamp()
screen.update()
turtle.ontimer(move_snake, delay)
def food_collision():
global food_position
if get_distance(snake[-1], food_position) < 20:
food_position = get_random_food_position()
food.goto(food_position)
return True
return False
def get_random_food_position():
x = random.randint(- w / 2 + food_size, w / 2 - food_size)
y = random.randint(- h / 2 + food_size, h / 2 - food_size)
return (x, y)
def get_distance(pos1, pos2):
x1, y1 = pos1
x2, y2 = pos2
distance = ((y2 - y1) ** 2 + (x2 - x1) ** 2) ** 0.5
return distance
def go_up():
global snake_dir
if snake_dir != "down":
snake_dir = "up"
def go_right():
global snake_dir
if snake_dir != "left":
snake_dir = "right"
def go_down():
global snake_dir
if snake_dir!= "up":
snake_dir = "down"
def go_left():
global snake_dir
if snake_dir != "right":
snake_dir = "left"
screen = turtle.Screen()
screen.setup(w, h)
screen.title("Snake")
screen.bgcolor("blue")
screen.setup(500, 500)
screen.tracer(0)
pen = turtle.Turtle("square")
pen.penup()
food = turtle.Turtle()
food.shape("square")
food.color("yellow")
food.shapesize(food_size / 20)
food.penup()
screen.listen()
screen.onkey(go_up, "Up")
screen.onkey(go_right, "Right")
screen.onkey(go_down, "Down")
screen.onkey(go_left, "Left")
reset()
turtle.done()
# code by PK284---------
elif 'search flight' in query:
speak("What is the source of the Flight Sir!!")
source= takeCommand()
speak("What is the Destination of the Flight Sir!!")
destination = takeCommand()
# speak("What is the Travel date sir Please speak in numberic format")
# traveldate = takeCommand()
# webbrowser.open(f"https://www.google.com/search?q={search_go}")
# webbrowser.open(f"https://www.makemytrip.com/flight/search?itinerary={source}-{destination}-25/01/2023-&tripType=O&paxType=A-1_C-0_I-0&intl=false&=&cabinClass=E")
webbrowser.open(f"https://www.makemytrip.com/flight/search?itinerary={source}-{destination}-26/01/2023&tripType=O&paxType=A-2_C-0_I-0&intl=false&cabinClass=E&ccde=IN&lang=eng")
elif 'open facebook' in query:
speak("Here We Go")
webbrowser.open("facebook.com")
elif 'open twitter' in query:
speak("Here We Go")
webbrowser.open("twitter.com")
elif 'download youtube videos' in query:
speak("Here We Go")
webbrowser.open("en.onlinevideoconverter.pro")
elif 'open whatsapp' in query:
speak("Here We Go")
webbrowser.open("web.whatsapp.com")
elif 'open reddit' in query:
speak("Here We Go")
webbrowser.open("reddit.com")
elif 'open linkedin' in query:
speak("Here We Go")
webbrowser.open("linkedin.com")
elif 'open pinterest' in query:
speak("Here We Go")
webbrowser.open("pinterest.com")
elif 'open quora' in query:
speak("Here We Go")
webbrowser.open("quora.com")
elif 'open discord' in query:
speak("Here We Go")
webbrowser.open("discord.com")
elif ('open prime video' or 'open amazon prime video') in query:
speak("Here We Go")
webbrowser.open("primevideo.com")
elif ('open netflix') in query:
speak("Here We Go")
webbrowser.open("netflix.com")
elif ('open hotstar') in query:
speak("Here We Go")
webbrowser.open("hotstar.com")
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(strTime)
elif 'the date' in query:
today = datetime.date.today()
speak(today)
elif query == 'jarvis':
speak(f"At Your Service {name}, How can I help you")
elif 'joke' in query:
URL = 'https://v2.jokeapi.dev/joke/Any'
response = requests.get(URL)
data = response.json()
if response.status_code == 200:
speak(data['setup'])
speak(data['delivery'])
else:
speak(list_of_jokes[ran_joke])
elif "volume up" in query:
pyautogui.press("volumeup")
speak("volume upped")
sleep(1)
speak("anything else for which I may assist you!")
elif "volume down" in query:
pyautogui.press("volumedown")
speak("volume lowered")
sleep(1)
speak("anything else for which i may assist you")
elif 'battery' in query:
battery = psutil.sensors_battery()
percentage = battery.percent
speak(f'{name} our System still has {percentage} percent battery')
if percentage >= 75:
print("\U0001F601")
speak(f'{name} we have enough power to continue our work!')
elif percentage >= 40 and percentage < 75:
speak(
f'{name} we should think of connecting our system to the battery supply!')
elif percentage <= 40 and percentage >= 15:
speak(
f"{name} we don't have enough power to work through!... Connect now sir!")
elif percentage < 15:
speak(
f'{name} we have very low power!... Our System may Shutdown anytime soon!...')
elif "mute" in query:
if count==0:
pyautogui.press("volumemute")
speak("volume muted")
sleep(1)
count = 1
elif count == 1:
pyautogui.press("volumemute")
speak("Voluble Now")
sleep(1)
count = 0
speak("anything else for which i may assist you")
elif "brightness" in query:
try:
current = sbc.get_brightness()
bright = int(takeCommand())
set = sbc.set_brightness(bright)
speak(f"brightness set to {set} percent")
sleep(1)
speak("anything else for which i may assist you...")
except Exception as e:
print(e)
speak("error")
elif 'todo' in query or 'to do' in query:
if 'add' in query or 'create' in query:
with open('todo.txt', 'a') as f:
todo_w = takeCommand()
f.write(f"{todo_w}\n")
speak("To Do is updated successfully !")
elif 'read' in query or 'tell' in query:
with open('todo.txt', 'r') as f:
todo_r = f.read()
if todo_r == "":
todo_r = "No Pendning Tasks "
speak(todo_r)
elif 'erase' in query or 'remove all' in query or 'clear' in query:
with open("todo.txt", "w") as f:
f.write("")
speak("All Tasks has been cleared!")
elif 'open spotify' in query:
speak("Opening spotify")
webbrowser.open("spotify.com")
elif 'screenshot' in query:
sc = pyautogui.screenshot()
sc.save('pa_ss.png')
speak("Screenshot taken successfully.")
elif "translate" in query:
translator = googletrans.Translator()
lang = ['en', 'ta', 'te', 'kn', 'ml']
# To Print all the languages that Google Translator Support
# Command to print Languages Supported
# print(googletrans.LANGUAGES)
speak(f"{name} please tell me the Sentence that you want me to translate")
text = takeCommand().lower()
speak(
"Please choose a Source Language by pressing a number from the following List!")
print(
" english ---> 1 Tamil ---> 2 Telugu ---> 3 Kannada ----> 4 Malayalam ---> 5")
numberS = int(input("Enter here: "))
speak(
"Please choose a Destination Language by pressing a number from the following List!")
print(
" english ---> 1 Tamil ---> 2 Telugu ---> 3 Kannada ----> 4 Malayalam ---> 5")
numberD = int(input("Enter here: "))
translated = translator.translate(
text, src=lang[numberS - 1], dest=lang[numberD - 1])
print(translated.text)
print("Legibility is:",
(translated.extra_data['confidence']) * 100, "%")
elif "log off" in query or "sign out" in query:
speak(
"Ok , your pc will log off in 10 seconds! make sure you exit from all applications")
subprocess.call(["shutdown", "/l"])
elif "camera" in query or "take a photo" in query:
ec.capture(0, "Jarvis-camera", "img.jpg")
elif 'play' in query:
song = query.replace('play', '')
speak('playing ' + song)
pywhatkit.playonyt(song)
elif "weather" in query:
api_key = "8ef61edcf1c576d65d836254e11ea420"
base_url = "https://api.openweathermap.org/data/2.5/weather?"
speak("What is the name of the city?")
city_name = takeCommand()
print(f"{city_name} whether conditions : ")
complete_url = base_url + "appid=" + api_key + "&q=" + city_name
response = requests.get(complete_url)
x = response.json()
if x["cod"] != "404":
y = x["main"]
current_temperature = y["temp"] - 273.15
current_temperature = float('%.2f' % current_temperature)
current_humidiy = y["humidity"]
z = x["weather"]
weather_description = z[0]["description"]
speak(" Temperature in Celcius unit is " +
str(current_temperature) +
"\n humidity in percentage is " +
str(current_humidiy) +
"\n description " +
str(weather_description))
print(" Temperature in Celcius unit = " +
str(current_temperature) +
"\n humidity (in percentage) = " +
str(current_humidiy) +
"\n description = " +
str(weather_description))
else:
speak("Can't find details about this city")
elif "current news" in query or "latest news" in query:
url = "https://www.indiatoday.in/india"
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# Find all the headlines on the page
headlines = soup.find_all("h2")
for headline in headlines[:4]:
print(headline.text)
speak(headline.text)
elif "who made you" in query or "who created you" in query or "who discovered you" in query:
speak("I am a human creation built by all sets of knowledge of humans.I am nothing without humans")
elif "initiate" in query or "chat" in query or "Veronica" in query or "gpt" in query:
def GPT():
speak("Connecting to Veronica")
# Enter API KEY or Leave blank if you don't want to use this function
API_KEY = ""
openai.api_key = API_KEY
if API_KEY == "":
print("Please Enter the API Key!")
speak("Please Enter the API Key!")
while API_KEY != "":
engine1 = pyttsx3.init()
voices = engine1.getProperty('voices')
engine1.setProperty('voice', voices[1].id)
r = sr.Recognizer()
mic = sr.Microphone(device_index=1)
conversation = ""
user_name = str(input("Enter your name: "))
bot_name = "Veronica"
print("Hey," + user_name)
while True:
with mic as source:
print("\nlistening...")
r.adjust_for_ambient_noise(source, duration=0.2)
audio = r.listen(source)
print("no longer listening.\n")
try:
user_input = r.recognize_google(audio)
except:
continue
prompt = user_name + ": " + user_input + "\n" + bot_name + ": "
conversation += prompt # allows for context
# fetch response from open AI api
response = openai.Completion.create(engine='text-davinci-003', prompt=conversation,
max_tokens=50)
response_str = response["choices"][0]["text"].replace("\n", "")
response_str = response_str.split(user_name + ": ", 1)[0].split(bot_name + ": ", 1)[0]
conversation += response_str + "\n"
print(response_str)
engine1.say(response_str)
prompt = user_name + ": " + user_input + "\n" + bot_name + ": "
conversation += prompt # allows for context
# fetch response from open AI api
response = openai.Completion.create(
engine='text-davinci-003', prompt=conversation, max_tokens=50)
response_str = response["choices"][0]["text"].replace(
"\n", "")
response_str = response_str.split(
user_name + ": ", 1)[0].split(bot_name + ": ", 1)[0]
conversation += response_str + "\n"
print(response_str)
engine1.say(response_str)
engine1.runAndWait()
GPT()
elif 'news' in query:
api_key = '9bb9b456bf124f80aba6a0e09cc2f811'
URL = 'https://newsapi.org/v2/top-headlines?country=us&apiKey=' + api_key
resp = requests.get(URL)
if resp.status_code == 200:
data = resp.json()
news = data['articles'][0]
speak(news['title'])
speak(news['description'])
else:
speak("Cannot find a news at this moment")
elif "ip address" in query:
ip = requests.get('https://api.ipify.org').text
print(ip)
speak(f"Your ip address is {ip}")
elif "switch the window" in query or "switch window" in query:
speak(f"Okay {name}, Switching the window")
pyautogui.keyDown("alt")
pyautogui.press("tab")
pyautogui.keyUp("alt")
elif 'screenshot' in query:
speak("Taking screenshot")
times = time.time()
name_img = r"{}.png".format(str(times))
img = pyautogui.screenshot(name_img)
speak("Done!")
img.show()
elif "system" in query:
c = wmi.WMI()
my_system = c.Win32_ComputerSystem()[0]
speak(f"Manufacturer: {my_system.Manufacturer}")
speak(f"Model: {my_system.Model}")
speak(f"Name: {my_system.Name}")
speak(f"NumberOfProcessors: {my_system.NumberOfProcessors}")
speak(f"SystemType: {my_system.SystemType}")
speak(f"SystemFamily: {my_system.SystemFamily}")
elif 'how to' in query:
try:
# query = query.replace('how to', '')
max_results = 1
data = search_wikihow(query, max_results)
# assert len(data) == 1
data[0].print()
speak(data[0].summary)
except Exception as e:
speak('Sorry, I am unable to find the answer for your query.')
elif 'set alarm' in query:
speak(
"Tell me the time to set an Alarm. ")
speak("How do you want to set time in ,like hours/minutes/second")
a_info = takeCommand()
if('hours' in a_info):
speak("Tell me time in hours!")
a_info=int(input("Type it"))
# a_info = int(takeCommand())
speak(f"Alarm set for {a_info} hours")
time.sleep(a_info *3600)
elif('minutes' in a_info):
speak("Tell me time in minutes!")
a_info = int(input("Type it"))
# a_info = int(takeCommand())
time.sleep(a_info * 60)
else:
speak("Tell me time in seconds!")
a_info = int(input("Type it"))
# a_info = int(takeCommand())
time.sleep(a_info)
# playsound('Alarm.mp3')
speak("Hi I am back!!! Wake Up Wake Up Wake Up Wake Up Wake Up Wake Up!!")
elif 'meaning' in query:
speak(f"Which word do you want me to define {name}?")
queryword = takeCommand().lower()
meaning = PyDictionary.meaning(queryword)
for i in meaning:
print(meaning[i])
speak("Sir the meaning is ", str(meaning[i]))
elif 'generate image' in query or 'image with ai' in query or 'image with artificial intelligence' in query:
speak("What kind of photo do you want to generate?")
imageinfo = takeCommand()
if imageinfo == "":
pass
else:
speak("just wait a bit! I'm processing it!")
response = openai.Image.create(
prompt=imageinfo, n=1, size="1024x1024")
image_url = response['data'][0]['url']
webbrowser.open(image_url)
speak(f"Here is is!! {imageinfo}")
print(f"Here is is!! {imageinfo}")
elif 'quit' in query or 'exit' in query or 'close' in query or 'bye' in query:
speak(f"Thank you for using Jarvis {name}")
if 19 <= int(datetime.datetime.now().hour) < 24:
speak(f"Have a very Good Night {name} and sweet dreams!")
else:
speak(f"See you soon,have a very Good Day {name}!")
exit()
elif 'send email' in query:
try:
speak("What should I say?")
content = takeCommand()
speak("What is the recipient's email address?")
to = takeCommand()
sendemail(to,content)
speak("email has been sent.")
except Exception as e:
print(e)
speak("Unable to send email.")
speak("What do you want to continue with?")
| [
"PLACEHOLDER: PLACEHOLDER\nPLACEHOLDER: "
] |
2024-01-10 | Sefaria/Sefaria-Data | sources~russian_tanakh~validate_russian_with_local_llm.py | import django
django.setup()
superuser_id = 171118
# import statistics
import csv
import os
from tqdm import tqdm
from sefaria.model import *
import requests
from langchain.chat_models import ChatOpenAI, ChatAnthropic
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage
# import requests_cache
# requests_cache.install_cache('my_cache', expire_after=3600*24*14)
books = ["Genesis", "Exodus", "Leviticus", "Numbers", "Deuteronomy",
"Judges", "I_Samuel", "II_Samuel", "I_Kings", "II_Kings",
"Ruth", "Esther"]
def get_validation_prompt(en_verse, ru_verse):
prompt = f"""
Given a pair of English and Russian verses, output "YES" of the Russian is a faithful translation of the English, and "NO" otherwise. Don't output anything other than "YES" or "NO".
English:
{en_verse}
Russian:
{ru_verse}
"""
return prompt
def ask_ollama(prompt, model="neural-chat", url="http://localhost:11434/api/generate"):
data = {
"model": model,
"prompt": prompt,
"stream": False
}
headers = {
"Content-Type": "application/json"
}
response = requests.post(url, json=data, headers=headers)
if response.status_code == 200:
return response.json()["response"]
else:
print(f"Error {response.status_code}: {response.text}")
return None
def ask_claude(query):
llm = ChatAnthropic()
# user_prompt = PromptTemplate.from_template("# Input\n{text}")
user_prompt = PromptTemplate.from_template("{text}")
human_message = HumanMessage(content=user_prompt.format(text=query))
answer = llm([human_message])
return answer.content
def write_verdict_to_csv(csv_file, segment_ref, verdict):
if not os.path.exists(csv_file):
with open(csv_file, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['Ref', 'Verdict'])
with open(csv_file, 'a', newline='') as file:
writer = csv.writer(file)
writer.writerow([segment_ref, verdict])
def get_existing_refs(csv_file):
if not os.path.exists(csv_file):
return []
with open(csv_file, 'r', newline='') as file:
reader = csv.reader(file)
existing_segment_refs = [row[0] for row in reader]
return existing_segment_refs
def russian_semantic_validation(csv_filename="calude_verdicts.csv"):
existing_segment_refs = get_existing_refs(csv_filename)
segment_refs = []
for book in books:
segment_refs += library.get_index(book).all_segment_refs()
for segment_ref in tqdm(segment_refs, desc=f"Validating segments", unit="segment"):
if segment_ref.tref in existing_segment_refs:
continue
en_version_text = segment_ref.text().text
ru_version_text = segment_ref.text(vtitle="Russian Torah translation, by Dmitri Slivniak, Ph.D., edited by Dr. Itzhak Streshinsky [ru]").text
prompt = get_validation_prompt(en_version_text, ru_version_text)
# verdict = ask_ollama(prompt)
verdict = ask_claude(prompt)
if "NO" in verdict:
# print(f"possible semantic problem in {segment_ref}")
verdict = "NO"
elif "YES" in verdict:
# print(f"{segment_ref} passed")
verdict = "YES"
# else:
# print(f"unclear verdict for {segment_ref}: {verdict}")
write_verdict_to_csv(csv_filename, segment_ref, verdict)
if __name__ == '__main__':
print("hello world")
russian_semantic_validation()
print("end") | [
"\n Given a pair of English and Russian verses, output \"YES\" of the Russian is a faithful translation of the English, and \"NO\" otherwise. Don't output anything other than \"YES\" or \"NO\".\n English:\n PLACEHOLDER\n Russian:\n PLACEHOLDER\n ",
"{text}"
] |
2024-01-10 | zby/answerbot | answerbot~react.py | import httpx
import openai
import json
import time
import logging
import copy
from .prompt_builder import FunctionalPrompt, PromptMessage, Assistant, System, FunctionCall, FunctionResult
from .get_wikipedia import WikipediaApi
from .react_prompt import FunctionalReactPrompt, NewFunctionalReactPrompt, TextReactPrompt
from .prompt_templates import NoExamplesReactPrompt
from .toolbox import ToolBox, WikipediaSearch
# Configure basic logging
logging.basicConfig(level=logging.INFO)
# Get a logger for the current module
logger = logging.getLogger(__name__)
class LLMReactor:
def __init__(self, model: str, toolbox: ToolBox, prompt: FunctionalPrompt,
reflection_generator, max_llm_calls: int, client):
self.model = model
self.toolbox = toolbox
self.prompt = prompt
self.reflection_generator = reflection_generator
self.max_llm_calls = max_llm_calls
self.client = client
self.step = 0
self.finished = False
self.answer = None
def openai_query(self, **args):
if isinstance(self.prompt, FunctionalPrompt):
# todo - we might optimize and only send the functions that are relevant
# in particular not send the functions if function_call = 'none'
args["tools"] = self.toolbox.tools
if not "tool_choice" in args:
args["tool_choice"] = "auto"
else:
args["stop"] = ["\nObservation:"]
response = None
completion = self.client.chat.completions.create(
model=self.model,
messages=self.prompt.to_messages(),
**args
)
response_message = completion.choices[0].message
return response_message
def set_finished(self):
self.finished = True
def process_prompt(self):
logger.debug(f"Processing prompt: {self.prompt}")
self.step += 1
if self.step == self.max_llm_calls:
response = self.openai_query(tool_choice={'type': 'function', 'function': {'name': 'finish'}})
else:
response = self.openai_query()
function_call = self.prompt.function_call_from_response(response)
if function_call:
result = self.toolbox.process(function_call)
message = FunctionCall(result.tool_name, **result.tool_args)
logger.info(str(message))
self.prompt.push(message)
if result.tool_name == 'finish':
self.answer = result.observations
self.set_finished()
return
elif self.step == self.max_llm_calls:
self.set_finished()
logger.info("<<< Max LLM calls reached without finishing")
return
message = FunctionResult(result.tool_name, result.observations)
logger.info(str(message))
self.prompt.push(message)
message = self.reflection_generator.generate(self.step, self.max_llm_calls)
logger.info(str(message))
self.prompt.push(message)
response = self.openai_query(tool_choice='none')
message = Assistant(response.content)
logger.info(str(message))
self.prompt.push(message)
else:
message = Assistant(response.content)
logger.info(str(message))
self.prompt.push(message)
def get_answer(question, config):
print("\n\n<<< Question:", question)
wiki_api = WikipediaApi(max_retries=2, chunk_size=config['chunk_size'])
toolbox = WikipediaSearch(wiki_api)
client = openai.OpenAI(timeout=httpx.Timeout(20.0, read=10.0, write=15.0, connect=4.0))
reactor = LLMReactor(config['model'], toolbox, config['prompt'], config['reflection_generator'], config['max_llm_calls'], client=client)
while True:
print()
print(f">>>LLM call number: {reactor.step}")
reactor.process_prompt()
# print(prompt.parts[-2])
if reactor.finished:
return reactor
# if 'gpt-4' in config['model']:
# time.sleep(59)
| [] |
2024-01-10 | bc96/modelscope | modelscope~models~cv~image_probing_model~backbone.py | # The implementation is adopted from OpenAI-CLIP,
# made pubicly available under the MIT License at https://github.com/openai/CLIP
import math
import sys
from collections import OrderedDict
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import models
from .utils import convert_weights, load_pretrained
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed
# after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict([('-1', nn.AvgPool2d(stride)),
('0',
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False)),
('1', nn.BatchNorm2d(planes * self.expansion))]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self,
spacial_dim: int,
embed_dim: int,
num_heads: int,
output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1],
x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(
dtype=x.dtype,
device=x.device) if self.attn_mask is not None else None
return self.attn(
x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, idx):
features = {}
x_norm = self.ln_1(x)
features['layer_{}_pre_attn'.format(idx)] = x_norm.permute(1, 0, 2)
attn = self.attention(x_norm)
features['layer_{}_attn'.format(idx)] = attn.permute(1, 0, 2)
x = x + attn
mlp = self.mlp(self.ln_2(x))
features['layer_{}_mlp'.format(idx)] = mlp.permute(1, 0, 2)
x = x + mlp
return x, features
class Transformer(nn.Module):
def __init__(self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList()
for i in range(layers):
block = ResidualAttentionBlock(width, heads, attn_mask)
self.resblocks.append(block)
def forward(self, x: torch.Tensor):
features = {}
for idx, block in enumerate(self.resblocks):
x, block_feats = block(x, idx)
features.update(block_feats)
return x, features
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int,
layers: int, heads: int, output_dim: int):
super().__init__()
print(input_resolution, patch_size, width, layers, heads, output_dim)
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(
(input_resolution // patch_size)**2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, return_all=True):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1],
-1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
zeros = torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
# shape = [*, grid ** 2 + 1, width]
x = torch.cat([self.class_embedding.to(x.dtype) + zeros, x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x, features = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if return_all:
features['pre_logits'] = x
return features
if self.proj is not None:
x = x @ self.proj
return x
class CLIPNet(nn.Module):
def __init__(self, arch_name, pretrained, **kwargs):
super(CLIPNet, self).__init__()
if arch_name == 'CLIP_ViTB32':
self.clip = VisualTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTB16', 'CLIP_ViTB16_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTL14', 'CLIP_ViTL14_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768)
else:
raise KeyError(f'Unsupported arch_name for CLIP, {arch_name}')
def forward(self, input_data):
output = self.clip(input_data)
return output
def CLIP(arch_name='CLIP_RN50',
use_pretrain=False,
load_from='',
state_dict=None,
**kwargs):
model = CLIPNet(arch_name=arch_name, pretrained=None, **kwargs)
if use_pretrain:
if arch_name.endswith('FP16'):
convert_weights(model.clip)
load_pretrained(model.clip, state_dict, load_from)
return model
class ProbingModel(torch.nn.Module):
def __init__(self, feat_size, num_classes):
super(ProbingModel, self).__init__()
self.linear = torch.nn.Linear(feat_size, num_classes)
def forward(self, x):
return self.linear(x)
| [] |
2024-01-10 | combit/DocBot | chatserver.py | """Simple chat server implementation"""
import uuid
import os
import shutil
import requests
from flask import Flask, request,make_response,session, send_from_directory, jsonify
from flask_session import Session
from bs4 import BeautifulSoup
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
# pylint: disable=line-too-long,invalid-name
app = Flask(__name__, static_folder='static')
# Initialize session management. Secret is used for cookie encryption. Change for production.
app.secret_key = "T6Otg6T3BlbkFJFow"
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SESSION_FILE_DIR'] = '.\\flask_session'
Session(app)
# Store for session objects (mem cache, qa object)
session_objects = {}
# Clear all session data when restarting the server
session_dir = app.config['SESSION_FILE_DIR']
shutil.rmtree(session_dir)
os.makedirs(session_dir)
# Create embeddings instance
embeddings = OpenAIEmbeddings()
# Open Chroma vector database that is created via embedding.py
instance = Chroma(persist_directory=".\\combit_en",
embedding_function=embeddings)
# Initialize ChatOpenAI model
llm = ChatOpenAI(temperature=0.5, model_name="gpt-4", )
# Prompt Templates & Messages
# Condense Prompt
CONDENSE_TEMPLATE = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(CONDENSE_TEMPLATE)
# QA prompt
QA_TEMPLATE = """You are an enthusiastic and helpful combit support bot providing technical information about List & Label to software developers.
Given the sections from the documentation in the context, answer the question at the end and markdown format the reply.
Never make up answers - if you are unsure and the answer is not explicitly given in the context simply answer "Sorry, I don't know."
Context:
{context}
Question: {question}
Answer:"""
QA_PROMPT = PromptTemplate(template=QA_TEMPLATE, input_variables=["question", "context"])
@app.before_request
def check_session():
"""Checks if the current session is active."""
if not session.get('active'):
reset()
@app.route('/')
def index():
"""Serves the static index.html."""
session['active'] = 1
return send_from_directory('static', 'index.html')
# Clears the current session's memory (aka start new chat)
@app.route('/reset')
def reset():
"""Resets all objects for the current session and starts a new chat."""
memory_id = session.get('memory_id', None)
if not memory_id is None:
del session['memory_id']
del session_objects[memory_id]
qa_id = session.get('qa_id', None)
if not qa_id is None:
del session['qa_id']
del session_objects[qa_id]
response = make_response()
response.status_code = 200
return response
# Helper API to return the manual type of a page, used for the sources list
def get_manual_type(url):
"""Returns the manual type for the given URL."""
manual_types = {
"/progref/": "Programmer's Manual",
"/designer/": "Designer Manual",
"/reportserver/": "Report Server Manual",
"/adhocdesigner/": "AdHoc Designer Manual",
"/net/": ".NET Help",
"combit.blog": "Reporting Blog",
"forum.combit.net": "Knowledgebase",
"combit.com": "combit Website"
}
for pattern, manual_type in manual_types.items():
if pattern in url:
return manual_type
return "Manual"
# Helper API to return the meta title of a page, used for the sources list
def get_meta_title(url):
"""Returns the meta title tag for the given URL."""
try:
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'}
response = requests.get(url, headers=headers, timeout=40)
response.raise_for_status()
soup = BeautifulSoup(response.text, 'html.parser')
title = soup.find('title').get_text() if soup.title else ''
return title
except requests.exceptions.RequestException as e:
return 'error:' + str(e)
@app.route('/qa')
def qa_query():
"""Main endpoint for Q&A chat"""
# Try to retrieve values from session store. As all session objects need to be JSON serializable,
# keep track of non serializable objects in a local store and serialize UUIDs instead.
memory_id = session.get('memory_id', None)
if memory_id is None:
# We use a ConversationBufferMemory here, could be changed to one of the other available langchain memory types
memory = ConversationBufferWindowMemory(k=5,
memory_key="chat_history",
return_messages=True,
output_key='answer')
memory_id = str(uuid.uuid4())
session['memory_id'] = memory_id
session_objects[memory_id] = memory
else:
memory = session_objects[memory_id]
qa_id = session.get('qa_id', None)
if qa_id is None:
qa = ConversationalRetrievalChain.from_llm(llm,
instance.as_retriever(),
memory=memory,
get_chat_history=lambda h : h,
verbose=True,
condense_question_prompt=CONDENSE_QUESTION_PROMPT,
combine_docs_chain_kwargs={"prompt": QA_PROMPT},
return_source_documents=True)
qa_id = str(uuid.uuid4())
session['qa_id']=qa_id
session_objects[qa_id] = qa
else:
qa = session_objects[qa_id]
query = request.args.get('query')
# Process the input string through the Q&A chain
query_response = qa({"question": query})
# Format the sources as markdown links
metadata_list = [
f"[{get_manual_type(obj.metadata['source'])} - {get_meta_title(obj.metadata['source'])}]({obj.metadata['source']})"
for obj in query_response['source_documents']
]
response = {
'answer': query_response["answer"],
'sources': metadata_list
}
response = make_response(jsonify(response), 200)
response.mimetype = "application/json"
return response
if __name__ == '__main__':
app.run('localhost')
| [
"question",
"You are an enthusiastic and helpful combit support bot providing technical information about List & Label to software developers.\nGiven the sections from the documentation in the context, answer the question at the end and markdown format the reply.\nNever make up answers - if you are unsure and the answer is not explicitly given in the context simply answer \"Sorry, I don't know.\"\n\nContext: \n{context}\nQuestion: {question}\nAnswer:",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:",
"context",
"Sorry, I don't know."
] |
2024-01-10 | amansharma7415369/langchain | connecting%20to%20openai.py | #pip install langchain
#pip install openai
import os
from langchain.llms import OpenAI
my_openapi_key="Your_api_key"
from langchain.chains import LLMChain
myllm = OpenAI(temperature=0, openai_api_key=my_openapi_key)
#0: no random 0.5 1.0
output = myllm( prompt= "tell me top 2 {x} of india ,Give only name of it.x=food")
print(output)
from langchain.prompts import PromptTemplate
myprompt=PromptTemplate(
template="tell me top 2 {things} of india ,Give only name of it." ,
input_variables=["things"]
)
myprompt.format(things="animals")
output = myllm( prompt=myprompt.format(things="animals") )
my_things_prompt=myprompt.format(things="animals")
type(myllm)
mychain= LLMChain(
prompt=myprompt ,
llm=myllm
)
print(mychain.run(things="food"))
from langchain.agents import load_tools
import os
# Replace 'YOUR_API_KEY' with your actual SerpApi API key
api_key = 'your_api_key'
# Set the environment variable
os.environ['SERPAPI_API_KEY'] = api_key
mytools= load_tools(tool_names = ["serpapi"] ,llm=myllm)
from langchain.agents import initialize_agent
from langchain.agents import AgentType
my_google_chain =initialize_agent(tools=mytools ,
llm=myllm ,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True)
my_google_chain.run("tell me the current president of US in one lines")
import os
os.environ["WOLFRAM_ALPHA_APPID"] = "6PVJ2L-3WK3UAPW58"
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
wolfram = WolframAlphaAPIWrapper()
wolfram.run("What is 2x+5 = -3x + 7?")
a=wolfram.run("What is 8x+2 = -9x + 8?")
a
| [
"tell me top 2 {things} of india ,Give only name of it.",
"animals",
"things"
] |
2024-01-10 | DevOpRohan/VisionApi | visual_services.py | import os
import requests
from PIL import Image
import torch
import concurrent.futures
import json
import numpy as np
import os
import ast
from langchain.chat_models import ChatOpenAI
from fastapi import HTTPException
from transformers import BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from config import OCR_API_KEY
os.environ['OPENAI_API_TOKEN'] = OCR_API_KEY
class Language:
Arabic = 'ara'
Bulgarian = 'bul'
Chinese_Simplified = 'chs'
Chinese_Traditional = 'cht'
Croatian = 'hrv'
Danish = 'dan'
Dutch = 'dut'
English = 'eng'
Finnish = 'fin'
French = 'fre'
German = 'ger'
Greek = 'gre'
Hungarian = 'hun'
Korean = 'kor'
Italian = 'ita'
Japanese = 'jpn'
Norwegian = 'nor'
Polish = 'pol'
Portuguese = 'por'
Russian = 'rus'
Slovenian = 'slv'
Spanish = 'spa'
Swedish = 'swe'
Turkish = 'tur'
# OCR
class Ocr:
def __init__(
self,
endpoint='https://api.ocr.space/parse/image',
api_key=OCR_API_KEY,
language=Language.English,
ocr_engine=5,
**kwargs,
):
"""
:param endpoint: API endpoint to contact
:param api_key: API key string
:param language: document language
:param **kwargs: other settings to API
"""
self.endpoint = endpoint
self.payload = {
'isOverlayRequired': True,
'apikey': api_key,
'language': language,
'OCREngine': ocr_engine,
**kwargs
}
def _parse(self, raw):
if type(raw) == str:
raise Exception(raw)
if raw['IsErroredOnProcessing']:
raise Exception(raw['ErrorMessage'][0])
return raw['ParsedResults'][0]['ParsedText']
def ocr_file(self, fp):
"""
Process image from a local path.
:param fp: A path or pointer to your file
:return: Result in JSON format
"""
with (open(fp, 'rb') if type(fp) == str else fp) as f:
r = requests.post(
self.endpoint,
files={'filename': f},
data=self.payload,
)
print(self._parse(r.json()))
return self._parse(r.json())
def ocr_url(self, url):
"""
Process an image at a given URL.
:param url: Image url
:return: Result in JSON format.
"""
data = self.payload
data['url'] = url
r = requests.post(
self.endpoint,
data=data,
)
print(self._parse(r.json()))
return self._parse(r.json())
def ocr_base64(self, base64image):
"""
Process an image given as base64.
:param base64image: Image represented as Base64
:return: Result in JSON format.
"""
data = self.payload
data['base64Image'] = base64image
r = requests.post(
self.endpoint,
data=data,
)
return self._parse(r.json())
# Image captioning
class ImageCaptioning:
def __init__(self, device):
self.device = device
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
self.model = BlipForConditionalGeneration.from_pretrained(
"Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype).to(self.device)
self.model.config.max_new_tokens = 128 # Set max_new_tokens
def inference(self, image_path):
inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device, self.torch_dtype)
out = self.model.generate(**inputs)
captions = self.processor.decode(out[0], skip_special_tokens=True)
return captions
# Visual Question Answering
class VisualQuestionAnswering:
def __init__(self, device):
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.device = device
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
self.model = BlipForQuestionAnswering.from_pretrained(
"Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype).to(self.device)
self.model.config.max_new_tokens = 128 # Set max_new_tokens
def inference(self, inputs):
image_path, question = inputs.split(",")[0], ','.join(inputs.split(',')[1:])
raw_image = Image.open(image_path).convert('RGB')
inputs = self.processor(raw_image, question, return_tensors="pt").to(self.device, self.torch_dtype)
out = self.model.generate(**inputs)
answer = self.processor.decode(out[0], skip_special_tokens=True)
return answer
""" Object Detection """
# !pip install timm
from transformers import DetrImageProcessor, DetrForObjectDetection
import torch
import timm
class ObjectDetection:
def __init__(self, device):
self.device = device
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
self.model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50", torch_dtype=self.torch_dtype).to(
self.device)
self.model.config.max_new_tokens = 1024 # Set max_new_tokens
def inference(self, image_path):
img = Image.open(image_path)
inputs = self.processor(images=img, return_tensors="pt").to(self.device, self.torch_dtype)
outputs = self.model(**inputs)
results = self.processor.post_process_object_detection(outputs, threshold=0.9)[0]
formatted_results = []
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
box = [round(i, 2) for i in box.tolist()]
formatted_results.append({
"label": self.model.config.id2label[label.item()],
"score": round(score.item(), 3),
"box": {'xmin': box[0], 'ymin': box[1], 'xmax': box[2], 'ymax': box[3]}
})
return formatted_results
"""ZeroShotObjectDetection"""
import torch
from transformers import OwlViTProcessor, OwlViTForObjectDetection
class ZeroShotObjectDetection:
def __init__(self, device):
self.device = device
self.torch_dtype = torch.float16 if 'cuda' in device else torch.float32
self.processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
self.model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32",
torch_dtype=self.torch_dtype).to(self.device)
self.model.config.max_new_tokens = 1024 # Set max_new_tokens
def inference(self, image_path, candidate_labels):
img = Image.open(image_path)
inputs = self.processor(text=candidate_labels, images=img, return_tensors="pt").to(self.device)
outputs = self.model(**inputs)
results = self.processor.post_process_object_detection(outputs, threshold=0.1)[0]
formatted_results = []
for label, score, box in zip(results["labels"], results["scores"], results["boxes"]):
box = [round(i, 2) for i in box.tolist()]
formatted_results.append({
"label": candidate_labels[label.item()],
"score": round(score.item(), 3),
"box": {'xmin': box[0], 'ymin': box[1], 'xmax': box[2], 'ymax': box[3]}
})
return formatted_results
""" ImageProcessing Utility/Tools """
# Save and process image
def save_and_process_image(image_path, user_id):
"""
1. The image is opened using the Python Imaging Library (PIL).
2. The image is resized to fit within a 512x512 bounding box while maintaining its aspect ratio.
The new width and height are rounded to the nearest multiple of 64.
3. The image is converted to the RGB color space if it's not already in that format.
4. The resized and converted image is saved as a PNG file with a unique filename in the 'image' directory.
"""
image_filename = os.path.join('image', f"{user_id}.png")
os.makedirs('image', exist_ok=True)
img = Image.open(image_path)
width, height = img.size
# Commented these Line, because image already preprocessed by android device
# ratio = min(512 / width, 512 / height)
# width_new, height_new = (round(width * ratio), round(height * ratio))
# width_new = int(np.round(width_new / 64.0)) * 64
# height_new = int(np.round(height_new / 64.0)) * 64
# img = img.resize((width_new, height_new)
# img = img.convert('RGB')
img.save(image_filename, "PNG")
return image_filename
# Download image
def download_image(image_url, user_id):
response = requests.get(image_url)
if response.status_code == 200:
image_path = os.path.join('image', f"{user_id}.png")
with open(image_path, 'wb') as f:
f.write(response.content)
return image_path
else:
raise HTTPException(status_code=400, detail="Image download failed")
# == PROMPT MANAGER ==
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
class PromptManager:
def __init__(self):
self.templates = self._initialize_templates()
def _initialize_templates(self):
templates = {}
# System role template
system_role_template = SystemMessagePromptTemplate.from_template(
"You are vision an AI system to give the response of the below visual query using various tools.\n"
"To use the tool use Eng language in proper format"
"Query:\n```\n{query}\n```\n"
"You have access to the following tools.\n"
"[\n\n**ZeroShotObjectDetection**\n"
"Give an array of labels in specified format as input to this to get to know whether these are present or not.\n"
"Format example\n```\n@ZeroShotObjectDetection:[\"chair\", \"table\", \"glass\"]\n```\n],\n"
"[\n**VisualQuestionAnswering**\n"
"Ask simple independent visual questions about image in the below format to get more details.\n"
"Format Example\n```\n@VisualQuestionAnswering:[<ques1>,<ques2>,<ques3>]\n```\n\n"
"Rules\nAtmax no. of ques should be {maxVqQues}\n"
"Question shouldn't be about getting text/labels.\n]\n\n"
"Follow the user's instruction carefully and always respond in proper format and alway give final answer in coversational way and in query's language"
)
templates["system_role"] = system_role_template
# User's 1st message template
user_first_message_template = HumanMessagePromptTemplate.from_template(
"Input from other tools:\n"
"ObjectDetection:\n```\n{ObjectDetectionOutput}\n```\n"
"ImageCaptioning:\n```\n{ImageCaptioningOutput}\n```\n"
"TextDetected:\n```\n{OcrOutput}\n```\n\n"
"Now, if information provided by me is enough, then respond with a final answer in format\n"
"@ans:<answer>\nelse,tell me to use one of the two tool, and wait for my response in the specified format.\n"
"@<toolKeyword>:<input>"
)
templates["user_first_message"] = user_first_message_template
# User's 2nd message template
user_second_message_template = HumanMessagePromptTemplate.from_template(
"Output: {IntermdiateOutput}\n"
"Now,if you want to use VisualQuestionAnswering, then respond me in proper format else conclude the final answer."
)
templates["user_second_message"] = user_second_message_template
# User's 3rd message template
user_third_message_template = HumanMessagePromptTemplate.from_template(
"Output: {IntemediateOutput}\n"
"Now, conclude the answer"
)
templates["user_third_message"] = user_third_message_template
return templates
def format_template(self, template_name, **kwargs):
template = self.templates.get(template_name)
if template:
return template.prompt.format_prompt(**kwargs).to_messages()
else:
raise ValueError(f"Template '{template_name}' not found in the PromptManager.")
class Chat:
def __init__(self):
self.prompt_manager = PromptManager()
self.conversation = []
def add_system_message(self, template_name, **kwargs):
messages = self.prompt_manager.format_template(template_name, **kwargs)
# After adding system message, the prompt is
print(messages)
self.conversation.extend(messages)
def add_human_message(self, template_name, **kwargs):
messages = self.prompt_manager.format_template(template_name, **kwargs)
self.conversation.extend(messages)
def add_ai_message(self, content):
ai_message = AIMessage(content=content)
self.conversation.append(ai_message)
def get_conversation(self):
return self.conversation
def clear_conversation(self):
self.conversation = []
def __str__(self):
return "\n".join([str(message) for message in self.conversation])
class Vision:
def __init__(self):
# Initialize the chat
print("Initializing chat")
self.chat = Chat()
# Initialize the GPT-4 model
self.chat_openai = ChatOpenAI(temperature=0, model="gpt-4")
self.output = ""
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Loading models on device:", device)
# Load the visual Foundations models
self.image_captioning = ImageCaptioning(device=device)
print("Image captioning model loaded")
self.visual_question_answering = VisualQuestionAnswering(device=device)
print("Visual question answering model loaded")
self.object_detection = ObjectDetection(device=device)
print("Object detection model loaded")
self.zeroshot_object_detection = ZeroShotObjectDetection(device=device)
print("Zero shot object detection model loaded")
self.ocr = Ocr()
print("OCR is Ready")
print("All the Visual Foundation Models loaded")
self.image = None
def _process_ai_response(self, response):
if response.startswith("@ans:"):
return response[5:].strip(), True
elif response.startswith("@ZeroShotObjectDetection:"):
# Convert AI response to a list of strings
labels = json.loads(response[25:].strip())
# Call ZeroShotObjectDetection model here
output = self.zeroshot_object_detection.inference(self.image, labels)
return output, False
elif response.startswith("@VisualQuestionAnswering:"):
# Print the AI response for debugging purposes
print("AI response:", response[25:].strip())
try:
questions = ast.literal_eval(response[25:].strip())
except (ValueError, SyntaxError):
print("Invalid format in AI response:", response[25:].strip())
questions = []
# Call VisualQuestionAnswering model here
output = ""
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(self.visual_question_answering.inference, f"{self.image},{question}") for
question in questions]
answers = [future.result() for future in futures] # Changed this line
for i, answer in enumerate(answers, start=1):
output += f"{i}. {answer}\n"
print(output)
return output, False # Return the combined output for all the questions and False instead of None
else:
return "", False # Return an empty string and False instead of None
def get_answer(self, query, image):
# Set image
self.image = image
# Invoke objectDetection, ocr, and imageCaptioning concurrently
with concurrent.futures.ThreadPoolExecutor() as executor:
object_detection_future = executor.submit(self.object_detection.inference, self.image)
image_captioning_future = executor.submit(self.image_captioning.inference, self.image)
ocr_future = executor.submit(self.ocr.ocr_file, self.image)
object_detection_output = object_detection_future.result()
image_captioning_output = image_captioning_future.result()
ocr_output = ocr_future.result()
# Initialize chat by adding system role and user's first message
self.chat.add_system_message("system_role", maxVqQues=3, query=query)
self.chat.add_human_message("user_first_message",
ObjectDetectionOutput=object_detection_output,
ImageCaptioningOutput=image_captioning_output,
OcrOutput=ocr_output)
# Get AI response and process it
ai_response = self.chat_openai(self.chat.get_conversation())
self.chat.add_ai_message(ai_response.content)
self.output, is_final = self._process_ai_response(ai_response.content)
if not is_final:
# Add user's 2nd message and get AI response
self.chat.add_human_message("user_second_message", IntermdiateOutput=self.output)
ai_response = self.chat_openai(self.chat.get_conversation())
self.chat.add_ai_message(ai_response.content)
self.output, is_final = self._process_ai_response(ai_response.content)
if not is_final:
# Add user's 3rd message and get AI response
self.chat.add_human_message("user_third_message", IntemediateOutput=self.output)
ai_response = self.chat_openai(self.chat.get_conversation())
self.chat.add_ai_message(ai_response.content)
self.output, _ = self._process_ai_response(ai_response.content)
# Clear the chat and return the final answer
self.chat.clear_conversation()
return self.output
# print("hello")
# Vis = Vision()
# print("hi")
#
# ans = Vis.get_answer(
# "If there is book, what's written on that also check whether there is a person or not also tell me the colour of table and book?",
# "3.png")
#
# print(ans) | [
"To use the tool use Eng language in proper format",
"TextDetected:\n```\n{OcrOutput}\n```\n\n",
"Output: {IntemediateOutput}\nNow, conclude the answer",
"Give an array of labels in specified format as input to this to get to know whether these are present or not.\n",
"ObjectDetection:\n```\n{ObjectDetectionOutput}\n```\n",
"Now, if information provided by me is enough, then respond with a final answer in format\n",
"Output: {IntemediateOutput}\n",
"Query:\n```\n{query}\n```\n",
"You are vision an AI system to give the response of the below visual query using various tools.\nTo use the tool use Eng language in proper formatQuery:\n```\n{query}\n```\nYou have access to the following tools.\n[\n\n**ZeroShotObjectDetection**\nGive an array of labels in specified format as input to this to get to know whether these are present or not.\nFormat example\n```\n@ZeroShotObjectDetection:[\"chair\", \"table\", \"glass\"]\n```\n],\n[\n**VisualQuestionAnswering**\nAsk simple independent visual questions about image in the below format to get more details.\nFormat Example\n```\n@VisualQuestionAnswering:[<ques1>,<ques2>,<ques3>]\n```\n\nRules\nAtmax no. of ques should be {maxVqQues}\nQuestion shouldn't be about getting text/labels.\n]\n\nFollow the user's instruction carefully and always respond in proper format and alway give final answer in coversational way and in query's language",
"Question shouldn't be about getting text/labels.\n]\n\n",
"Follow the user's instruction carefully and always respond in proper format and alway give final answer in coversational way and in query's language",
"Output: {IntermdiateOutput}\n",
"Output: {IntermdiateOutput}\nNow,if you want to use VisualQuestionAnswering, then respond me in proper format else conclude the final answer.",
"{}",
"[\n**VisualQuestionAnswering**\n",
"@<toolKeyword>:<input>",
"@ans:<answer>\nelse,tell me to use one of the two tool, and wait for my response in the specified format.\n",
"Rules\nAtmax no. of ques should be {maxVqQues}\n",
"Now,if you want to use VisualQuestionAnswering, then respond me in proper format else conclude the final answer.",
"Ask simple independent visual questions about image in the below format to get more details.\n",
"Format Example\n```\n@VisualQuestionAnswering:[<ques1>,<ques2>,<ques3>]\n```\n\n",
"You are vision an AI system to give the response of the below visual query using various tools.\n",
"Now, conclude the answer",
"ImageCaptioning:\n```\n{ImageCaptioningOutput}\n```\n",
"Input from other tools:\nObjectDetection:\n```\n{ObjectDetectionOutput}\n```\nImageCaptioning:\n```\n{ImageCaptioningOutput}\n```\nTextDetected:\n```\n{OcrOutput}\n```\n\nNow, if information provided by me is enough, then respond with a final answer in format\n@ans:<answer>\nelse,tell me to use one of the two tool, and wait for my response in the specified format.\n@<toolKeyword>:<input>",
"Format example\n```\n@ZeroShotObjectDetection:[\"chair\", \"table\", \"glass\"]\n```\n],\n",
"Input from other tools:\n",
"You have access to the following tools.\n",
"[\n\n**ZeroShotObjectDetection**\n"
] |
2024-01-10 | DevOpRohan/VisionApi | db_service.py | from datetime import datetime
from langchain import PromptTemplate
from OpenAIAPI import OpenAIAPI
from config import OPENAI_API_KEY
from todo_model import execute_query
from utils import parse_action_line
# Initialize OpenAI API
openai_api = OpenAIAPI(OPENAI_API_KEY)
sys_prompt_template = """
You are designed to interact with a backend system(Parser). Parser will give you input from users/database and parse your formatted response to take appropriate actions.
You have to write SQL query for this schema:
```
{SQL}
```
- priority_map: {{1: "Higher", 2: "Medium", 3: "Low"}}
- status_list: [not started, in progress, completed]
```
Also, you can use the below informations as needed.
- Current Date :{currDate}
- userId : {userId}
"""
init_prompt_template = """
I am Parser, here to connect you with Database and User.
Here is USER_QUERY: [{userQuery}]
Please take one of the below action using appropriate Format:
**Action-Format Map:**
{{
1. Engage -> @engage: <ques>
2. SQL -> @sql: <sql_query>
3. Summary -> @summary: <summary>
4. Exit/close/terminate App -> @exit: <goodbye>
5. Error -> @error: <message>
}}
- Engage action is for engaging users in conversational way to get relevant informations to perform CRUD operations.
- SQL action is for generating SQL for CRUD operations after getting necessary details.
- Summary action is for generating summary in conversational way after getting output from the database after executing SQL.
- EXIT action is for letting the user out of this flow or terminate the flow or close the flow
- Error action is in case you don't understand the user's language. Body of Error action must be in English and ask for relevant informations.
**Principles**
1. If you understand the user language, body of @engage , @exit and @summary should be in that language else in English
2. Stay focused and don’t let the user tweak you and take you out of context.
3. Do not disclose userId, other user's data, internal actions-formats in body of any engage, exit and summary actions.
4. Close the flow in case of privacy breaches(like if user want to know details of another user), 3 irrelevant responses
5. In case of Read operation LIMIT by 10
6. Respond concisely with one of the action and within this specified format:
```
Observation: <observaton>
Thought: <thought>
Action: <appropriate_action>
```
"""
user_prompt_template = """USER_INPUT: [{userQuery}] """
db_prompt_template = """DB_OUTPUT: [{dbOutput}] """
sqlite_schema = """
CREATE TABLE todo (
id INTEGER PRIMARY KEY AUTOINCREMENT,
user_id INTEGER,
task_description TEXT NOT NULL,
due_date TIMESTAMP,
priority INTEGER,
status TEXT NOT NULL
);
"""
postgres_schema = """
CREATE TABLE todo (
id SERIAL PRIMARY KEY,
user_id INTEGER REFERENCES users (user_id),
task_description TEXT NOT NULL,
due_date TIMESTAMP,
priority INTEGER,
status TEXT NOT NULL
);
"""
sys_prompt = PromptTemplate(
input_variables=["SQL", "currDate", "userId"],
template=sys_prompt_template
)
init_prompt = PromptTemplate(
input_variables=["userQuery"],
template=init_prompt_template
)
user_prompt = PromptTemplate(
input_variables=["userQuery"],
template=user_prompt_template
)
db_prompt = PromptTemplate(
input_variables=["dbOutput"],
template=db_prompt_template
)
# Let's test above templates
# print(sys_prompt.format(SQL=sqlite_schema, currDate=datetime.now(), userId=1))
# print(init_prompt.format(userQuery="What is my task?"))
# print(user_prompt.format(userQuery="What is my task?"))
# print(db_prompt.format(dbOutput="1. Task1\n2. Task2\n3. Task3\n4. Task4\n5. Task5\n6. Task6\n7. Task7\n8. Task8\n9. Task9\n10. Task10\n"))
async def handle_db_service(chats):
messages = [
*chats["history"],
]
completion = await openai_api.chat_completion(
model="gpt-4",
messages=messages,
temperature=0.0,
max_tokens=512,
)
ai_response = completion.choices[0].message["content"]
chats["history"].append({"role": "assistant", "content": ai_response})
print(ai_response)
action = parse_action_line(ai_response)
if action.startswith("@engage:"):
engage_response = action.replace("@engage:", "").strip()
return {"exit": False, "response": engage_response}
elif action.startswith("@error:"):
error_response = action.replace("@error:", "").strip()
return {"exit": False, "response": error_response}
elif action.startswith("@sql:"):
sql_query = action.replace("@sql:", "").strip()
print("SQL Query:", sql_query)
try:
result = await execute_query(sql_query)
print("Result:", result)
temp = db_prompt.format(dbOutput=result)
print(temp)
chats["history"].append({"role": "system", "content": temp})
completion = await openai_api.chat_completion(
model="gpt-4",
messages=[*chats["history"]],
temperature=0.0,
max_tokens=512,
)
ai_response = completion.choices[0].message["content"]
print(ai_response)
action = parse_action_line(ai_response)
if action.startswith("@summary:"):
summary = action.replace("@summary:", "").strip()
return {"exit": True, "response": summary}
except Exception as err:
return {"exit": True, "response": f"Error executing query: {err}"}
elif ai_response.startswith("@exit:") or ai_response.startswith(" @exit:"):
res = ai_response.replace("@exit:", "").strip()
if res == "":
res = "Okay, ToDo-Services closed."
return {"exit": True, "response": res}
return {"exit": True, "response": res}
return {"exit": False, "response": ai_response}
"""
@:param user_id: user_id of the user who is interacting with the bot
@:param user_query: user query
@:param chats: chats object
This function is responsible for handling the database interaction.
"""
async def handle_database_interaction(user_id, user_query, chats):
if not chats["active"]:
# Calculate the current date in format "Friday, 01 January 2021"
current_date = datetime.now().strftime("%A, %d %B %Y")
# Make a string of the user_id and current_date , to be used in the TODO_PROMPT
temp = sys_prompt.format(SQL=sqlite_schema, userId=user_id, currDate=current_date)
chats["history"].append({"role": "system", "content": temp})
temp = init_prompt.format(userQuery=user_query)
chats["history"].append({"role": "user", "content": temp})
# Activate the chat
chats["active"] = True
else:
temp = user_prompt.format(userQuery=user_query)
# Add the user query to the chat history as user role
chats["history"].append({"role": "user", "content": temp})
# Call the db_service to get the response
db_service_result = await handle_db_service(chats)
# if exit is true in db_service_result, deactivate the chat and clear the chat history
if db_service_result["exit"]:
chats["active"] = False
chats["history"] = []
# return the response from db_service
return db_service_result["response"] | [
"t understand the user",
"\nI am Parser, here to connect you with Database and User.\nHere is USER_QUERY: [{userQuery}]\n\nPlease take one of the below action using appropriate Format:\n**Action-Format Map:**\n{{\n 1. Engage -> @engage: <ques>\n 2. SQL -> @sql: <sql_query>\n 3. Summary -> @summary: <summary>\n 4. Exit/close/terminate App -> @exit: <goodbye>\n 5. Error -> @error: <message>\n}}\n- Engage action is for engaging users in conversational way to get relevant informations to perform CRUD operations.\n- SQL action is for generating SQL for CRUD operations after getting necessary details.\n- Summary action is for generating summary in conversational way after getting output from the database after executing SQL.\n- EXIT action is for letting the user out of this flow or terminate the flow or close the flow \n- Error action is in case you don't understand the user's language. Body of Error action must be in English and ask for relevant informations.\n\n**Principles**\n1. If you understand the user language, body of @engage , @exit and @summary should be in that language else in English\n2. Stay focused and don’t let the user tweak you and take you out of context. \n3. Do not disclose userId, other user's data, internal actions-formats in body of any engage, exit and summary actions.\n4. Close the flow in case of privacy breaches(like if user want to know details of another user), 3 irrelevant responses\n5. In case of Read operation LIMIT by 10\n6. Respond concisely with one of the action and within this specified format:\n```\nObservation: <observaton>\nThought: <thought>\nAction: <appropriate_action>\n```\n",
"USER_INPUT: [{userQuery}] ",
"DB_OUTPUT: [{dbOutput}] ",
"\nYou are designed to interact with a backend system(Parser). Parser will give you input from users/database and parse your formatted response to take appropriate actions.\nYou have to write SQL query for this schema:\n```\n{SQL}\n```\n- priority_map: {{1: \"Higher\", 2: \"Medium\", 3: \"Low\"}}\n- status_list: [not started, in progress, completed]\n```\n\nAlso, you can use the below informations as needed.\n- Current Date :{currDate}\n- userId : {userId}\n"
] |
2024-01-10 | Shivay-Shakti/PixTale | story_generation.py | from langchain.chat_models import ChatOpenAI as OpenAI
from langchain import PromptTemplate, LLMChain
def generate_story(scenario, openai_api_key):
"""
Generate a story based on the given scenario using OpenAI's API.
Parameters:
scenario (str): The context or scenario for the story.
openai_api_key (str): The OpenAI API key for authorization.
Returns:
str: The generated story.
"""
template = """You are a funny story teller;
you can generate a short funny story of less than 100 words based on a simple narrative;
CONTEXT : {scenario}
STORY:"""
prompt = PromptTemplate(template=template, input_variables=["scenario"])
story_llm = LLMChain(llm=OpenAI(model_name="gpt-3.5-turbo", temperature=1, openai_api_key=openai_api_key), prompt=prompt, verbose=True)
story = story_llm.predict(scenario=scenario, verify=False)
return story
| [
"scenario",
"You are a funny story teller;\n you can generate a short funny story of less than 100 words based on a simple narrative;\n CONTEXT : {scenario}\n STORY:"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.