date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | nacquisto/Stock-Picker-Main | Final%20Deliverable~Classes~GPTStock.py | import openai
import os
from dotenv import load_dotenv
# Load environment variables from .env file
env_file = "env_vars.env"
if not load_dotenv(env_file):
raise ValueError(f"Failed to load the environment variables from {env_file}")
# Get OpenAI API key from environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
raise ValueError("OPENAI_API_KEY is not set in the environment variables.")
else:
# Set OpenAI API key
openai.api_key = openai_api_key
class GPT_Stock:
def __init__(self, stock_symbols):
"""Initialize the object with the stock symbols"""
self.stock_symbols = stock_symbols
# Begin the conversation with the GPT-3 model with a system message
self.messages = [{"role": "system", "content": f"You are a stock analyst. Provide a summary comparison of the following companies: {', '.join(stock_symbols)}. Make sure that you provide a paragraph for each company and a summary of your comparison; that is all you need."}]
# Method to generate a comparison analysis using OpenAI's GPT-3
def CustomChatGPT(self, user_input):
"""Append user message to the conversation, make an API call to get a reply from the model, and append the model's reply to the conversation"""
self.messages.append({"role": "user", "content": user_input})
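# Note: `openai.ChatCompletion.create` below is the legacy pre-1.0 `openai` SDK interface;
# on openai>=1.0 the equivalent call is `client.chat.completions.create(...)`.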
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages
)
ChatGPT_reply = response["choices"][0]["message"]["content"]
self.messages.append({"role": "assistant", "content": ChatGPT_reply})
return ChatGPT_reply
# Method to get the comparison analysis
def get_comparison_analysis(self):
"""Get the comparison analysis by providing the appropriate user input"""
user_input = "Please provide a detailed comparison analysis."
comparison_analysis = self.CustomChatGPT(user_input)
return comparison_analysis | [
", ",
"Please provide a detailed comparison analysis."
] |
2024-01-10 | doodledood/chat-flock | examples~assistant_self_dialog.py | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores.in_memory import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors.round_robin import RoundRobinChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.renderers.terminal import TerminalChatRenderer
from examples.common import create_chat_model
def assistant_self_dialog(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
writer = LangChainBasedAIChatParticipant(
name="Novel Writer",
role="Novel Writer",
personal_mission="Write great, romantic novels. Be the best writer in the world.",
chat_model=chat_model,
spinner=spinner,
)
# There can be only one participant in this chat; in that case, it acts more like a scratchpad for the
# participant to write down their thoughts, do self-reflection, and do actual concrete work.
participants = [writer]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(), renderer=TerminalChatRenderer(), initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
chat_conductor.initiate_dialog(
chat=chat,
initial_message="I want to write and complete my short 2-page long novel about space turtles now. "
'Space turtles are AWESOME. When I am done with it I should respond with the word "TERMINATE" no quotes with '
"nothing else after it",
)
if __name__ == "__main__":
load_dotenv()
typer.run(assistant_self_dialog)
| [] |
2024-01-10 | doodledood/chat-flock | chatflock~use_cases~bshr.py | # Based directly on David Shapiro's BSHR Loop: https://github.com/daveshap/BSHR_Loop
from typing import Any, Dict, Generator, Generic, List, Optional, Type, TypeVar
import datetime
import json
from functools import partial
import questionary
from halo import Halo
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.chat_models.base import BaseChatModel
from langchain.llms.openai import OpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.backing_stores.langchain import LangChainMemoryBasedChatDataBackingStore
from chatflock.base import Chat, ChatDataBackingStore
from chatflock.conductors import RoundRobinChatConductor
from chatflock.parsing_utils import chat_messages_to_pydantic
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from chatflock.sequencial_process import SequentialProcess, Step
from chatflock.structured_string import Section, StructuredString
from chatflock.use_cases.request_response import get_response
from chatflock.web_research import WebSearch
from chatflock.web_research.web_research import WebResearchTool
class BHSRState(BaseModel):
information_need: Optional[str] = None
queries_to_run: Optional[List[str]] = None
answers_to_queries: Optional[Dict[str, str]] = None
current_hypothesis: Optional[str] = None
proposed_hypothesis: Optional[str] = None
feedback: Optional[str] = None
is_satisficed: Optional[bool] = None
def save_state(state: BHSRState, state_file: Optional[str]) -> None:
if state_file is None:
return
data = state.model_dump()
with open(state_file, "w") as f:
json.dump(data, f, indent=2)
def load_state(state_file: Optional[str]) -> Optional[BHSRState]:
if state_file is None:
return None
try:
with open(state_file) as f:
data = json.load(f)
return BHSRState.model_validate(data)
except FileNotFoundError:
return None
class QueryGenerationResult(BaseModel):
information_need: str = Field(description="Information need as requested by the user.")
queries: List[str] = Field(description="Set of queries to run.")
class HypothesisGenerationResult(BaseModel):
hypothesis: str = Field(
description="A new or updated hypothesis based on the materials provided. Rich formatting using Markdown. Should include all relevant citations inline."
)
class SatisficationCheckResult(BaseModel):
feedback: str = Field(
description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty."
)
is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.")
def generate_queries(
state: BHSRState,
chat_model: BaseChatModel,
interactive_user: bool = True,
max_queries: int = 5,
shared_sections: Optional[List[Section]] = None,
web_search_tool: Optional[BaseTool] = None,
spinner: Optional[Halo] = None,
) -> None:
if state.queries_to_run is not None and len(state.queries_to_run) > 0:
# Means we are continuing a previous session
return
if shared_sections is None:
shared_sections = []
query_generator = LangChainBasedAIChatParticipant(
name="Search Query Generator",
role="Search Query Generator",
personal_mission="You will be given a specific query or problem by the user and you are to generate a list of "
f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you "
f"generate comprehensive, counterfactual, and maximally orthogonal search queries. "
"Employ everything you know about "
"information foraging and information literacy to generate the best possible questions. "
"Use a step-by-step approach and think about the information need and the information "
"domain before generating the queries. Order the queries by their importance and relevance "
"to the main information need of the user.",
other_prompt_sections=shared_sections
+ [
Section(
name="Unclear Information Need",
text=(
"If the information need or query are vague and unclear, either perform a web search to "
"clarify the information need or ask the user for clarification."
if interactive_user
else "If the information need or query are vague and unclear, either perform a web search to "
"clarify the information need or make a best guess. The user will not be available to "
"respond back."
),
),
Section(
name="Refine Queries",
text='You might be given a first-pass information need with "None" previous queries and answers, '
"in which case you will do the best you"
'can to generate "naive queries" (uninformed search queries). However the USER might also '
"give you previous search queries or other background information such as accumulated notes. "
'If these materials are present, you are to generate "informed queries" - more specific '
"search queries that aim to zero in on the correct information domain. Do not duplicate "
"previously asked questions. Use the notes and other information presented to create "
"targeted queries and/or to cast a wider net.",
),
Section(
name="Termination",
text="Once you generate a new set of queries to run, you should terminate the chat immediately by "
"ending your message with TERMINATE",
),
],
tools=[web_search_tool] if web_search_tool is not None else None,
ignore_group_chat_environment=True,
chat_model=chat_model,
spinner=spinner,
)
user = UserChatParticipant()
participants = [user, query_generator]
try:
memory = ConversationSummaryBufferMemory(
llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore
)
backing_store: ChatDataBackingStore = LangChainMemoryBasedChatDataBackingStore(memory=memory)
except ValueError:
backing_store = InMemoryChatDataBackingStore()
chat = Chat(
backing_store=backing_store,
renderer=TerminalChatRenderer(),
initial_participants=participants,
max_total_messages=None if interactive_user else 2,
)
chat_conductor = RoundRobinChatConductor()
if state.information_need is None:
if spinner is not None:
spinner.stop()
_ = chat_conductor.initiate_dialog(
chat=chat, initial_message=f"What is your information need or query?", from_participant=query_generator
)
else:
_ = chat_conductor.initiate_dialog(
chat=chat,
initial_message=str(
StructuredString(
sections=[
Section(name="Information Need", text=state.information_need),
Section(
name="Previous Queries & Answers",
text="None"
if state.answers_to_queries is None or len(state.answers_to_queries) == 0
else None,
sub_sections=[
Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False)
for query, answer in (state.answers_to_queries or {}).items()
],
),
Section(name="Current Hypothesis", text=str(state.current_hypothesis)),
]
)
),
from_participant=user,
)
output = chat_messages_to_pydantic(
chat_messages=chat.get_messages(), chat_model=chat_model, output_schema=QueryGenerationResult
)
if state.information_need is None:
state.information_need = output.information_need
if state.queries_to_run is None:
state.queries_to_run = []
state.queries_to_run += output.queries[:max_queries]
def search_queries(
state: BHSRState, web_search: WebSearch, n_search_results: int = 3, spinner: Optional[Halo] = None
) -> Generator[BHSRState, None, None]:
if state.queries_to_run is None:
return
queries_and_answers = state.answers_to_queries if state.answers_to_queries is not None else {}
queries_to_run_set = set(state.queries_to_run)
for query in state.queries_to_run:
if query in queries_and_answers:
continue
answer = web_search.get_answer(query=query, n_results=n_search_results, spinner=spinner)[1]
queries_and_answers[query] = answer
queries_to_run_set.remove(query)
state.answers_to_queries = queries_and_answers
state.queries_to_run = list(queries_to_run_set)
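# Yield the updated state so the surrounding SequentialProcess can checkpoint progress
# (via its save_state callback) as queries are answered.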
yield state
def generate_hypothesis(
state: BHSRState,
chat_model: BaseChatModel,
shared_sections: Optional[List[Section]] = None,
spinner: Optional[Halo] = None,
) -> None:
hypothesis_generator = LangChainBasedAIChatParticipant(
name="Information Needs Hypothesis Generator",
role="Information Needs Hypothesis Generator",
personal_mission="You are an information needs hypothesis generator. You will be given a main information "
"need or user query as well as a variety of materials, such as search results, "
"previous hypotheses, and notes. Whatever information you receive, your output should be a "
"revised, refined, or improved hypothesis. In this case, the hypothesis is a comprehensive "
"answer to the user query or information need. To the best of your ability. Do not include "
"citations in your hypothesis, as this will all be record via out-of-band processes (e.g. "
"the information that you are shown will have metadata and cataloging working behind the "
"scenes that you do not see). Even so, you should endeavour to write everything in complete, "
"comprehensive sentences and paragraphs such that your hypothesis requires little to no "
"outside context to understand. Your hypothesis must be relevant to the USER QUERY or "
"INFORMATION NEED. Format the hypothesis in rich markdown and include all relevant citations "
"inline.",
other_prompt_sections=shared_sections,
ignore_group_chat_environment=True,
chat_model=chat_model,
spinner=spinner,
)
_, chat = get_response(
query=str(
StructuredString(
sections=[
Section(name="Information Need", text=state.information_need),
Section(
name="Previous Queries & Answers",
text="None" if state.answers_to_queries is None or len(state.answers_to_queries) == 0 else None,
sub_sections=[
Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False)
for query, answer in (state.answers_to_queries or {}).items()
],
),
Section(name="Previous Hypothesis", text=str(state.current_hypothesis)),
Section(name="Feedback", text=str(state.feedback)),
]
)
),
answerer=hypothesis_generator,
renderer=TerminalChatRenderer(),
)
output = chat_messages_to_pydantic(
chat_messages=chat.get_messages(), chat_model=chat_model, output_schema=HypothesisGenerationResult
)
state.proposed_hypothesis = output.hypothesis
def check_satisficing(
state: BHSRState,
chat_model: BaseChatModel,
shared_sections: Optional[List[Section]] = None,
spinner: Optional[Halo] = None,
) -> None:
satisficing_checker = LangChainBasedAIChatParticipant(
name="Information Needs Satisficing Checker",
role="Information Needs Satisficing Checker",
personal_mission="You are an information needs satisficing checker. You will be given a litany of materials, "
"including an original user query, previous search queries, their results, notes, "
"and a final hypothesis. You are to generate a decision as to whether or not the information "
"need has been satisficed or not. You are to make this judgment by virtue of several "
"factors: amount and quality of searches performed, specificity and comprehensiveness of the "
"hypothesis, and notes about the information domain and foraging (if present). Several "
"things to keep in mind: the user's information need may not be answerable, "
"or only partially answerable, given the available information or nature of the problem. "
"Unanswerable data needs are satisficed when data foraging doesn't turn up more relevant "
"information. Use a step-by-step approach to determine whether or not the information need "
"has been satisficed.",
other_prompt_sections=shared_sections,
ignore_group_chat_environment=True,
chat_model=chat_model,
spinner=spinner,
)
_, chat = get_response(
query=str(
StructuredString(
sections=[
Section(name="Information Need", text=state.information_need),
Section(
name="Previous Queries & Answers",
text="None" if state.answers_to_queries is None or len(state.answers_to_queries) == 0 else None,
sub_sections=[
Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False)
for query, answer in (state.answers_to_queries or {}).items()
],
),
Section(name="Previous Hypothesis", text=str(state.current_hypothesis)),
Section(name="Proposed New Hypothesis", text=str(state.proposed_hypothesis)),
]
)
),
answerer=satisficing_checker,
)
output = chat_messages_to_pydantic(
chat_messages=chat.get_messages(), chat_model=chat_model, output_schema=SatisficationCheckResult
)
state.feedback = output.feedback
state.is_satisficed = output.is_satisficed
state.current_hypothesis = state.proposed_hypothesis
state.proposed_hypothesis = None
def brainstorm_search_hypothesize_refine(
web_search: WebSearch,
chat_model: BaseChatModel,
initial_state: Optional[BHSRState] = None,
n_search_results: int = 3,
state_file: Optional[str] = None,
spinner: Optional[Halo] = None,
) -> BHSRState:
shared_sections = [Section(name="Current Date (YYYY-MM-DD)", text=datetime.datetime.utcnow().strftime("%Y-%m-%d"))]
web_search_tool = WebResearchTool(web_search=web_search, n_results=n_search_results, spinner=spinner)
if state_file is not None and spinner is not None:
spinner.start("Loading previous state...")
initial_state = BHSRState() if initial_state is None else initial_state
process = SequentialProcess(
steps=[
Step(
name="Query Generation",
func=partial(
generate_queries,
chat_model=chat_model,
interactive_user=initial_state.information_need is None,
shared_sections=shared_sections,
web_search_tool=web_search_tool,
spinner=spinner,
),
on_step_start=lambda _: spinner.start("Generating queries...") if spinner is not None else None,
on_step_completed=lambda _: spinner.succeed("Queries generated.") if spinner is not None else None,
),
Step(
name="Web Search",
func=partial(search_queries, web_search=web_search, n_search_results=n_search_results, spinner=spinner),
on_step_start=lambda _: spinner.start("Searching queries...") if spinner is not None else None,
on_step_completed=lambda _: spinner.succeed("Queries answered.") if spinner is not None else None,
),
Step(
name="Hypothesis Generation",
func=partial(
generate_hypothesis, chat_model=chat_model, shared_sections=shared_sections, spinner=spinner
),
on_step_start=lambda _: spinner.start("Generating hypothesis...") if spinner is not None else None,
on_step_completed=lambda _: spinner.succeed("Hypothesis generated.") if spinner is not None else None,
),
Step(
name="Satificing Check",
func=partial(
check_satisficing, chat_model=chat_model, shared_sections=shared_sections, spinner=spinner
),
on_step_start=lambda _: spinner.start("Checking satisfication condition...")
if spinner is not None
else None,
on_step_completed=lambda _: spinner.succeed("Satisfication checked.") if spinner is not None else None,
),
],
initial_state=initial_state,
save_state=partial(save_state, state_file=state_file),
)
state = process.run()
return state # type: ignore
def run_brainstorm_search_hypothesize_refine_loop(
web_search: WebSearch,
chat_model: BaseChatModel,
n_search_results: int = 3,
initial_state: Optional[BHSRState] = None,
state_file: Optional[str] = None,
confirm_satisficed: bool = False,
spinner: Optional[Halo] = None,
) -> str:
loaded_state = load_state(state_file)
if loaded_state is None:
initial_state = BHSRState() if initial_state is None else initial_state
if spinner is not None:
spinner.stop()
else:
initial_state = loaded_state
if spinner is not None:
spinner.succeed("Loaded previous state.")
if initial_state.is_satisficed:
spinner.warn("The information need has already been satisficed")
return initial_state.current_hypothesis or ""
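# Repeat the full brainstorm-search-hypothesize-refine cycle until the satisficing check
# passes; if confirm_satisficed is set, ask the user for feedback and loop again if any is given.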
while True:
state = brainstorm_search_hypothesize_refine(
initial_state=initial_state,
web_search=web_search,
chat_model=chat_model,
n_search_results=n_search_results,
state_file=state_file,
spinner=spinner,
)
if state.is_satisficed:
if not confirm_satisficed:
break
has_feedback = questionary.confirm(
"The information need seems to have been satisficed. Do you have any feedback?"
).ask()
if not has_feedback:
break
feedback = questionary.text("What is your feedback?").ask()
state.is_satisficed = False
state.feedback = feedback
return state.current_hypothesis or ""
class BrainstormSearchHypothesizeRefineToolArgs(BaseModel):
query: str = Field(description="The query to thoroughly research.")
TArgSchema = TypeVar("TArgSchema", bound=BaseModel)
class BrainstormSearchHypothesizeRefineTool(BaseTool, Generic[TArgSchema]):
web_search: WebSearch
chat_model: BaseChatModel
n_results: int = 3
state_file: Optional[str] = None
spinner: Optional[Halo] = None
name: str = "web_research"
description: str = (
"Research the web using a Brainstorm-Search-Hypothesize-Refine approach. Use that to get a "
"very comprehensive (but expensive) answer for a query you don't know or unsure of the answer "
"to, for recent events, or if the user asks you to. This will evaluate answer snippets, "
"knowledge graphs, and the top N results from google and aggregate a result for multiple "
"queries. Very thorough research."
)
args_schema: Type[TArgSchema] = BrainstormSearchHypothesizeRefineToolArgs # type: ignore
progress_text: str = "Researching the topic (this may take a while)..."
def _run(self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, **kwargs: Any) -> Any:
hypothesis = run_brainstorm_search_hypothesize_refine_loop(
initial_state=BHSRState(information_need=query),
confirm_satisficed=False,
web_search=self.web_search,
chat_model=self.chat_model,
n_search_results=self.n_results,
state_file=self.state_file,
spinner=self.spinner,
)
return hypothesis
| [] |
2024-01-10 | doodledood/chat-flock | chatflock~web_research~page_analyzer.py | from typing import Optional
import abc
from bs4 import BeautifulSoup, Comment, NavigableString
from halo import Halo
from langchain.chat_models.base import BaseChatModel
from langchain.text_splitter import TextSplitter
from pydantic import BaseModel
from chatflock.parsing_utils import string_output_to_pydantic
from chatflock.structured_string import Section, StructuredString
from ..participants.langchain import LangChainBasedAIChatParticipant
from ..use_cases.request_response import get_response
from .errors import NonTransientHTTPError, TransientHTTPError
from .page_retrievers import PageRetriever
def clean_html(content):
# Parse the HTML content
soup = BeautifulSoup(content, "html.parser")
# Remove non-visible tags
for invisible_elem in soup(["style", "script", "meta", "[document]", "head", "title"]):
invisible_elem.extract()
# Remove comment nodes
for comment in soup.findAll(text=lambda text: isinstance(text, Comment)):
comment.extract()
# Function to check if a tag contains text
def tag_contains_text(tag):
if isinstance(tag, NavigableString):
return tag.strip() != ""
return any(tag_contains_text(child) for child in tag.children if not isinstance(child, Comment))
# Remove tags that don't contain text or don't have children that contain text
for tag in soup.find_all(True):
if not tag_contains_text(tag):
tag.decompose()
else:
# Strip all attributes from tags that contain text, except hrefs on links
href = tag.attrs.get("href")
tag.attrs = {}
if href is not None:
tag.attrs["href"] = href
return str(soup)
class PageQueryAnalysisResult(BaseModel):
answer: str
class PageQueryAnalyzer(abc.ABC):
@abc.abstractmethod
def analyze(self, url: str, title: str, query: str, spinner: Optional[Halo] = None) -> PageQueryAnalysisResult:
raise NotImplementedError()
class OpenAIChatPageQueryAnalyzer(PageQueryAnalyzer):
def __init__(
self,
chat_model: BaseChatModel,
page_retriever: PageRetriever,
text_splitter: TextSplitter,
use_first_split_only: bool = True,
):
self.chat_model = chat_model
self.page_retriever = page_retriever
self.text_splitter = text_splitter
self.use_first_split_only = use_first_split_only
def analyze(self, url: str, title: str, query: str, spinner: Optional[Halo] = None) -> PageQueryAnalysisResult:
try:
html = self.page_retriever.retrieve_html(url)
except (NonTransientHTTPError, TransientHTTPError) as e:
return PageQueryAnalysisResult(
answer=f"The query could not be answered because an error occurred while retrieving the page: {e}"
)
finally:
self.page_retriever.close()
cleaned_html = clean_html(html)
docs = self.text_splitter.create_documents([cleaned_html])
answer = "No answer yet."
for i, doc in enumerate(docs):
text = doc.page_content
query_answerer = LangChainBasedAIChatParticipant(
name="Web Page Query Answerer",
role="Web Page Query Answerer",
personal_mission="Answer queries based on provided (partial) web page content from the web.",
chat_model=self.chat_model,
other_prompt_sections=[
Section(
name="Crafting a Query Answer",
sub_sections=[
Section(
name="Process",
list=[
"Analyze the query and the given content",
"If context is provided, use it to answer the query.",
"Summarize the answer in a comprehensive, yet succinct way.",
],
list_item_prefix=None,
),
Section(
name="Guidelines",
list=[
"If the answer is not found in the page content, it's insufficent, or not relevant "
"to the query at all, state it clearly.",
"Do not fabricate information. Stick to provided content.",
"Provide context for the next call (e.g., if a paragraph was cut short, include "
"relevant header information, section, etc. for continuity). Assume the content is "
"partial content from the page. Be very detailed in the context.",
"If unable to answer but found important information, include it in the context "
"for the next call.",
"Pay attention to the details of the query and make sure the answer is suitable "
"for the intent of the query.",
"A potential answer might have been provided. This means you thought you found "
"the answer in a previous partial text for the same page. You should double-check "
"that and provide an alternative revised answer if you think it's wrong, "
"or repeat it if you think it's right or cannot be validated using the current "
"text.",
],
),
],
)
],
)
final_answer, _ = get_response(
query=str(
StructuredString(
sections=[
Section(name="Query", text=query),
Section(name="Url", text=url),
Section(name="Title", text=title),
Section(name="Previous Answer", text=answer),
Section(name="Page Content", text=f"```{text}```"),
]
)
),
answerer=query_answerer,
)
result = string_output_to_pydantic(
output=final_answer, chat_model=self.chat_model, output_schema=PageQueryAnalysisResult
)
answer = result.answer
if self.use_first_split_only:
break
return PageQueryAnalysisResult(
answer=answer,
)
| [] |
2024-01-10 | doodledood/chat-flock | examples~chatgpt_clone_with_langchain_memory.py | import typer
from dotenv import load_dotenv
from halo import Halo
from langchain.memory import ConversationSummaryBufferMemory
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.backing_stores.langchain import LangChainMemoryBasedChatDataBackingStore
from chatflock.base import Chat, ChatDataBackingStore
from chatflock.conductors import RoundRobinChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model, get_max_context_size
def chatgpt_clone_with_langchain_memory(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
ai = LangChainBasedAIChatParticipant(name="Assistant", chat_model=chat_model, spinner=spinner)
user = UserChatParticipant(name="User")
participants = [user, ai]
max_context_size = get_max_context_size(chat_model)
if max_context_size is None:
backing_store: ChatDataBackingStore = InMemoryChatDataBackingStore()
else:
memory = ConversationSummaryBufferMemory(llm=chat_model, max_token_limit=max_context_size)
backing_store = LangChainMemoryBasedChatDataBackingStore(memory=memory)
chat = Chat(backing_store=backing_store, renderer=TerminalChatRenderer(), initial_participants=participants)
chat_conductor = RoundRobinChatConductor()
chat_conductor.initiate_dialog(chat=chat)
if __name__ == "__main__":
load_dotenv()
typer.run(chatgpt_clone_with_langchain_memory)
| [] |
2024-01-10 | doodledood/chat-flock | examples~bshr_loop.py | # Based directly on David Shapiro's BSHR Loop: https://github.com/daveshap/BSHR_Loop
from typing import Optional
from pathlib import Path
import typer
from dotenv import load_dotenv
from halo import Halo
from langchain.text_splitter import TokenTextSplitter
from chatflock.use_cases.bshr import run_brainstorm_search_hypothesize_refine_loop
from chatflock.web_research import OpenAIChatPageQueryAnalyzer, WebSearch
from chatflock.web_research.page_retrievers import SeleniumPageRetriever
from chatflock.web_research.search import GoogleSerperSearchResultsProvider
from examples.common import create_chat_model, get_max_context_size
def bshr_loop(
model: str = "gpt-4-1106-preview",
model_for_page_analysis: str = "gpt-3.5-turbo-1106",
temperature: float = 0.0,
temperature_for_page_analysis: float = 0.0,
n_search_results: int = 3,
state_file_path: Optional[str] = "output/bshr_state.json",
) -> None:
if state_file_path is not None:
Path(state_file_path).parent.mkdir(exist_ok=True, parents=True)
chat_model = create_chat_model(model=model, temperature=temperature)
chat_model_for_analysis = create_chat_model(
model=model_for_page_analysis, temperature=temperature_for_page_analysis
)
max_context_size = get_max_context_size(chat_model_for_analysis) or 12_000
page_retriever = SeleniumPageRetriever()
web_search = WebSearch(
chat_model=chat_model,
# Make sure you have a valid API Key for Serper in your .env file: SERPER_API_KEY=...
search_results_provider=GoogleSerperSearchResultsProvider(),
page_query_analyzer=OpenAIChatPageQueryAnalyzer(
chat_model=chat_model_for_analysis,
page_retriever=page_retriever,
text_splitter=TokenTextSplitter(chunk_size=max_context_size, chunk_overlap=max_context_size // 5),
use_first_split_only=True,
),
)
spinner = Halo(spinner="dots")
hypothesis = run_brainstorm_search_hypothesize_refine_loop(
confirm_satisficed=True,
web_search=web_search,
chat_model=chat_model,
n_search_results=n_search_results,
state_file=state_file_path,
spinner=spinner,
)
print(f"Final Answer:\n----------------\n{hypothesis}\n----------------")
page_retriever.close()
if __name__ == "__main__":
load_dotenv()
typer.run(bshr_loop)
| [] |
2024-01-10 | doodledood/chat-flock | examples~automatic_chat_composition.py | import typer
from dotenv import load_dotenv
from halo import Halo
from langchain.memory import ConversationSummaryBufferMemory
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.backing_stores.langchain import LangChainMemoryBasedChatDataBackingStore
from chatflock.base import Chat, ChatDataBackingStore
from chatflock.code.langchain import CodeExecutionTool
from chatflock.code.local import LocalCodeExecutor
from chatflock.composition_generators.langchain import LangChainBasedAIChatCompositionGenerator
from chatflock.conductors import LangChainBasedAIChatConductor
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model, get_max_context_size
def automatic_chat_composition(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
def create_default_backing_store() -> ChatDataBackingStore:
max_context_size = get_max_context_size(chat_model)
if max_context_size is not None:
return LangChainMemoryBasedChatDataBackingStore(
memory=ConversationSummaryBufferMemory(llm=chat_model, max_token_limit=max_context_size)
)
else:
return InMemoryChatDataBackingStore()
spinner = Halo(spinner="dots")
user = UserChatParticipant(name="User")
chat_conductor = LangChainBasedAIChatConductor(
chat_model=chat_model,
spinner=spinner,
# Set up a proper goal so the composition generator can use it to generate the composition that will best fit
goal="Come up with a plan for the user to invest their money. The goal is to maximize wealth over the "
"long-term, while minimizing risk.",
# Pass in a composition generator to the conductor
composition_generator=LangChainBasedAIChatCompositionGenerator(
fixed_team_members=[user],
chat_model=chat_model,
spinner=spinner,
participant_available_tools=[CodeExecutionTool(executor=LocalCodeExecutor(), spinner=spinner)],
),
)
chat = Chat(backing_store=create_default_backing_store(), renderer=TerminalChatRenderer())
# It's not necessary in practice to manually call `prepare_chat` since preparation is done automatically
# when calling `initiate_dialog`. However, calling it here eagerly generates the composition.
# The default is lazy and will happen when the chat is initiated.
chat_conductor.prepare_chat(chat=chat)
print(f"\nGenerated composition:\n=================\n{chat.active_participants_str}\n=================\n\n")
result = chat_conductor.initiate_dialog(chat=chat)
print(result)
if __name__ == "__main__":
load_dotenv()
typer.run(automatic_chat_composition)
| [] |
2024-01-10 | doodledood/chat-flock | examples~common.py | from typing import Optional
from pathlib import Path
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.globals import set_llm_cache
from langchain.llms.openai import OpenAI
def create_chat_model(
model: str = "gpt-4-1106-preview",
temperature: float = 0.0,
cache_db_file_path: Optional[str] = "output/llm_cache.db",
) -> BaseChatModel:
if cache_db_file_path is not None:
Path(cache_db_file_path).parent.mkdir(parents=True, exist_ok=True)
set_llm_cache(SQLiteCache(database_path=cache_db_file_path))
chat_model = ChatOpenAI(temperature=temperature, model=model)
return chat_model
def get_max_context_size(chat_model: BaseChatModel) -> Optional[int]:
try:
max_context_size = OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore
except ValueError:
return None
return max_context_size
| [] |
2024-01-10 | doodledood/chat-flock | examples~chatgpt_clone_with_langchain_retrieval.py | import typer
from dotenv import load_dotenv
from halo import Halo
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.chroma import Chroma
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import RoundRobinChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model
def chatgpt_clone_with_langchain_retrieval(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
# Set up a simple document store.
texts = [
"The user's name is Eric.",
"The user likes to eat Chocolate.",
"The user loves to play video games.",
"The user is a software engineer.",
]
# Make sure you install chromadb: `pip install chromadb`
db = Chroma.from_texts(texts, OpenAIEmbeddings())
retriever = db.as_retriever()
ai = LangChainBasedAIChatParticipant(
name="Assistant",
chat_model=chat_model,
# Pass the retriever to the AI participant
retriever=retriever,
spinner=spinner,
)
user = UserChatParticipant(name="User")
participants = [user, ai]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(), renderer=TerminalChatRenderer(), initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
chat_conductor.initiate_dialog(chat=chat)
if __name__ == "__main__":
load_dotenv()
typer.run(chatgpt_clone_with_langchain_retrieval)
| [] |
2024-01-10 | doodledood/chat-flock | examples~manual_hierarchical_participant.py | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import LangChainBasedAIChatConductor, RoundRobinChatConductor
from chatflock.participants.group import GroupBasedChatParticipant
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from examples.common import create_chat_model
def manual_hierarchical_participant(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
comedy_team = GroupBasedChatParticipant(
group_name="Comedy Team",
mission="Collaborate on funny humour-filled responses based on the original request for the user",
chat=Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=[
LangChainBasedAIChatParticipant(
name="Bob",
role="Chief Comedian",
personal_mission="Take questions from the user and collaborate with "
"Tom to come up with a succinct funny (yet realistic) "
"response. Short responses are preferred.",
chat_model=chat_model,
spinner=spinner,
),
LangChainBasedAIChatParticipant(
name="Tom",
role="Junior Comedian",
personal_mission="Collaborate with Bob to come up with a succinct "
"funny (yet realistic) response to the user. Short responses are preferred",
chat_model=chat_model,
spinner=spinner,
),
],
),
chat_conductor=LangChainBasedAIChatConductor(
chat_model=chat_model,
goal="Come up with a funny succinct response to the user.",
interaction_schema="Bob should collaborate with Tom to come up with a great funny and succinct response. "
"When one is agreed upon, the chat should end",
spinner=spinner,
),
spinner=spinner,
)
user = UserChatParticipant(name="User")
participants = [user, comedy_team]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(), renderer=TerminalChatRenderer(), initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
chat_conductor.initiate_dialog(chat=chat)
if __name__ == "__main__":
load_dotenv()
typer.run(manual_hierarchical_participant)
| [] |
2024-01-10 | doodledood/chat-flock | examples~chatgpt_clone.py | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores.in_memory import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors.round_robin import RoundRobinChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers.terminal import TerminalChatRenderer
from examples.common import create_chat_model
def chatgpt_clone(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
ai = LangChainBasedAIChatParticipant(name="Assistant", chat_model=chat_model, spinner=spinner)
user = UserChatParticipant(name="User")
participants = [user, ai]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(), renderer=TerminalChatRenderer(), initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
chat_conductor.initiate_dialog(chat=chat)
if __name__ == "__main__":
load_dotenv()
typer.run(chatgpt_clone)
| [] |
2024-01-10 | doodledood/chat-flock | chatflock~parsing_utils.py | from typing import List, Optional, Sequence, Type
from halo import Halo
from langchain.chat_models.base import BaseChatModel
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat, ChatMessage, TOutputSchema
from chatflock.conductors import RoundRobinChatConductor
from chatflock.errors import MessageCouldNotBeParsedError
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.output_parser import JSONOutputParserChatParticipant
from chatflock.renderers import NoChatRenderer
from chatflock.structured_string import Section
from chatflock.utils import pydantic_to_json_schema
def string_output_to_pydantic(
output: str,
chat_model: BaseChatModel,
output_schema: Type[TOutputSchema],
spinner: Optional[Halo] = None,
n_tries: int = 3,
hide_message: bool = True,
) -> TOutputSchema:
return chat_messages_to_pydantic(
chat_messages=[ChatMessage(id=1, sender_name="Unknown", content=output)],
chat_model=chat_model,
output_schema=output_schema,
spinner=spinner,
n_tries=n_tries,
hide_message=hide_message,
)
def chat_messages_to_pydantic(
chat_messages: Sequence[ChatMessage],
chat_model: BaseChatModel,
output_schema: Type[TOutputSchema],
spinner: Optional[Halo] = None,
n_tries: int = 3,
hide_message: bool = True,
) -> TOutputSchema:
text_to_json_ai = LangChainBasedAIChatParticipant(
chat_model=chat_model,
name="Jason",
role="JSON Converter",
symbol="📄",
personal_mission="Your only purpose is to convert the previous chat messages (usually the last one)"
"to a valid and logical JSON that follows the JSON SCHEMA provided. Your message should "
"include only correct JSON. No fluff.",
other_prompt_sections=[Section(name="JSON SCHEMA", text=str(pydantic_to_json_schema(output_schema)))],
ignore_group_chat_environment=True,
spinner=spinner,
)
json_parser = JSONOutputParserChatParticipant(output_schema=output_schema)
# Remove TERMINATE if present so the chat conductor doesn't end the chat prematurely
if len(chat_messages) > 0:
chat_messages = list(chat_messages).copy()
last_message = chat_messages[-1]
try:
# Chop the content at the last instance of the word TERMINATE in the content
idx = last_message.content.rindex("TERMINATE")
new_content = last_message.content[:idx].strip()
last_message = ChatMessage(id=last_message.id, sender_name=last_message.sender_name, content=new_content)
chat_messages[-1] = last_message
except ValueError:
pass
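# Message budget for the parser chat below, roughly: the original messages, one initial JSON
# attempt, and up to (n_tries - 1) retry round-trips between the converter and the JSON parser.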
parser_chat = Chat(
backing_store=InMemoryChatDataBackingStore(messages=list(chat_messages)),
renderer=NoChatRenderer(),
initial_participants=[text_to_json_ai, json_parser],
hide_messages=hide_message,
max_total_messages=len(chat_messages) + 1 + (n_tries - 1) * 2,
)
conductor = RoundRobinChatConductor()
_ = conductor.initiate_dialog(chat=parser_chat)
if json_parser.output is None:
raise MessageCouldNotBeParsedError("An output could not be parsed from the chat messages.")
return json_parser.output
| [] |
2024-01-10 | doodledood/chat-flock | examples~three_way_ai_conductor.py | import typer
from dotenv import load_dotenv
from halo import Halo
from chatflock.backing_stores.in_memory import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors.langchain import LangChainBasedAIChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers.terminal import TerminalChatRenderer
from examples.common import create_chat_model
def three_way_ai_conductor(model: str = "gpt-4-1106-preview", temperature: float = 0.0) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
spinner = Halo(spinner="dots")
bartender = LangChainBasedAIChatParticipant(
name="Johnny",
role="Bartender",
personal_mission="You are a bartender at a Cafe called 'Coffee Time'. You are a friendly guy who likes to "
"chat with customers. You should collaborate with the Cook when the customer asks for food. "
"You are the one in front, greeting the customer.",
chat_model=chat_model,
spinner=spinner,
)
cook = LangChainBasedAIChatParticipant(
name="Greg",
role="Cook",
personal_mission="You are a cook at a Cafe called 'Coffee Time'. You are an impatient and serious guy who "
"doesn't like to chat with customers. You should collaborate with the Bartender when the "
"customer asks for food. You are the one in the back, preparing the food.",
chat_model=chat_model,
spinner=spinner,
)
user = UserChatParticipant(name="User")
participants = [user, bartender, cook]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=TerminalChatRenderer(),
initial_participants=participants,
)
chat_conductor = LangChainBasedAIChatConductor(
chat_model=chat_model,
spinner=spinner,
goal="Serve the user as best as possible.",
# This tells the conductor how to select the next speaker
interaction_schema="The User is a customer at a Cafe called 'Coffee Time'. "
"The bartender should go first and greet the customer. "
"When the user asks for food and orders something, the bartender should ask the cook to cook the food. "
"There might be some conversation between the cook and bartender. "
"The cook should then give the food to the bartender and the bartender should give the food to the user. "
"The user should then eat the food and give feedback to the bartender. The cook should not talk to the user.",
)
chat_conductor.initiate_dialog(chat=chat)
if __name__ == "__main__":
load_dotenv()
typer.run(three_way_ai_conductor)
| [] |
2024-01-10 | doodledood/chat-flock | examples~chatgpt_clone_with_additional_tools.py | import typer
from dotenv import load_dotenv
from halo import Halo
from langchain.text_splitter import TokenTextSplitter
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.code import LocalCodeExecutor
from chatflock.code.langchain import CodeExecutionTool
from chatflock.conductors.round_robin import RoundRobinChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers.terminal import TerminalChatRenderer
from chatflock.web_research import WebSearch
from chatflock.web_research.page_analyzer import OpenAIChatPageQueryAnalyzer
from chatflock.web_research.page_retrievers.selenium_retriever import SeleniumPageRetriever
from chatflock.web_research.search import GoogleSerperSearchResultsProvider
from chatflock.web_research.web_research import WebResearchTool
from examples.common import create_chat_model, get_max_context_size
def chatgpt_clone_with_additional_tools(
model: str = "gpt-4-1106-preview",
model_for_page_analysis: str = "gpt-3.5-turbo-1106",
temperature: float = 0.0,
temperature_for_page_analysis: float = 0.0,
) -> None:
chat_model = create_chat_model(model=model, temperature=temperature)
chat_model_for_page_analysis = create_chat_model(
model=model_for_page_analysis, temperature=temperature_for_page_analysis
)
max_context_size_for_page_analysis = get_max_context_size(chat_model_for_page_analysis) or 12_000
page_retriever = SeleniumPageRetriever()
web_search = WebSearch(
chat_model=chat_model,
search_results_provider=GoogleSerperSearchResultsProvider(),
page_query_analyzer=OpenAIChatPageQueryAnalyzer(
chat_model=chat_model_for_page_analysis,
# Make sure you `pip install selenium webdriver_manager` to use this
page_retriever=page_retriever,
text_splitter=TokenTextSplitter(
chunk_size=max_context_size_for_page_analysis, chunk_overlap=max_context_size_for_page_analysis // 5
),
use_first_split_only=True,
),
)
spinner = Halo(spinner="dots")
ai = LangChainBasedAIChatParticipant(
name="Assistant",
chat_model=chat_model,
tools=[
CodeExecutionTool(executor=LocalCodeExecutor(spinner=spinner), spinner=spinner),
WebResearchTool(web_search=web_search, n_results=3, spinner=spinner),
],
spinner=spinner,
)
user = UserChatParticipant(name="User")
participants = [user, ai]
chat = Chat(
backing_store=InMemoryChatDataBackingStore(), renderer=TerminalChatRenderer(), initial_participants=participants
)
chat_conductor = RoundRobinChatConductor()
chat_conductor.initiate_dialog(chat=chat)
page_retriever.close()
if __name__ == "__main__":
load_dotenv()
typer.run(chatgpt_clone_with_additional_tools)
| [] |
2024-01-10 | doodledood/chat-flock | chatflock~participants~spr.py | from typing import Any, List, Optional
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.structured_string import Section
class SPRWriterChatParticipant(LangChainBasedAIChatParticipant):
def __init__(
self, name: str = "SPR Writer", other_prompt_sections: Optional[List[Section]] = None, **kwargs: Any
) -> None:
super().__init__(
name=name,
personal_mission="You are a Sparse Priming Representation (SPR) writer. An SPR is a particular kind of "
"use of language for advanced NLP, NLU, and NLG tasks, particularly useful for the "
"latest generation of Large Language Models (LLMs). You will be given information by the "
"USER which you are to render as an SPR.",
other_prompt_sections=[
Section(
name="Theory",
text="LLMs are a kind of deep neural network. They have been demonstrated to embed knowledge, "
"abilities, and concepts, ranging from reasoning to planning, and even to theory of mind. "
"These are called latent abilities and latent content, collectively referred to as latent "
"space. The latent space of an LLM can be activated with the correct series of words as "
"inputs, which will create a useful internal state of the neural network. This is not unlike "
"how the right shorthand cues can prime a human mind to think in a certain way. Like human "
"minds, LLMs are associative, meaning you only need to use the correct associations to "
'"prime" another model to think in the same way.',
),
Section(
name="Methodology",
text="Render the input as a distilled list of succinct statements, assertions, associations, "
"concepts, analogies, and metaphors. The idea is to capture as much, conceptually, "
"as possible but with as few words as possible. Write it in a way that makes sense to you, "
"as the future audience will be another language model, not a human.",
),
*(other_prompt_sections or []),
],
**kwargs,
)
| [] |
2024-01-10 | doodledood/chat-flock | chatflock~web_research~web_research.py | from typing import Any, List, Optional, Tuple, Type
import re
from halo import Halo
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.chat_models.base import BaseChatModel
from langchain.tools import BaseTool, Tool
from pydantic.v1 import BaseModel, Field
from tenacity import RetryError
from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.base import Chat
from chatflock.conductors import RoundRobinChatConductor
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import NoChatRenderer
from chatflock.structured_string import Section, StructuredString
from chatflock.web_research.errors import NonTransientHTTPError, TransientHTTPError
from chatflock.web_research.page_analyzer import PageQueryAnalyzer
from chatflock.web_research.search import SearchResultsProvider
video_watch_urls_patterns = [
r"youtube.com/watch\?v=([a-zA-Z0-9_-]+)",
r"youtu.be/([a-zA-Z0-9_-]+)",
r"vimeo.com/([0-9]+)",
r"dailymotion.com/video/([a-zA-Z0-9]+)",
r"dailymotion.com/embed/video/([a-zA-Z0-9]+)",
r"tiktok.com/@([a-zA-Z0-9_]+)/video/([0-9]+)",
]
def url_unsupported(url):
# List of unsupported file types
unsupported_types = ["pdf", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "rtf", "jpg", "png", "gif"]
# Extract file extension from the URL
file_extension = re.findall(r"\.([a-zA-Z0-9]+)(?:[\?\#]|$)", url)
# Check if the file extension is in the list of unsupported types
if file_extension and file_extension[0] in unsupported_types:
return True
# Check if URL is a video or video site
for pattern in video_watch_urls_patterns:
if re.search(pattern, url):
return True
return False
class WebSearch:
def __init__(
self,
chat_model: BaseChatModel,
search_results_provider: SearchResultsProvider,
page_query_analyzer: PageQueryAnalyzer,
skip_results_if_answer_snippet_found: bool = True,
):
self.chat_model = chat_model
self.search_results_provider = search_results_provider
self.page_query_analyzer = page_query_analyzer
self.skip_results_if_answer_snippet_found = skip_results_if_answer_snippet_found
def get_answer(
self, query: str, n_results: int = 3, urls: Optional[List[str]] = None, spinner: Optional[Halo] = None
) -> Tuple[bool, str]:
original_spinner_text = None if spinner is None else spinner.text
qna = []
if urls is None:
if spinner is not None:
spinner.start(f'Getting search results for "{query}"...')
try:
search_results = self.search_results_provider.search(query=query, n_results=n_results)
except (TransientHTTPError, NonTransientHTTPError) as e:
return False, f'Failed to get search results for "{query}" because of an error: {e}'
if spinner is not None:
spinner.succeed(f'Got search results for "{query}".')
if len(search_results.organic_results) == 0 and search_results.answer_snippet is None:
return False, "Nothing was found on the web for this query."
if search_results.knowledge_graph_description is not None:
qna.append({"answer": search_results.knowledge_graph_description, "source": "Knowledge Graph"})
if search_results.answer_snippet is not None:
qna.append({"answer": search_results.answer_snippet, "source": "Answer Snippet"})
if not self.skip_results_if_answer_snippet_found or search_results.answer_snippet is None:
for result in search_results.organic_results:
if url_unsupported(result.link):
continue
if spinner is not None:
spinner.start(f'Reading & analyzing #{result.position} result "{result.title}"')
try:
page_result = self.page_query_analyzer.analyze(
url=result.link, title=result.title, query=query, spinner=spinner
)
answer = page_result.answer
if spinner is not None:
spinner.succeed(f'Read & analyzed #{result.position} result "{result.title}".')
except Exception as e:
if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):
if spinner is not None:
spinner.warn(
f'Failed to read & analyze #{result.position} result "{result.title}", moving on.'
)
answer = "Unable to answer query because the page could not be read."
else:
raise
qna.append({"answer": answer, "source": result.link})
else:
# Urls were provided, search in those urls instead of searching using a search engine
for url in urls:
if url_unsupported(url):
continue
if spinner is not None:
spinner.start(f'Reading & analyzing URL "{url}"')
try:
page_result = self.page_query_analyzer.analyze(
url=url, title="Unknown", query=query, spinner=spinner
)
answer = page_result.answer
if spinner is not None:
spinner.succeed(f'Read & analyzed URL "{url}".')
except Exception as e:
if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):
if spinner is not None:
spinner.warn(f'Failed to read & analyze URL "{url}", moving on.')
answer = "Unable to answer query because the page could not be read."
else:
raise
qna.append({"answer": answer, "source": url})
if spinner is not None:
spinner.start(f"Processing results...")
formatted_answers = "\n".join([f'{i + 1}. {q["answer"]}; Source: {q["source"]}' for i, q in enumerate(qna)])
chat = Chat(
backing_store=InMemoryChatDataBackingStore(),
renderer=NoChatRenderer(),
initial_participants=[
UserChatParticipant(),
LangChainBasedAIChatParticipant(
name="Query Answer Aggregator",
role="Query Answer Aggregator",
personal_mission="Analyze query answers, discard unlikely ones, and provide an aggregated final response.",
chat_model=self.chat_model,
other_prompt_sections=[
Section(
name="Aggregating Query Answers",
sub_sections=[
Section(
name="Process",
list=[
"Receive query and answers with sources.",
"Analyze answers, discard unlikely or minority ones.",
"Formulate final answer based on most likely answers.",
'If no data found, respond "The answer could not be found."',
],
list_item_prefix=None,
),
Section(
name="Aggregation",
list=[
"Base final answer on sources.",
"Incorporate sources as inline citations in Markdown format.",
'Example: "Person 1 was [elected president in 2012](https://...)."',
"Only include sources from provided answers.",
"If part of an answer is used, use the same links inline.",
],
),
Section(
name="Final Answer Notes",
list=[
"Do not fabricate information. Stick to provided data.",
"You will be given the top search results from a search engine, there is a reason they are the top results. You should pay attention to all of them and think about the query intent."
"If the answer is not found in the page data, state it clearly.",
"Should be formatted in Markdown with inline citations.",
],
),
],
)
],
),
],
max_total_messages=2,
)
chat_conductor = RoundRobinChatConductor()
final_answer = chat_conductor.initiate_dialog(
chat=chat,
initial_message=str(
StructuredString(
sections=[Section(name="Query", text=query), Section(name="Answers", text=formatted_answers)]
)
),
)
if spinner is not None:
spinner.succeed(f"Done searching the web.")
if original_spinner_text is not None:
spinner.start(original_spinner_text)
return True, final_answer
class WebSearchToolArgs(BaseModel):
query: str = Field(
description="The query to search the web for (or what to look for in the page in case urls are provided)."
)
urls: Optional[List[str]] = Field(
description="A list of urls to search for the query in. If provided, the query will be searched in these urls. If not provided, the query will be searched in the top search results from a search engine. Provide urls only when the user mentions a URL (if applicable)"
)
class WebResearchTool(BaseTool):
web_search: WebSearch
n_results: int = 3
spinner: Optional[Halo] = None
name: str = "web_search"
description: str = "Research the web. Use that to get an answer for a query you don't know or unsure of the answer to, for recent events, or if the user asks you to. This will evaluate answer snippets, knowledge graphs, and the top N results from google and aggregate a result."
args_schema: Type[BaseModel] = WebSearchToolArgs
progress_text: str = "Searching the web..."
def _run(
self,
query: str,
urls: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> Any:
return self.web_search.get_answer(query=query, n_results=self.n_results, urls=urls, spinner=self.spinner)[1]
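# Example usage (a minimal sketch; the WebSearch constructor arguments and the query below
# are assumptions rather than verified API, so the snippet is left commented out):
#
#     web_search = WebSearch(chat_model=chat_model, page_query_analyzer=page_query_analyzer)
#     research_tool = WebResearchTool(web_search=web_search, n_results=3, spinner=Halo(spinner="dots"))
#     answer = research_tool.run({"query": "When was the James Webb Space Telescope launched?"})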
| [
"Research the web. Use that to get an answer for a query you don't know or unsure of the answer to, for recent events, or if the user asks you to. This will evaluate answer snippets, knowledge graphs, and the top N results from google and aggregate a result."
] |
2024-01-10 | doodledood/chat-flock | chatflock~ai_utils.py | from typing import Any, Dict, Optional, Sequence, Type, TypeVar
import json
from json import JSONDecodeError
from halo import Halo
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseMessage, FunctionMessage
from langchain.tools import BaseTool
from langchain.tools.render import format_tool_to_openai_function
from pydantic import BaseModel
from chatflock.errors import FunctionNotFoundError
from chatflock.utils import fix_invalid_json
def execute_chat_model_messages(
chat_model: BaseChatModel,
messages: Sequence[BaseMessage],
chat_model_args: Optional[Dict[str, Any]] = None,
tools: Optional[Sequence[BaseTool]] = None,
spinner: Optional[Halo] = None,
) -> str:
chat_model_args = chat_model_args or {}
if "functions" in chat_model_args:
raise ValueError(
"The `functions` argument is reserved for the "
"`execute_chat_model_messages` function. If you want to add more "
"functions use the `functions` argument to this method."
)
if tools is not None and len(tools) > 0:
chat_model_args["functions"] = [format_tool_to_openai_function(tool) for tool in tools]
function_map = {tool.name: tool for tool in tools or []}
all_messages = list(messages).copy()
last_message = chat_model.predict_messages(all_messages, **chat_model_args)
function_call = last_message.additional_kwargs.get("function_call")
while function_call is not None:
function_name = function_call["name"]
if function_name in function_map:
tool = function_map[function_name]
args = function_call["arguments"]
if spinner is not None:
if hasattr(tool, "progress_text"):
progress_text = tool.progress_text
else:
progress_text = f"Executing function `{function_name}`..."
spinner.start(progress_text)
try:
args = json.loads(args)
result = tool.run(args)
except JSONDecodeError as e:
# Try to fix the JSON manually before giving up
try:
args = fix_invalid_json(args)
args = json.loads(args)
result = tool.run(args)
except JSONDecodeError as e:
result = f"Error decoding args for function: {e}"
except Exception as e:
result = f"Error executing function: {e}"
all_messages.append(
FunctionMessage(
name=function_name,
                    content=f"The function execution returned:\n```{str(result).strip() or 'None'}```",
)
)
last_message = chat_model.predict_messages(all_messages, **chat_model_args)
function_call = last_message.additional_kwargs.get("function_call")
else:
raise FunctionNotFoundError(function_name)
return str(last_message.content)
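# Example usage (a minimal sketch, assuming the caller provides a LangChain chat model, a
# HumanMessage and a `calculator_tool`; left commented out since it calls a live LLM):
#
#     reply = execute_chat_model_messages(
#         chat_model=ChatOpenAI(temperature=0),
#         messages=[HumanMessage(content="What is 17 * 23?")],
#         tools=[calculator_tool],
#         spinner=Halo(spinner="dots"),
#     )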
PydanticType = TypeVar("PydanticType", bound=Type[BaseModel])
def pydantic_to_openai_function(
pydantic_type: PydanticType, function_name: Optional[str] = None, function_description: Optional[str] = None
) -> Dict[str, Any]:
base_schema = pydantic_type.model_json_schema()
    base_schema.pop("title", None)
    base_schema.pop("description", None)  # absent when the model has no docstring
description = function_description if function_description is not None else (pydantic_type.__doc__ or "")
return {"name": function_name or pydantic_type.__name__, "description": description, "parameters": base_schema}
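if __name__ == "__main__":
    # Quick demonstration of pydantic_to_openai_function on a hypothetical argument model.
    class GetWeatherArgs(BaseModel):
        """Get the current weather for a given city."""

        city: str

    schema = pydantic_to_openai_function(GetWeatherArgs, function_name="get_weather")
    # schema == {"name": "get_weather",
    #            "description": "Get the current weather for a given city.",
    #            "parameters": <JSON schema with a required string field "city">}
    print(schema)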
| [
"The function execution returned:\n```PLACEHOLDER```"
] |
2024-01-10 | onlyphantom/gpt3-sandbox | api~demo_web_app.py | """Runs the web app given a GPT object and UI configuration."""
from http import HTTPStatus
import json
import subprocess
import os
import openai
from dotenv import load_dotenv
from flask import Flask, request, Response
from .gpt import set_openai_key, Example
from .ui_config import UIConfig
load_dotenv()
OPENAI_KEY = os.getenv("OPENAI_SECRET_KEY")
def demo_web_app(gpt, config=UIConfig()):
"""Creates Flask app to serve the React app."""
app = Flask(__name__)
set_openai_key(OPENAI_KEY)
@app.route("/params", methods=["GET"])
def get_params():
# pylint: disable=unused-variable
response = config.json()
return response
def error(err_msg, status_code):
return Response(json.dumps({"error": err_msg}), status=status_code)
def get_example(example_id):
"""Gets a single example or all the examples."""
# return all examples
if not example_id:
return json.dumps(gpt.get_all_examples())
example = gpt.get_example(example_id)
if not example:
return error("id not found", HTTPStatus.NOT_FOUND)
return json.dumps(example.as_dict())
def post_example():
"""Adds an empty example."""
new_example = Example("", "")
gpt.add_example(new_example)
return json.dumps(gpt.get_all_examples())
def put_example(args, example_id):
"""Modifies an existing example."""
if not example_id:
return error("id required", HTTPStatus.BAD_REQUEST)
example = gpt.get_example(example_id)
if not example:
return error("id not found", HTTPStatus.NOT_FOUND)
if "input" in args:
example.input = args["input"]
if "output" in args:
example.output = args["output"]
# update the example
gpt.add_example(example)
return json.dumps(example.as_dict())
def delete_example(example_id):
"""Deletes an example."""
if not example_id:
return error("id required", HTTPStatus.BAD_REQUEST)
gpt.delete_example(example_id)
return json.dumps(gpt.get_all_examples())
@app.route(
"/examples",
methods=["GET", "POST"],
defaults={"example_id": ""},
)
@app.route(
"/examples/<example_id>",
methods=["GET", "PUT", "DELETE"],
)
def examples(example_id):
method = request.method
args = request.json
if method == "GET":
return get_example(example_id)
if method == "POST":
return post_example()
if method == "PUT":
return put_example(args, example_id)
if method == "DELETE":
return delete_example(example_id)
return error("Not implemented", HTTPStatus.NOT_IMPLEMENTED)
@app.route("/translate", methods=["GET", "POST"])
def translate():
# pylint: disable=unused-variable
prompt = request.json["prompt"]
response = gpt.submit_request(prompt)
offset = 0
if not gpt.append_output_prefix_to_query:
offset = len(gpt.output_prefix)
return {'text': response['choices'][0]['text'][offset:]}
subprocess.Popen(["yarn", "start"])
app.run()
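# Example usage (a sketch only; the GPT and UIConfig constructor arguments shown here are
# assumptions about the surrounding project, not verified, so the snippet is commented out):
#
#     gpt = GPT(engine="davinci", temperature=0.5, max_tokens=100)
#     gpt.add_example(Example("Fetch unique values of DEPARTMENT from Worker table.",
#                             "SELECT DISTINCT DEPARTMENT FROM Worker;"))
#     demo_web_app(gpt, UIConfig(description="Text to SQL", button_text="Translate"))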
| [] |
2024-01-10 | ag8/tiny-town | http_utils.py | import re
import openai as openai
import secret_stuff
openai.api_key = secret_stuff.API_KEY
def save(text):
with open("requests.log", "a") as f:
f.write(text + "\n")
f.write("-----------------------------\n")
def get_embedding(text):
response = openai.Embedding.create(
        input=text,
model="text-embedding-ada-002"
)
embeddings = response['data'][0]['embedding']
return embeddings
def get_importance(memory):
prompt = "On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following piece of memory.\nMemory: " + memory + "\nRating:"
save(prompt)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=2,
temperature=0.7,
messages=[
{"role": "user", "content": prompt}
]
)
try:
return int(completion["choices"][0]["message"]["content"])
except:
try:
            # fall back to the first character of the reply (e.g. a rating like "7/10")
            return int(completion["choices"][0]["message"]["content"][0])
except:
print("Oh no, can't evaluate importance well for memory " + memory)
print("instead saying " + completion["choices"][0]["message"]["content"])
return 5
def reflect_on_memories(name, memories, t):
prompt = "Statements about " + str(name) + "\n"
for idx, memory in enumerate(memories):
prompt += str(idx) + ". " + memory.description + "\n"
prompt += "What 5 high-level insights can you infer from the above statements? (example format: insight (because of {1, 6, 2, 3}))\n\n1."
save(prompt)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=800,
temperature=0.7,
messages=[
{"role": "user", "content": prompt}
]
)
compl = completion["choices"][0]["message"]["content"]
reflections = compl.split("\n")
reflected = []
for idx, reflection in enumerate(reflections):
if idx == 0:
actual_text = reflection
else:
actual_text = ". ".join(reflection.split(". ")[1:])
print(actual_text)
evidentiary = actual_text.split("(")[1].split(")")[0]
evidence = tuple(int(x) for x in re.findall(r'\d+', evidentiary))
reflected.append([actual_text.split("(")[0], evidence])
return reflected
def summarize_core_memories(name, memories):
prompt = "How would one describe " + name + "'s core characteristics given the following statements?\n"
for memory in memories:
prompt += "- " + memory.description + "\n"
save(prompt)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=1200,
temperature=0.7,
messages=[
{"role": "user", "content": prompt}
]
)
return completion["choices"][0]["message"]["content"]
def get_completion(prompt):
save(prompt)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=1200,
temperature=0.7,
messages=[
{"role": "user", "content": prompt}
]
)
# todo add some checks
return completion["choices"][0]["message"]["content"]
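# Example usage (a sketch only, left commented out because every helper above calls the
# OpenAI API; the inputs are made up):
#
#     importance = get_importance("Had coffee with an old friend today.")   # small integer rating
#     insights = reflect_on_memories("Ada", recent_memories, t=12)          # [[insight, evidence ids], ...]
#     persona = summarize_core_memories("Ada", core_memories)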
| [
"What 5 high-level insights can you infer from the above statements? (example format: insight (because of {1, 6, 2, 3}))\n\n1.",
"\n",
"On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following piece of memory.\nMemory: PLACEHOLDER\nRating:",
"Statements about PLACEHOLDER\n",
"- ",
"How would one describe PLACEHOLDER's core characteristics given the following statements?\n",
". "
] |
2024-01-10 | rotysz/corpoingchat | corpoingchat.py | import sys
import os
import time
import argparse
from langchain.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import SystemMessagePromptTemplate,HumanMessagePromptTemplate,ChatPromptTemplate
from langchain.retrievers.multi_query import MultiQueryRetriever
from langchain.vectorstores import Pinecone
import pinecone
import fitz
def ReadAndSplitPDF(input_pdf_dir:str, chunk_size:int=8000, chunk_overlap:int=0):
print('Processing files ....')
all_doc_pages = []
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
for dirpath, dirnames, filenames in os.walk(input_pdf_dir):
for file in filenames:
if file.endswith('.pdf'):
print(f'Reading -> {file}')
pymu_loader = PyMuPDFLoader(os.path.join(dirpath, file))
pages = pymu_loader.load_and_split(text_splitter=text_splitter)
for page in pages:
page.page_content = page.page_content.replace('\n', '')
page.page_content = page.page_content.replace('', '')
all_doc_pages.extend(pages)
return all_doc_pages
def BuildVectorDatabase(all_doc_pages, index_file_name:str, vector_store:str,sleep_time:float=0.1):
print('Generating index ....')
db = None
for page_number, page in enumerate(all_doc_pages, start=1):
print(f'Adding page {page_number}/{len(all_doc_pages)}')
if page_number == 1:
if vector_store == "FAISS":
db = FAISS.from_documents([page], OpenAIEmbeddings())
else: # Pinecone
db = Pinecone.from_documents([page], OpenAIEmbeddings(), index_name=index_file_name)
else:
db.add_documents([page])
time.sleep(sleep_time)
if vector_store == "FAISS":
db.save_local(index_file_name)
return db
def AddToVectorDatabase(new_doc_pages, index_file_name:str, vector_store:str):
print('Adding new documents to index ....')
if vector_store == "FAISS":
db = FAISS.load_local(index_file_name, OpenAIEmbeddings())
else: # Pinecone
db = Pinecone.from_existing_index(index_name=index_file_name, embedding=OpenAIEmbeddings())
for page_number, page in enumerate(new_doc_pages, start=1):
print(f'Adding page {page_number}/{len(new_doc_pages)}')
db.add_documents([page])
time.sleep(0.1)
if vector_store == "FAISS":
db.save_local(index_file_name)
return db
def ReadIndex(input_pdf_dir:str, index_file_name:str, option:str, vector_store:str):
if option == 'gen':
all_doc_pages = ReadAndSplitPDF(input_pdf_dir)
db = BuildVectorDatabase(all_doc_pages, index_file_name, vector_store)
elif option == 'add':
new_doc_pages = ReadAndSplitPDF(input_pdf_dir)
db = AddToVectorDatabase(new_doc_pages, index_file_name, vector_store)
else:
print(f'Loading index from file .... {index_file_name}')
if vector_store == "FAISS":
db = FAISS.load_local(index_file_name, OpenAIEmbeddings())
else:
db = Pinecone.from_existing_index(index_name=index_file_name,embedding=OpenAIEmbeddings())
return db
def GetPromptFromFile( prompt_dir, prompt_name):
with open(os.path.join(prompt_dir, f'{prompt_name}.txt'), 'r') as file:
return file.read()
def GetQuestion(_query, _memory,
_human_template, _system_template,
_temperature=0, _max_tokens=256, _model_name='gpt-3.5-turbo-16k'):
Q_PROMPT = HumanMessagePromptTemplate.from_template(_human_template)
S_PROMPT = SystemMessagePromptTemplate.from_template(_system_template)
chat_prompt = ChatPromptTemplate.from_messages([S_PROMPT,Q_PROMPT])
chain = LLMChain(llm=ChatOpenAI(model_name=_model_name , temperature=_temperature, max_tokens=_max_tokens), memory=_memory, prompt=chat_prompt)
output = chain.predict(question=_query)
return output
def SearchMmr (vector_store, query, k, fetch_k ):
docs_mmr = vector_store.max_marginal_relevance_search(query, k=k, fetch_k=fetch_k)
return docs_mmr
def SearchMultiQuery ( vector_store, model, query, k):
retriever_from_llm = MultiQueryRetriever.from_llm(
retriever=vector_store.as_retriever(search_kwargs = {"k": k}), llm=model)
docs = retriever_from_llm.get_relevant_documents(query=query)
return docs
def GetAnswer(_query, _search_query, vectorstore,
_human_template, _system_template,
_temperature=0, _max_tokens=1024, _search_elements=4,
_model_name='gpt-3.5-turbo-16k', _searchopt='norm'):
_search_query_int = _search_query
if _searchopt == 'mmr':
docs = SearchMmr(vectorstore, _search_query_int, _search_elements, _search_elements* 3)
elif _searchopt == 'multiq':
docs = SearchMultiQuery(vectorstore,ChatOpenAI(model_name=_model_name, temperature=_temperature, max_tokens=_max_tokens) ,
_search_query_int, _search_elements)
else:
docs = vectorstore.similarity_search(_search_query_int, k=_search_elements)
total_words = 0
for i in range(len(docs)):
total_words += len(docs[i].page_content.split())
if total_words > 2500:
docs = docs[:i]
break
H_PROMPT = HumanMessagePromptTemplate.from_template(_human_template)
S_PROMPT = SystemMessagePromptTemplate.from_template(_system_template)
chat_prompt = ChatPromptTemplate.from_messages([S_PROMPT,H_PROMPT])
print(f'Pytanie -> {_query} \nSearch queries ->\n {_search_query}\n')
chain = load_qa_chain(ChatOpenAI(model_name=_model_name, temperature=_temperature, max_tokens=_max_tokens),
chain_type="stuff", prompt=chat_prompt,verbose=False)
output = chain({"input_documents": docs, "question": _query,"search":_search_query}, return_only_outputs=False)
return output
def PrintAnswer(output, _print_context=False):
print(f'Odpowiedź -> {output["output_text"]}\n')
print("Zrodła:\n")
for doc in output["input_documents"]:
print(f'[{len(doc.page_content.split())}, {doc.metadata["source"]} page {doc.metadata["page"]}/{doc.metadata["total_pages"]}]')
if _print_context:
print('Konteksty:')
for doc in output["input_documents"]:
print(
f'Kontekst [{len(doc.page_content)},{len(doc.page_content.split())}, {doc.metadata}]-> {doc.page_content}\n')
print("")
return
def Initialize(vector_store_arg:str, index_file_path:str, option:str):
if vector_store_arg.lower() == "pinecone":
pinecone.init(
api_key=os.environ["PINECONE_API_KEY"], # find at app.pinecone.io
environment=os.environ["PINECONE_ENV"] # next to api key in console
)
vector_store = "PINECONE"
elif vector_store_arg.lower() == "faiss":
vector_store = "FAISS"
else:
raise ValueError(f"Invalid vector_store_arg: {vector_store_arg}")
if option not in ['gen', 'nogen', 'add']:
raise ValueError(f"Invalid option: {option}")
return vector_store, option
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Chat tool based on documents")
parser.add_argument("input_docs", help="Input documents directory")
parser.add_argument("index", help="Index file path name")
parser.add_argument("option", choices=["gen", "nogen", "add"], help="Option for index generation")
parser.add_argument("vector_store", choices=["FAISS", "pinecone"], help="Vector store to use")
parser.add_argument("--searchopt", help="Search option for answer generation", choices=['norm', 'mmr', 'multiq'], default='norm')
parser.add_argument("--promptdir", default="Prompts", help="Directory containing prompt templates")
args = parser.parse_args()
vector_store, option = Initialize(args.vector_store, args.index, args.option.lower().strip())
model_name = os.environ.get("MODEL_NAME", "gpt-3.5-turbo-16k")
print_context = False
db = ReadIndex(args.input_docs, args.index, option, vector_store)
memory = ConversationBufferWindowMemory(return_messages=True,memory_key="chat_history",k=4)
memlen=len(memory.buffer)
while True:
#get query from user
query = input("Pytanie: ")
if query.lower() == 'q':
break
output_q = GetQuestion(query, memory,
GetPromptFromFile(args.promptdir, "Question Human Template"),
GetPromptFromFile(args.promptdir,"Question System Template"),
_max_tokens=512,_model_name=model_name)
#query = output_q
output = GetAnswer(query,output_q, db,
GetPromptFromFile(args.promptdir,"Answer Human Template"),
GetPromptFromFile(args.promptdir,"Answer System Template"),
_temperature=0, _max_tokens=1024 ,
_search_elements=6,_model_name=model_name,_searchopt=args.searchopt)
        # output = GetAnswer(query, query, db, _temperature=0, _max_tokens=1024, _search_elements=6)
memory.chat_memory.add_user_message(query)
memory.chat_memory.add_ai_message(output["output_text"])
memlen=len(memory.buffer)
PrintAnswer(output,print_context)
print ("Bot stopped.") | [
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | RackBoy/Py-Automation-Scripts | code~Dalle-2.py | import os
import openai
openai.api_key = "API-KEY-here"
response = openai.Image.create(
prompt="space unicorn",
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url']
print(image_url) | [] |
2024-01-10 | Takahiro910/docu-chat | files.py | import os
from typing import (
Any,
Union,
)
import zipfile
import streamlit as st
from streamlit.runtime.uploaded_file_manager import (
UploadedFile,
UploadedFileRec,
UploadedFileManager,
)
from streamlit.runtime.scriptrunner import get_script_run_ctx
from supabase.client import Client
from langchain.vectorstores.supabase import SupabaseVectorStore
from components_keys import ComponentsKeys
from loaders.audio import process_audio
from loaders.txt import process_txt
from loaders.csv import process_csv
from loaders.markdown import process_markdown
from loaders.pdf import process_pdf
from loaders.html import (
create_html_file,
delete_tempfile,
get_html,
process_html,
)
from loaders.powerpoint import process_powerpoint
from loaders.docx import process_docx
from utils import compute_sha1_from_content
ctx = get_script_run_ctx()
manager = UploadedFileManager()
file_processors = {
".txt": process_txt,
".csv": process_csv,
".md": process_markdown,
".markdown": process_markdown,
".m4a": process_audio,
".mp3": process_audio,
".webm": process_audio,
".mp4": process_audio,
".mpga": process_audio,
".wav": process_audio,
".mpeg": process_audio,
".pdf": process_pdf,
".html": process_html,
".pptx": process_powerpoint,
".docx": process_docx
}
def file_uploader(supabase, vector_store):
# Omit zip file support if the `st.secrets.self_hosted` != "true" because
# a zip file can consist of multiple files so the limit on 1 file uploaded
# at a time in the demo can be circumvented.
accepted_file_extensions = list(file_processors.keys())
accept_multiple_files = st.secrets.self_hosted == "true"
if accept_multiple_files:
accepted_file_extensions += [".zip"]
files = st.file_uploader(
"**Upload a file**",
accept_multiple_files=accept_multiple_files,
type=accepted_file_extensions,
key=ComponentsKeys.FILE_UPLOADER,
)
if st.secrets.self_hosted == "false":
st.markdown("**In demo mode, the max file size is 1MB**")
if st.button("Add to Database"):
# Single file upload
if isinstance(files, UploadedFile):
filter_file(files, supabase, vector_store)
# Multiple files upload
elif isinstance(files, list):
for file in files:
filter_file(file, supabase, vector_store)
def file_already_exists(supabase, file):
file_sha1 = compute_sha1_from_content(file.getvalue())
response = supabase.table("vectors").select("id").eq("metadata->>file_sha1", file_sha1).execute()
return len(response.data) > 0
def file_to_uploaded_file(file: Any) -> Union[None, UploadedFile]:
"""Convert a file to a streamlit `UploadedFile` object.
This allows us to unzip files and treat them the same way
streamlit treats files uploaded through the file uploader.
Parameters
---------
file : Any
The file. Can be any file supported by this app.
Returns
-------
Union[None, UploadedFile]
The file converted to a streamlit `UploadedFile` object.
Returns `None` if the script context cannot be grabbed.
"""
if ctx is None:
print("script context not found, skipping uploading file:", file.name)
return
file_extension = os.path.splitext(file.name)[-1]
file_name = file.name
file_data = file.read()
# The file manager will automatically assign an ID so pass `None`
# Reference: https://github.com/streamlit/streamlit/blob/9a6ce804b7977bdc1f18906d1672c45f9a9b3398/lib/streamlit/runtime/uploaded_file_manager.py#LL98C6-L98C6
uploaded_file_rec = UploadedFileRec(None, file_name, file_extension, file_data)
uploaded_file_rec = manager.add_file(
ctx.session_id,
ComponentsKeys.FILE_UPLOADER,
uploaded_file_rec,
)
return UploadedFile(uploaded_file_rec)
def filter_zip_file(
file: UploadedFile,
supabase: Client,
vector_store: SupabaseVectorStore,
) -> None:
"""Unzip the zip file then filter each unzipped file.
Parameters
----------
file : UploadedFile
The uploaded file from the file uploader.
supabase : Client
The supabase client.
vector_store : SupabaseVectorStore
The vector store in the database.
"""
with zipfile.ZipFile(file, "r") as z:
unzipped_files = z.namelist()
for unzipped_file in unzipped_files:
with z.open(unzipped_file, "r") as f:
filter_file(f, supabase, vector_store)
def filter_file(file, supabase, vector_store):
# Streamlit file uploads are of type `UploadedFile` which has the
# necessary methods and attributes for this app to work.
if not isinstance(file, UploadedFile):
file = file_to_uploaded_file(file)
file_extension = os.path.splitext(file.name)[-1]
if file_extension == ".zip":
filter_zip_file(file, supabase, vector_store)
return True
if file_already_exists(supabase, file):
st.write(f"😎 {file.name} is already in the database.")
return False
if file.size < 1:
st.write(f"💨 {file.name} is empty.")
return False
if file_extension in file_processors:
if st.secrets.self_hosted == "false":
file_processors[file_extension](vector_store, file, stats_db=supabase)
else:
file_processors[file_extension](vector_store, file, stats_db=None)
st.write(f"✅ {file.name} ")
return True
st.write(f"❌ {file.name} is not a valid file type.")
return False
def url_uploader(supabase, vector_store):
url = st.text_area("**Add an url**",placeholder="https://www.quivr.app")
button = st.button("Add the URL to the database")
if button:
html = get_html(url)
if html:
st.write(f"Getting content ... {url} ")
try:
file, temp_file_path = create_html_file(url, html)
except UnicodeEncodeError as e:
st.write(f"❌ Error encoding character: {e}")
file, temp_file_path = create_html_file(url, html)
ret = filter_file(file, supabase, vector_store)
delete_tempfile(temp_file_path, url, ret)
else:
st.write(f"❌ Failed to access to {url} .") | [] |
2024-01-10 | Takahiro910/docu-chat | question.py | import anthropic
import streamlit as st
from streamlit_chat import message
from streamlit.logger import get_logger
from typing import Any, Dict, List
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.llms import OpenAI
from langchain.vectorstores import SupabaseVectorStore
from llm import LANGUAGE_PROMPT
from stats import add_usage
class AnswerConversationBufferMemory(ConversationBufferMemory):
"""ref https://github.com/hwchase17/langchain/issues/5630#issuecomment-1574222564"""
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
return super(AnswerConversationBufferMemory, self).save_context(
inputs, {'response': outputs['answer']})
memory = AnswerConversationBufferMemory(
memory_key="chat_history", return_messages=True)
openai_api_key = st.secrets.openai_api_key
logger = get_logger(__name__)
def count_tokens(question, model):
count = f'Words: {len(question.split())}'
return count
def chat_with_doc(model, vector_store: SupabaseVectorStore, stats_db):
if 'chat_history' not in st.session_state:
st.session_state['chat_history'] = []
question = st.text_input("## いかがいたしましたか?")
columns = st.columns(3)
with columns[0]:
button = st.button("決定")
with columns[1]:
count_button = st.button("トークンを数える", type='secondary')
with columns[2]:
clear_history = st.button("チャット履歴を消す", type='secondary')
if clear_history:
# Clear memory in Langchain
memory.clear()
st.session_state['chat_history'] = []
st.experimental_rerun()
if button:
qa = None
add_usage(stats_db, "chat", question, {"model": model, "temperature": st.session_state['temperature']})
ConversationalRetrievalChain.prompts = LANGUAGE_PROMPT
logger.info('Using OpenAI model %s', model)
qa = ConversationalRetrievalChain.from_llm(
OpenAI(
model_name=st.session_state['model'], openai_api_key=openai_api_key, temperature=st.session_state['temperature'], max_tokens=st.session_state['max_tokens']), vector_store.as_retriever(), memory=memory, verbose=True, return_source_documents=True)
st.session_state['chat_history'].append(("You", question))
# Generate model's response and add it to chat history
model_response = qa({"question": question})
logger.info('Result: %s', model_response)
st.session_state['chat_history'].append(("Akasha", model_response["answer"], ))
# Display chat history
st.empty()
is_user = True
for speaker, text in st.session_state['chat_history']:
# st.markdown(f"**{speaker}:** {text}")
if speaker == "You":
is_user = True
else:
is_user = False
message(text, is_user=is_user)
st.markdown("""
---
Source:
""")
st.write(model_response["source_documents"])
if count_button:
st.write(count_tokens(question, model)) | [] |
2024-01-10 | Takahiro910/docu-chat | llm~LANGUAGE_PROMPT.py | from langchain.prompts.prompt import PromptTemplate
_template = """次の会話と追加の質問があった場合、日本語で追加の質問に答えてください。与えられた文脈で正しい答えがない場合は、自分の知識に基づいて答えてみてください。答えがわからない場合は、答えをでっち上げようとせず、わからないと言ってください。日本語での回答が難しい場合は英語でも構いません。
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
CONDENSE_QUESTION_PROMPT = PromptTemplate.from_template(_template)
prompt_template = """次の文脈を使用して、日本語で質問に答えてください。与えられた文脈で正しい答えがない場合は、自分の知識に基づいて答えてみてください。答えがわからない場合は、答えをでっち上げようとせず、わからないと言ってください。日本語での回答が難しい場合は英語でも構いません。
{context}
Question: {question}
Helpful Answer:"""
QA_PROMPT = PromptTemplate(
template=prompt_template, input_variables=["context", "question"]
) | [
"question",
"context",
"次の会話と追加の質問があった場合、日本語で追加の質問に答えてください。与えられた文脈で正しい答えがない場合は、自分の知識に基づいて答えてみてください。答えがわからない場合は、答えをでっち上げようとせず、わからないと言ってください。日本語での回答が難しい場合は英語でも構いません。\n\nChat History:\n{chat_history}\nFollow Up Input: {question}\nStandalone question:",
"次の文脈を使用して、日本語で質問に答えてください。与えられた文脈で正しい答えがない場合は、自分の知識に基づいて答えてみてください。答えがわからない場合は、答えをでっち上げようとせず、わからないと言ってください。日本語での回答が難しい場合は英語でも構いません。\n\n{context}\n\nQuestion: {question}\nHelpful Answer:"
] |
2024-01-10 | garyexplains/examples | smart_websearch.py | # Copyright 2023, Gary Sims
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import requests
import urllib.parse
import json
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from bs4 import BeautifulSoup
def make_request_with_headers(url, headers):
"""
Make an HTTP GET request to a specified URL with given headers.
"""
try:
response = requests.get(url, headers=headers)
response.raise_for_status() # Raise an exception for HTTP errors
return response.text
except requests.RequestException as e:
return f"An error occurred: {e}"
def urlencode(string):
return urllib.parse.quote(string, safe='/+')
def replace_spaces_with_plus(string):
return string.replace(" ", "+")
def fetch_web_content(url):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, 'html.parser')
return soup.get_text()
def fetch_content_make_summary(url):
content = fetch_web_content(url)
prompt = "Summarize this text into a 300-word extractive summary, ignore all HTML, CSS, and Javascript. The summary text should be easy to read, and engaging:\n" + content
summary = chat_model.predict(prompt)
return summary
origterms = input("Please enter your search term: ")
terms = replace_spaces_with_plus(origterms)
terms = urlencode(terms)
print("Searching...")
url = "https://api.search.brave.com/res/v1/web/search?result_filter=web&q=" + terms
headers = {
'Accept': 'application/json',
"X-Subscription-Token": 'TOKEN-HERE'
}
response = make_request_with_headers(url, headers)
# Response in is JSON see https://api.search.brave.com/app/documentation/web-search/responses
data = json.loads(response)
web = data['web']
results = web['results']
# Extract first 3 URLs
url1 = results[0]['url']
url2 = results[1]['url']
url3 = results[2]['url']
print("Init ChatGPT...")
# Different models
# gpt-4
# gpt-4-1106-preview
# gpt-3.5-turbo (gpt-3.5-turbo-0613)
chat_model = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key="KEY-HERE")
print("Fetch first web page and create summary...")
sum1 = fetch_content_make_summary(url1)
print("Fetch second web page and create summary...")
sum2 = fetch_content_make_summary(url2)
print("Fetch third web page and create summary...")
sum3 = fetch_content_make_summary(url3)
print("Ask ChatGPT about this subject...")
prompt = "Create a detailed summary about " + origterms
sum0 = chat_model.predict(prompt)
print("Ask ChatGPT to create a meta summary...")
prompt = "Rewrite the following text as a blog post, ignoring all duplicate information. The blog post text should be easy to read, and engaging:\n\n" + sum1 + "\n\n\n" + sum2 + "\n\n\n" + sum3 + "\n\n\n" + sum0
msummary = chat_model.predict(prompt)
print("\n\n", msummary)
print("\n\nSources:\n", url1, "\n", url2, "\n", url3, "\nAnd ChatGPT!")
| [
"Rewrite the following text as a blog post, ignoring all duplicate information. The blog post text should be easy to read, and engaging:\n\nPLACEHOLDER\n\n\nPLACEHOLDER\n\n\nPLACEHOLDER\n\n\nPLACEHOLDER",
"Create a detailed summary about PLACEHOLDER",
"Summarize this text into a 300-word extractive summary, ignore all HTML, CSS, and Javascript. The summary text should be easy to read, and engaging:\nPLACEHOLDER"
] |
2024-01-10 | PeterMitrano/reference | amplify~backend~function~referencesync~src~citation_search.py | import io
import os
import re
from dataclasses import dataclass
from datetime import datetime
from functools import lru_cache
from typing import List
import arxiv
import boto3
import numpy as np
import openai
import requests
from Levenshtein import distance
from arxiv import HTTPError, UnexpectedEmptyPageError
from fuzzywuzzy import fuzz
from pdfminer.high_level import extract_text
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from dropbox_utils import download_from_dropbox
from ga import GA
from logging_utils import get_logger
def titlecase(s):
""" taken from https://www.pythontutorial.net/python-string-methods/python-titlecase/ """
return re.sub("[A-Za-z]+('[A-Za-z]+)?",
lambda word: word.group(0).capitalize(),
s)
def strify(x):
return x if x is not None else ''
def intify(x):
try:
return int(x) if x is not None else 0
except ValueError:
return 0
logger = get_logger(__file__)
DEFAULT_CONFIDENCE_THRESHOLD = 0.5
MAX_QUERY_SIZE = 256
MAX_FULL_TEXT = 512
MAX_ELEMENTS = 6
CURRENT_YEAR = intify(datetime.now().strftime('%Y'))
YEAR_WEIGHT = 0.1
ssm = boto3.client("ssm")
parameter_name = os.environ['SS_KEY']
ss_key_res = ssm.get_parameter(Name=parameter_name, WithDecryption=True)
ss_key = ss_key_res['Parameter']['Value']
@dataclass
class Citation:
title: str
authors: List[str]
venue: str
year: int
confidence: float
def __repr__(self):
author_str = ','.join(self.authors) if len(self.authors) > 0 else 'NA'
return f"<{self.title}: {author_str}, {self.year}@{self.venue}>"
NO_CITATION_INFO = Citation(title='', authors=[], venue='', year=CURRENT_YEAR, confidence=0.0)
@lru_cache
def search_dblp(endpoint, query):
url = f"https://dblp.org/search/{endpoint}/api?"
res = requests.get(url, params={'q': query, 'format': 'json', 'h': 1})
if not res.ok:
logger.error("Failed to search dblp")
return None
res_json = res.json()
result = res_json.get("result")
if result is None:
logger.error("Failed to search dblp, no 'result' json")
logger.error(res_json)
return None
hits = result['hits']
n_hits = int(hits['@sent'])
if int(n_hits) == 0:
return None
hit = hits['hit'][0]
info = hit['info']
# for hit in hits['hit']:
# print(hit['info']['venue'], hit['@score'])
return info
@lru_cache
def standardize_venue(venue: str):
venue_lower = venue.lower()
if venue_lower == 'arxiv': # special case because dblp has it listed as CoRR which is confusing and stupid
return 'ArXiv'
matches = re.findall(r"([^\(\)]+)", venue, flags=re.MULTILINE)
substrings_to_remove = [
"proceedings",
"th",
"st",
"annual",
",",
"/",
"'",
"\"",
]
venue_cleaned = re.sub(r"[\d+]", "", venue_lower)
for sub in substrings_to_remove:
venue_cleaned = venue_cleaned.replace(sub, "")
venue_cleaned = venue_cleaned.strip(" ")
venue_guesses = [venue_lower, venue_cleaned] + matches
for query in venue_guesses:
if len(query) < 3:
continue
info = search_dblp(endpoint='venue', query=query)
if info is not None:
new_venue = info['venue']
new_acronym = info.get("acronym", "")
similarity_new = fuzz.partial_ratio(new_venue, venue)
similarity_new_acronym = fuzz.partial_ratio(new_acronym, venue)
if re.match(r"\d+", new_venue):
continue
if similarity_new_acronym < 70 and similarity_new < 70:
continue
if 'acronym' in info:
if re.match(r"[0-9]+", info['acronym']):
continue
return info['acronym']
else:
return info['venue']
return venue
@lru_cache
def standardize_author(author: str):
info = search_dblp(endpoint='author', query=author)
if info is None:
return author
return info['author']
@lru_cache
def search_semantic_scholar(query: str):
search_url = 'https://api.semanticscholar.org/graph/v1/paper/search'
params = {
'query': query,
'limit': 3,
'fields': 'title,authors,venue,year',
'x-api-key': ss_key,
}
res = requests.get(search_url, params)
if not res.ok:
logger.warn(f"Semantic scholar query failed {res.status_code}")
logger.warn(res.text)
return NO_CITATION_INFO
query_res = res.json()
if query_res['total'] == 0:
return NO_CITATION_INFO
first_paper_res = query_res['data'][0]
return Citation(
title=strify(first_paper_res['title']),
authors=[author['name'] for author in first_paper_res['authors']],
venue=strify(first_paper_res['venue']),
year=intify(first_paper_res['year']),
confidence=1.0,
)
def query_arxiv(query='', id_list=None):
if id_list is None:
id_list = []
search = arxiv.Search(query=query, id_list=id_list, max_results=3)
try:
paper = next(search.results())
return Citation(
title=strify(paper.title),
authors=strify([a.name for a in paper.authors]),
venue=strify(paper.journal_ref),
year=intify(paper.published.year),
confidence=1.0,
)
except (HTTPError, UnexpectedEmptyPageError, StopIteration):
return NO_CITATION_INFO
@lru_cache
def search_arxiv(query_str):
return query_arxiv(query=query_str)
def guess_arxiv_from_filename(name):
arxiv_id = name.strip(".pdf").strip("/.-")
if re.fullmatch(r'[0-9]+.[0-9]+', arxiv_id):
return query_arxiv(id_list=[arxiv_id])
return NO_CITATION_INFO
def guess_from_pdf_metadata(pdf_fp):
parser = PDFParser(pdf_fp)
doc = PDFDocument(parser)
pdf_metadata = doc.info[0]
# FIXME: utf-8 is totally unacceptable
return Citation(
title=pdf_metadata.get('Title', b'').decode("utf-8", errors='ignore'),
authors=pdf_metadata.get('Author', b'').decode("utf-8", errors='ignore').split(" "), # TODO: be smarter here
venue='',
year=CURRENT_YEAR,
confidence=0.2,
)
def format_element_for_gpt(element_idx, x):
# x[text, bbox, w, h]
text, _, w, h = x
text = text.strip(" \n").replace('\n', '')
return f'{element_idx}:\n\tTEXT: {text}\n\tW: {w}, H: {h}'
def format_inputs_for_gpt(full_text, gpt_question):
return '\n'.join([
f"Full Text:",
full_text.strip(" \n").replace("\n", " "),
gpt_question + ':',
])
def make_gpt_prompt(full_text, gpt_question):
prompt = format_inputs_for_gpt(full_text, gpt_question)
return prompt
def authors_from_gpt_choice(choice):
authors = choice['text'].strip("\n ")
authors = authors.split(".")[0].split("\n")[0]
authors = authors.split(",")
authors = [author.strip(" \n\t,:\"\'") for author in authors]
return authors
def title_from_gpt_choice(choice):
title = choice['text'].strip("\n ")
if "\"" in title:
title = re.sub(r"^.*?\"", "\"", title)
m = re.findall(r"([^\"]+)", title, flags=re.MULTILINE)
title = m[0]
else:
title = title.split("\n")[0]
title = titlecase(title).strip(".'\"")
return title
def guess_from_nlp(full_text, n_completions):
    guess_title_prompt = make_gpt_prompt(full_text, gpt_question="The title is")
title_completion = openai.Completion.create(engine="babbage-instruct-beta",
prompt=guess_title_prompt,
max_tokens=20,
n=n_completions)
    guess_authors_prompt = make_gpt_prompt(full_text, gpt_question="The author's names are:")
authors_completion = openai.Completion.create(engine="babbage-instruct-beta",
prompt=guess_authors_prompt,
max_tokens=32,
n=n_completions)
choices = zip(title_completion['choices'], authors_completion['choices'])
guesses = []
for title_completion_choice, authors_completion_choice in choices:
title = title_from_gpt_choice(title_completion_choice)
authors = authors_from_gpt_choice(authors_completion_choice)
guess = Citation(
title=title,
authors=authors,
venue='',
year=CURRENT_YEAR,
confidence=0.1,
)
guesses.append(guess)
return guesses
def search_online_databases(citation):
if len(citation.title) == 0:
return [NO_CITATION_INFO]
query_string = f"{citation.title}"
query_string = query_string[:MAX_QUERY_SIZE]
arxiv_citation = search_arxiv(query_string)
ss_citation = search_semantic_scholar(query_string)
return np.array([arxiv_citation, ss_citation])
def venue_cost(venue, online_venue):
if len(online_venue) == 0 and len(venue) == 0:
return 100
elif len(online_venue) == 0:
return 0
else:
return distance(venue, online_venue)
def authors_distance(authors, online_authors):
num_authors_mismatch = abs(len(authors) - len(online_authors))
authors_costs = sum([distance(a1, a2) for a1, a2 in zip(authors, online_authors)])
encourage_long_author_names = sum([50 / len(a) for a in authors])
return authors_costs + num_authors_mismatch + encourage_long_author_names
def online_search_cost(citation):
# one way to see if a citation is good is to use it to search a database.
# If you don't get a result, that's a bad sign and deserves high cost. If you do, then the distance
# of that result to the citation used for querying can be used as cost
all_online_citations = search_online_databases(citation)
valid_online_citations = list(filter(lambda c: c != NO_CITATION_INFO, all_online_citations))
if np.all(valid_online_citations == NO_CITATION_INFO) or len(valid_online_citations) == 0:
return 100000
all_costs = []
for online_citation in valid_online_citations:
field_costs = [
distance(citation.title, online_citation.title),
authors_distance(citation.authors, online_citation.authors),
venue_cost(citation.venue, online_citation.venue),
np.abs(citation.year - online_citation.year) * YEAR_WEIGHT,
# cost for empty venue. Helpful since sometimes online search has empty venue too
(10 if len(citation.venue) == 0 else 0),
]
cost = sum(field_costs)
all_costs.append(cost)
return np.min(all_costs)
def dist_to_original_doct(citation, full_text):
d = 100 - fuzz.partial_ratio(citation.title.lower(), full_text.lower())
return d
def crossover_authors(rng, authors1, authors2):
raise NotImplementedError()
class CitationGA(GA):
def __init__(self, filename, pdf_fp, population_size=10):
super().__init__()
self.population_size = population_size
self.filename = filename
self.pdf_fp = pdf_fp
self.full_text = extract_text(pdf_fp, maxpages=1)[:MAX_FULL_TEXT]
def initialize(self):
nlp_guess = guess_from_nlp(self.full_text, n_completions=3)
population = [
guess_from_pdf_metadata(self.pdf_fp),
guess_arxiv_from_filename(self.filename),
]
population += nlp_guess
population = self.rng.choice(population, self.population_size)
return population
def cost(self, citation: Citation):
return online_search_cost(citation) + dist_to_original_doct(citation, self.full_text)
def mutate(self, citation: Citation):
# To mutate, we simply perform crossover with search results
online_citations = search_online_databases(citation)
valid_online_citations = list(filter(lambda c: c != NO_CITATION_INFO, online_citations))
if len(valid_online_citations) == 0:
return citation
sampled_online_citation = self.rng.choice(valid_online_citations)
if self.rng.rand() > 0.5:
citation.venue = standardize_venue(citation.venue)
citation.authors = [standardize_author(a) if self.rng.rand() > 0.5 else a for a in citation.authors]
return self.crossover(citation, sampled_online_citation)
def crossover(self, citation1: Citation, citation2: Citation):
# randomly inherit each field from parents
keep_from_1 = (self.rng.rand(4) > 0.5)
output = Citation(
title=(citation1.title if keep_from_1[0] else citation2.title),
# authors=crossover_authors(self.rng, citation1.authors, citation2.authors),
authors=(citation1.authors if keep_from_1[1] else citation2.authors),
venue=(citation1.venue if keep_from_1[2] else citation2.venue),
year=(citation1.year if keep_from_1[3] else citation2.year),
confidence=(citation1.confidence + citation2.confidence) / 2,
)
return output
def extract_citation(dbx, file):
file_data = download_from_dropbox(dbx, file.name)
pdf_fp = io.BytesIO(file_data)
ga = CitationGA(filename=file.name, pdf_fp=pdf_fp, population_size=20)
best_citation = ga.opt(generations=4)
best_citation.title = titlecase(best_citation.title)
return best_citation
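# Example usage (a sketch only; extract_citation() is the Dropbox-facing entry point, but
# CitationGA can also be driven from a local PDF. Commented out because it calls the
# OpenAI, arXiv and Semantic Scholar APIs; the file name is made up):
#
#     with open("2103.00020.pdf", "rb") as pdf_fp:
#         ga = CitationGA(filename="2103.00020.pdf", pdf_fp=pdf_fp, population_size=20)
#         best = ga.opt(generations=4)
#     print(best.title, best.authors, best.venue, best.year)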
| [
"The author's names are:",
"The title is"
] |
2024-01-10 | userandnames/openai_for_myself | chat_api_example.py | from environs import Env
from openai import OpenAI
# 加载 .env 文件中的环境变量
env = Env()
env.read_env()
client = OpenAI()
# defaults to getting the key using os.environ.get("OPENAI_API_KEY")
# if you saved the key under a different environment variable name, you can do something like:
# client = OpenAI(
# api_key=os.environ.get("OPENAI_API_KEY"),
# )
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
{"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
]
)
print(completion.choices[0].message)
| [
"Compose a poem that explains the concept of recursion in programming.",
"You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."
] |
2024-01-10 | sumitsahoo/chart-gen | src~util~chart_util.py | import os
import time
from openai import OpenAI
from src.util.log_util import LogUtil
class ChartUtil:
def __init__(self):
self.log = LogUtil()
self.client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
self.chart_out_path = "./outputs/chart.png"
def generate_chart(self, message):
# Prepare the prompt
ci_prompt = "Please generate a chart using following data: \n" + message
try:
# Create a thread and run the assistant
thread = self.client.beta.threads.create(
messages=[
{
"role": "user",
"content": ci_prompt,
}
]
)
# Run the thread
run = self.client.beta.threads.runs.create(
assistant_id=os.environ["OPENAI_ASSISTANT_ID"], thread_id=thread.id
)
# Poll the run status until it is completed
while True:
# Refresh the run to get the latest status
run = self.client.beta.threads.runs.retrieve(
run_id=run.id, thread_id=thread.id
)
if run.status == "completed":
self.log.info("Generated chart, Run finished")
# Get list of messages in the thread
messages = self.client.beta.threads.messages.list(
thread_id=thread.id
)
# Get the latest message in the thread and retrieve file id
self.log.info(messages.data[0])
image_file_id = messages.data[0].content[0].image_file.file_id
content_description = messages.data[0].content[1].text.value
# Get the raw response from the file id
raw_response = self.client.files.with_raw_response.content(
file_id=image_file_id
)
# Delete generated file
self.client.files.delete(image_file_id)
# Save the generated chart to a file
with open(self.chart_out_path, "wb") as f:
f.write(raw_response.content)
return (self.chart_out_path, content_description)
elif run.status == "failed":
self.log.error("Unable to generate chart")
break
# Wait for a short period before polling again to avoid hitting rate limits
time.sleep(1)
except Exception as e:
self.log.error(e)
return (None, "🤔 Could you please rephrase your query and try again?")
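# Example usage (a sketch only; it assumes OPENAI_API_KEY and OPENAI_ASSISTANT_ID are set
# and the assistant has the code-interpreter tool enabled, so it is left commented out):
#
#     chart_path, description = ChartUtil().generate_chart("Monthly sales\nJan: 10\nFeb: 14\nMar: 9")
#     print(chart_path, description)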
| [
"Please generate a chart using following data: \nPLACEHOLDER"
] |
2024-01-10 | Spidey24/Doc-Convo2 | backend~backend.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA,ConversationalRetrievalChain
from langchain.vectorstores.pinecone import Pinecone
import pinecone
from dotenv import load_dotenv
load_dotenv()
pinecone.init(
api_key="api-key",
environment="env",
)
index = pinecone.Index('doc-convo-pod')
def run_llm(query: str, chat_history: List[Dict[str, Any]] = []) -> Dict[str, Any]:
embeddings= OpenAIEmbeddings()
docsearch = Pinecone.from_existing_index(
index_name="doc-convo-pod",
embedding=embeddings
)
chat = ChatOpenAI(verbose = True, temperature = 0)
# If we don't want any chat_history then uncomment below code
# qa = RetrievalQA.from_chain_type(
# llm=chat,
# chain_type="stuff",
# retriever=docsearch.as_retriever(),
# )
qa=ConversationalRetrievalChain.from_llm(
llm=chat,
retriever=docsearch.as_retriever()
)
return qa({"question": query, "chat_history": chat_history})
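# Example usage (a sketch only, commented out because it needs live Pinecone and OpenAI
# credentials; the questions are made up):
#
#     first = run_llm("What does the document say about pricing?")
#     history = [("What does the document say about pricing?", first["answer"])]
#     followup = run_llm("And what about discounts?", chat_history=history)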
| [] |
2024-01-10 | Spidey24/Doc-Convo2 | vectorization~ingestion.py | import pinecone
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.pinecone import Pinecone
from langchain.embeddings import OpenAIEmbeddings
from PyPDF2 import PdfReader
pinecone.init(
api_key="api-key", #api key goes here
environment="env",
)
index = pinecone.Index('doc-convo-pod')
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def ingest_docs(file_content):
# Function to get text from PDFs goes here
raw_text = get_pdf_text(file_content)
# Function to get embeddings from text goes here
text_chunks = get_text_chunks(raw_text)
embeddings= OpenAIEmbeddings(disallowed_special=())
ingestor = Pinecone.from_texts(texts=text_chunks , embedding=embeddings, index_name="doc-convo-pod")
try:
if ingestor is None:
print("Ingestion failed")
return "Failed at ingestion, because ingestor is None"
else:
print("Ingestion succeeded")
return "Success"
except Exception as e:
return "Failed at ingestion, error: {}".format(e)
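# Example usage (a sketch only, commented out because it needs live Pinecone and OpenAI
# credentials; the file name is made up):
#
#     with open("report.pdf", "rb") as f:
#         print(ingest_docs([f]))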
| [] |
2024-01-10 | vub-ai-lab/stable-baselines3 | stable_baselines3~common~policies.py | """Policies: abstract base class and concrete implementations."""
import collections
import copy
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
BaseModelSelf = TypeVar("BaseModelSelf", bound="BaseModel")
class BaseModel(nn.Module):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
def _update_features_extractor(
self,
net_kwargs: Dict[str, Any],
features_extractor: Optional[BaseFeaturesExtractor] = None,
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
"""Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_constructor_parameters(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model when loading it from disk.
:return: The dictionary to pass to the as kwargs constructor when reconstruction this model.
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)
@classmethod
def load(cls: Type[BaseModelSelf], path: str, device: Union[th.device, str] = "auto") -> BaseModelSelf:
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
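    # Example (sketch): save()/load() round-trip a policy for any concrete subclass, e.g.
    #   policy.save("policy.pth")
    #   policy = ActorCriticPolicy.load("policy.pth", device="cpu")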
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
def set_training_mode(self, mode: bool) -> None:
"""
Put the policy in either training or evaluation mode.
This affects certain modules, such as batch normalisation and dropout.
:param mode: if true, set to training mode, else set to evaluation mode
"""
self.train(mode)
def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:
"""
Convert an input observation to a PyTorch tensor that can be fed to a model.
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:return: The observation as PyTorch tensor
and whether the observation is vectorized or not
"""
vectorized_env = False
if isinstance(observation, dict):
# need to copy the dict as the dict in VecFrameStack will become a torch tensor
observation = copy.deepcopy(observation)
for key, obs in observation.items():
obs_space = self.observation_space.spaces[key]
if is_image_space(obs_space):
obs_ = maybe_transpose(obs, obs_space)
else:
obs_ = np.array(obs)
vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
# Add batch dimension if needed
observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)
elif is_image_space(self.observation_space):
# Handle the different cases for images
# as PyTorch use channel first format
observation = maybe_transpose(observation, self.observation_space)
else:
observation = np.array(observation)
if not isinstance(observation, dict):
# Dict obs need to be handled separately
vectorized_env = is_vectorized_observation(observation, self.observation_space)
# Add batch dimension if needed
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = obs_as_tensor(observation, self.device)
return observation, vectorized_env
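def _example_obs_to_tensor_usage(model: BaseModel) -> None:
    """Illustrative sketch only: shows how obs_to_tensor() batches a single observation.
    Assumes ``model.observation_space`` is a simple (non-Dict) space; nothing here is
    referenced elsewhere in this module."""
    single_obs = model.observation_space.sample()
    obs_tensor, vectorized = model.obs_to_tensor(single_obs)
    # A lone observation comes back with shape (1, *obs_shape) and is reported as not vectorized.
    assert obs_tensor.shape[0] == 1 and not vectorized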
class BasePolicy(BaseModel, ABC):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
"""(float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last hidden states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
            this corresponds to the beginning of episodes,
where the hidden states of the RNN must be reset.
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if episode_start is None:
# episode_start = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy, and reshape to the original action shape
actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
# Remove batch dimension if needed
if not vectorized_env:
actions = actions.squeeze(axis=0)
return actions, state
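    # Typical call pattern for predict(): it accepts either a single observation or a batch
    # from a vectorized env and mirrors that shape in the returned actions. The env/policy
    # names below are placeholders, not objects defined in this file:
    #
    #     obs = env.reset()
    #     action, _state = policy.predict(obs, deterministic=True)
    #     obs, reward, done, info = env.step(action)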
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
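def _example_action_scaling_roundtrip(policy: BasePolicy) -> None:
    """Illustrative sketch only: scale_action() maps [low, high] onto [-1, 1] and
    unscale_action() inverts it (e.g. with low=-2, high=2 an action of 1.0 maps to 0.5).
    Assumes ``policy.action_space`` is a bounded Box; nothing here is referenced
    elsewhere in this module."""
    action = policy.action_space.sample()
    scaled = policy.scale_action(action)
    recovered = policy.unscale_action(scaled)
    assert np.allclose(action, recovered, atol=1e-6)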
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
self.log_std = th.tensor([1.0])
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": False,
}
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
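    # net_arch cheat-sheet for the spec consumed by MlpExtractor above: plain integers are
    # layers shared by the policy and value networks, and a trailing dict splits them.
    # Both lines below are illustrations only:
    #
    #     net_arch = [dict(pi=[64, 64], vf=[64, 64])]   # no shared layers (the default here)
    #     net_arch = [128, dict(pi=[64], vf=[64])]      # one shared 128-unit layer, then a split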
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def get_advice(self, obs: th.Tensor) -> th.Tensor:
# There must be no "advice" key in obs
if isinstance(obs, dict):
assert "advice" not in obs, "get_advice() cannot be called with advice already in obs, as this prevents the learned policy from producing advice in an unbiased way"
# Ask the actor for means and variances
with th.no_grad():
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
mean = self.action_net(latent_pi)
std = th.ones_like(mean) * th.exp(self.log_std)
if isinstance(self.action_space, gym.spaces.Box):
# Advice is an array of shape (batch_size, 2 (mean/std), *action_shape).
return th.stack([mean, std], axis=1)
else:
# Advice is an array of shape (batch_size, num_actions), that must contain probabilities
return th.distributions.utils.logits_to_probs(mean)
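    # Shape sketch for get_advice(): for a Box action space it returns a
    # (batch_size, 2, action_dim) tensor of per-dimension means and stds, while for a
    # discrete space it returns (batch_size, n_actions) probabilities. The names below
    # are placeholders for tensors already on the policy's device:
    #
    #     advice = policy.get_advice(obs_tensor)   # Box: advice[:, 0] = mean, advice[:, 1] = std
    #     obs_with_advice = {**obs_dict, "advice": advice}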
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, obs)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
actions = actions.reshape((-1,) + self.action_space.shape)
return actions, values, log_prob
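    # forward() is what an on-policy rollout loop consumes: one pass yields the sampled (or
    # deterministic) actions, the value estimates and the action log-probabilities. Sketch
    # with placeholder names:
    #
    #     actions, values, log_probs = policy(obs_tensor)
    #     # actions: (n_envs, *action_shape), values: (n_envs, 1), log_probs: (n_envs,)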
def _get_action_dist_from_latent(self, latent_pi: th.Tensor, obs: th.Tensor) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
log_std = self.log_std
# Use advice if available
if isinstance(obs, dict) and 'advice' in obs:
advice = obs['advice']
if len(advice.shape) >= 3 and advice.shape[1] == 2:
# Mean/std advice for continuous actions
with th.no_grad():
adv_mean = advice[:, 0]
adv_var = advice[:, 1] ** 2
# XXX log_std = th.ones_like(adv_var) * self.log_std
var = th.exp(2. * log_std)
# Combine two normals (end of page 2 of http://www.lucamartino.altervista.org/2003-003.pdf)
#mean_actions = (mean_actions * adv_var + adv_mean * var) / (var + adv_var + 1e-3)
#std = th.sqrt(var * adv_var / (var + adv_var))
#log_std = th.log(std)
mean_actions = (mean_actions + adv_mean) / 2
else:
# Probability distribution (advice contains probabilities)
# Combine with element-wise multiplication of probas.
act = th.distributions.utils.logits_to_probs(mean_actions)
combined = (act * advice)
combined /= combined.sum(1, keepdim=True)
mean_actions = th.distributions.utils.probs_to_logits(combined)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, log_std, latent_pi)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
return self.get_distribution(observation).get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Optional[th.Tensor]]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
distribution = self._get_action_dist_from_latent(latent_pi, obs)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
entropy = distribution.entropy()
return values, log_prob, entropy
def get_distribution(self, obs: th.Tensor) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
return self._get_action_dist_from_latent(latent_pi, obs)
def predict_values(self, obs: th.Tensor) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs:
:return: the estimated values.
"""
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf)
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputActorCriticPolicy(ActorCriticPolicy):
"""
    Multi-input actor-critic policy class (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
This allows to reduce computation when all the estimates are not needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
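def _example_continuous_critic_target(critic: ContinuousCritic, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
    """Illustrative sketch only: the critic returns one Q-value tensor per network
    (``n_critics`` of them), so TD3/SAC-style targets typically take the element-wise
    minimum. Inputs are assumed to be already-batched tensors on the critic's device."""
    q_values = th.cat(critic(obs, actions), dim=1)   # (batch_size, n_critics)
    return th.min(q_values, dim=1, keepdim=True).values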
| [] |
2024-01-10 | ConnectAI-E/LangChain-Tutior | python~project-code~L6-Agent-New.py | #import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
import warnings
warnings.filterwarnings("ignore")
from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
#from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI
import langchain
llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math", "wikipedia"], llm=llm)
customer_list = [["Harrison", "Chase"],
["Lang", "Chain"],
["Dolly", "Too"],
["Elle", "Elem"],
["Geoff", "Fusion"],
["Trance", "Former"],
["Jen", "Ayai"]]
def do_answer1():
langchain.debug = True
agent = create_python_agent(
llm,
tool=PythonREPLTool(),
verbose=True
)
answer = agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
print(answer)
langchain.debug = False
def do_answer2():
from langchain.agents import tool
from datetime import date
langchain.debug = True
@tool
def time(text: str) -> str:
"""Returns todays date, use this for any \
questions related to knowing todays date. \
The input should always be an empty string, \
and this function will always return todays \
date - any date mathmatics should occur \
outside this function."""
return str(date.today())
agent = initialize_agent(
tools + [time],
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
    result = None
    try:
        result = agent("whats the date today?")
    except Exception:  # noqa
        print("exception on external access")
    print(result)
langchain.debug = False
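# Note on the custom tool above: the @tool decorator turns the function's docstring into the
# description the agent reasons over, which is how it knows to pick "time" for date questions.
# Inside do_answer2 one could also invoke it directly, e.g. time.run("") returns today's date
# as a string (hypothetical usage, not part of the agent loop above).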
if __name__ == "__main__":
#do_answer1()
do_answer2()
| [
"Returns todays date, use this for any questions related to knowing todays date. The input should always be an empty string, and this function will always return todays date - any date mathmatics should occur outside this function."
] |
2024-01-10 | ConnectAI-E/LangChain-Tutior | python~project-code~L0-raw.py | from langchain.chat_models import ChatOpenAI
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
raw_prompt = """Human: You are a teacher coming up with questions to ask on a quiz.\n\n
Given the following document, please generate a question and answer based on that document.\n\n
Example Format:\n<Begin Document>\n...\n<End Document>\nQUESTION: question here\nANSWER: answer here\n\n
These questions should be detailed and be based explicitly on information in the document. Begin!\n\n
<Begin Document>\npage_content=\": 0\\nname: Women's Campside Oxfords\\n
description: This ultracomfortable lace-to-toe Oxford boasts a super-soft canvas, thick cushioning, and quality construction for a broken-in feel from the first time you put them on. \\n\\n\
Size & Fit: Order regular shoe size. For half sizes not offered, order up to next whole size. \\n\\n
Specs: Approx. weight: 1 lb.1 oz. per pair. \\n\\n
Construction: Soft canvas material for a broken-in feel and look. Comfortable EVA innersole with Cleansport NXT® antimicrobial odor control. Vintage hunt, fish and camping motif on innersole. Moderate arch contour of innersole. EVA foam midsole for cushioning and support. Chain-tread-inspired molded rubber outsole with modified chain-tread pattern. Imported. \\n\\n
Questions? Please contact us for any inquiries.\" metadata={'source': 'OutdoorClothingCatalog_1000.csv', 'row': 0}\n<End Document>"""
def call_as_llm(raw_prompt):
raw_prompt = raw_prompt.replace("\\\\", "\\")
raw_prompt = raw_prompt.replace("\\n", "\n")
print()
print(raw_prompt)
print()
llm = ChatOpenAI(temperature = 0.0)
response = llm.call_as_llm(raw_prompt)
print()
print("response")
print(response)
return response
if __name__ == "__main__":
response = call_as_llm(raw_prompt)
| [
"Human: You are a teacher coming up with questions to ask on a quiz.\n\n\nGiven the following document, please generate a question and answer based on that document.\n\n\nExample Format:\n<Begin Document>\n...\n<End Document>\nQUESTION: question here\nANSWER: answer here\n\n\nThese questions should be detailed and be based explicitly on information in the document. Begin!\n\n\n<Begin Document>\npage_content=\": 0\nname: Women's Campside Oxfords\n\ndescription: This ultracomfortable lace-to-toe Oxford boasts a super-soft canvas, thick cushioning, and quality construction for a broken-in feel from the first time you put them on. \n\nSize & Fit: Order regular shoe size. For half sizes not offered, order up to next whole size. \n\n\nSpecs: Approx. weight: 1 lb.1 oz. per pair. \n\n\nConstruction: Soft canvas material for a broken-in feel and look. Comfortable EVA innersole with Cleansport NXT® antimicrobial odor control. Vintage hunt, fish and camping motif on innersole. Moderate arch contour of innersole. EVA foam midsole for cushioning and support. Chain-tread-inspired molded rubber outsole with modified chain-tread pattern. Imported. \n\n\nQuestions? Please contact us for any inquiries.\" metadata={'source': 'OutdoorClothingCatalog_1000.csv', 'row': 0}\n<End Document>",
"Human: You are a teacher coming up with questions to ask on a quiz.\n\n\nGiven the following document, please generate a question and answer based on that document.\n\n\nExample Format:\n<Begin Document>\n...\n<End Document>\nQUESTION: question here\nANSWER: answer here\n\n\nThese questions should be detailed and be based explicitly on information in the document. Begin!\n\n\n<Begin Document>\npage_content=\": 0\\nname: Women's Campside Oxfords\\n\ndescription: This ultracomfortable lace-to-toe Oxford boasts a super-soft canvas, thick cushioning, and quality construction for a broken-in feel from the first time you put them on. \\n\\nSize & Fit: Order regular shoe size. For half sizes not offered, order up to next whole size. \\n\\n\nSpecs: Approx. weight: 1 lb.1 oz. per pair. \\n\\n\nConstruction: Soft canvas material for a broken-in feel and look. Comfortable EVA innersole with Cleansport NXT® antimicrobial odor control. Vintage hunt, fish and camping motif on innersole. Moderate arch contour of innersole. EVA foam midsole for cushioning and support. Chain-tread-inspired molded rubber outsole with modified chain-tread pattern. Imported. \\n\\n\nQuestions? Please contact us for any inquiries.\" metadata={'source': 'OutdoorClothingCatalog_1000.csv', 'row': 0}\n<End Document>"
] |
2024-01-10 | ConnectAI-E/LangChain-Tutior | python~project-code~L6-Agent.py | #import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
import warnings
warnings.filterwarnings("ignore")
#from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
#from langchain.tools.python.tool import PythonREPLTool
#from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI
import langchain
llm = ChatOpenAI(temperature=0)
tools = load_tools(["llm-math", "wikipedia"], llm=llm)
def do_answer1():
langchain.debug = True
agent = initialize_agent(
tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
answer = agent("What is the 25% of 300?")
print(answer)
langchain.debug = False
question = "Tom M. Mitchell is an American computer scientist \
and the Founders University Professor at Carnegie Mellon University (CMU)\
what book did he write?"
def do_answer2():
langchain.debug = True
agent = initialize_agent(
tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
answer = agent(question)
print(answer)
langchain.debug = False
if __name__ == "__main__":
#do_answer1()
do_answer2() | [] |
2024-01-10 | ConnectAI-E/LangChain-Tutior | python~project-code~L5-Evaluation.py | from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vectorstores import DocArrayInMemorySearch
import langchain
file = 'OutdoorClothingCatalog_1000.csv'
def create_retrievel_qa():
loader = CSVLoader(file_path=file)
#data = loader.load()
index = VectorstoreIndexCreator(
vectorstore_cls=DocArrayInMemorySearch
).from_loaders([loader])
llm = ChatOpenAI(temperature = 0.0)
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=index.vectorstore.as_retriever(),
verbose=True,
chain_type_kwargs = {
"document_separator": "<<<<>>>>>"
}
)
print(qa)
return qa
def do_gen_questions_on_docs(num_docs=2):
from langchain.evaluation.qa import QAGenerateChain
langchain.debug = True
loader = CSVLoader(file_path=file)
data = loader.load()
print(len(data))
print(data[0])
print(data[1])
print()
example_gen_chain = QAGenerateChain.from_llm(ChatOpenAI())
docs = [{"doc": t} for t in data[:num_docs]]
print(docs)
print()
new_examples = example_gen_chain.apply_and_parse(docs)
print(new_examples)
langchain.debug = False
return new_examples
def ask_questions_from_examples():
langchain.debug = True
examples = [
{
"query": "Do the Cozy Comfort Pullover Set have side pockets?",
"answer": "Yes"
},
{
"query": "What collection is the Ultra-Lofty 850 Stretch Down Hooded Jacket from?",
"answer": "The DownTek collection"
}
]
examples += do_gen_questions_on_docs()
for ndx, example in enumerate(examples):
print(ndx, example)
qa = create_retrievel_qa()
query = examples[0]["query"]
response = qa.run(query)
print("query: \t", query)
print("answser:\t", response)
print()
query = examples[2]["query"]
response = qa.run(query)
print("query: \t", query)
print("answser:\t", response)
print()
langchain.debug = False
def ensure_questions_answered_correctly():
langchain.debug = True
examples = [
{
"query": "Do the Cozy Comfort Pullover Set have side pockets?",
"answer": "Yes"
},
{
"query": "What collection is the Ultra-Lofty 850 Stretch Down Hooded Jacket from?",
"answer": "The DownTek collection"
}
]
examples += do_gen_questions_on_docs()
for ndx, example in enumerate(examples):
print(ndx, example)
qa = create_retrievel_qa()
predictions = qa.apply(examples)
from langchain.evaluation.qa import QAEvalChain
llm = ChatOpenAI(temperature=0)
eval_chain = QAEvalChain.from_llm(llm)
graded_outputs = eval_chain.evaluate(examples, predictions)
for i, eg in enumerate(examples):
print(f"Example {i}:")
print("Question: " + predictions[i]['query'])
print("Real Answer: " + predictions[i]['answer'])
print("Predicted Answer: " + predictions[i]['result'])
print("Predicted Grade: " + graded_outputs[i]['text'])
print()
langchain.debug = False
if __name__ == "__main__":
#do_gen_questions_on_docs()
ask_questions_from_examples()
#ensure_questions_answered_correctly() | [] |
2024-01-10 | ConnectAI-E/LangChain-Tutior | python~project-code~L4-QA.py | #import os
import json
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.indexes import VectorstoreIndexCreator
import langchain
#from IPython.display import display, Markdown
file = 'OutdoorClothingCatalog_1000_0.csv'
def load_df():
import pandas as pd
df = pd.read_csv(file)
print(df)
def index_and_query():
langchain.debug = True
loader = CSVLoader(file_path=file)
print("build vectorstore index...")
index = VectorstoreIndexCreator(
vectorstore_cls=DocArrayInMemorySearch
).from_loaders([loader])
print("build vectorstore done.")
query = "Please list all your shirts with sun protection in a table in markdown and summarize each one."
#query = "Please list all your shirts with sun protection in json list, each shirt has name and description as keywords for the json dictionary, and summarize each one."
response = index.query(query)
print(response)
try:
rjson = json.loads(response.encode('utf-8'))
print(rjson)
except: # noqa
pass
langchain.debug = False
def get_db():
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
embed = embeddings.embed_query("Hi my name is Harrison")
print(len(embed))
loader = CSVLoader(file_path=file)
docs = loader.load()
db = DocArrayInMemorySearch.from_documents(docs, embeddings)
return db
def complete_stepbystep_raw():
#from langchain.embeddings import OpenAIEmbeddings
langchain.debug = True
db = get_db()
query = "Please suggest a shirt with sunblocking"
docs = db.similarity_search(query)
print(len(docs))
print(docs[0])
llm = ChatOpenAI(temperature = 0.0)
qdocs = "".join([docs[i].page_content for i in range(len(docs))])
print(qdocs)
response = llm.call_as_llm(f"{qdocs} Question: Please list all your shirts with sun protection in a table in markdown and summarize each one.")
print()
print("response")
print(response)
langchain.debug = False
def complete_stepbystep_retrievalqa():
langchain.debug = True
llm = ChatOpenAI(temperature = 0.0)
db = get_db()
retriever = db.as_retriever()
qa_stuff = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
verbose=True
)
query = "Please list all your shirts with sun protection in a table in markdown and summarize each one."
response_final = qa_stuff.run(query)
print()
print("response_final")
print(response_final)
langchain.debug = False
#response = index.query(query, llm=llm)
if __name__ == "__main__":
load_df()
#index_and_query()
    #complete_stepbystep_raw()
complete_stepbystep_retrievalqa() | [] |
2024-01-10 | ConnectAI-E/LangChain-Tutior | python~project-code~L1-Model_prompt_parser.py | import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
openai.api_key = os.environ['OPENAI_API_KEY']
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
)
return response.choices[0].message["content"]
def exam0():
response = get_completion("What is 1+1?")
print(response)
def exam1():
customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse,\
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""
style = """American English \
in a calm and respectful tone
"""
prompt = f"""Translate the text \
that is delimited by triple backticks
into a style that is {style}.
text: ```{customer_email}```
"""
print(prompt)
response = get_completion(prompt)
print(response)
def chat_exam0():
from langchain.chat_models import ChatOpenAI
chat = ChatOpenAI(temperature=0.0)
print(chat)
template_string = """Translate the text \
that is delimited by triple backticks \
into a style that is {style}. \
text: ```{text}```
"""
from langchain.prompts import ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_template(template_string)
customer_style = """American English \
in a calm and respectful tone
"""
customer_email = """
Arrr, I be fuming that me blender lid \
flew off and splattered me kitchen walls \
with smoothie! And to make matters worse, \
the warranty don't cover the cost of \
cleaning up me kitchen. I need yer help \
right now, matey!
"""
customer_messages = prompt_template.format_messages(style=customer_style,
text=customer_email)
# Call the LLM to translate to the style of the customer message
customer_response = chat(customer_messages)
print(customer_response.content)
if __name__ == "__main__":
# exam0()
#exam1()
chat_exam0()
| [
"Translate the text that is delimited by triple backticks into a style that is {style}. text: ```{text}```\n ",
"Translate the text that is delimited by triple backticks \n into a style that is PLACEHOLDER.\n text: ```PLACEHOLDER```\n "
] |
2024-01-10 | ByteSoft-Devs/skillcraft-studio | bot~cogs~slash.py | import os
import openai
import discord
from discord import app_commands
import random
from datetime import datetime, timedelta
from core.classes import Cog_Extension
context_store = {}
class Slash(Cog_Extension):
@app_commands.command(name="register", description="Зарегистрируйтесь в SkillCraft Studio")
async def register_user(self, interaction: discord.Interaction, api_key: str, prompt_name: str = None):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/register` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id).split("#")[0]
users_file_path = 'users.txt'
user_folder_path = f'users/{user_id}'
openai_folder_path = f'{user_folder_path}/openai'
skills_folder_path = f'{user_folder_path}/skills'
prompt_file_path = f'{user_folder_path}/{prompt_name}.txt'
key_file_path = f'{openai_folder_path}/key.txt'
temporary_prompt_file_path = f'{openai_folder_path}/temporary_prompt.txt'
with open(users_file_path, 'r') as f:
if f'{user_id}#' in f.read():
await interaction.response.send_message("Ошибка: Вы уже зарегистрированы.", ephemeral=True)
return
if not api_key.startswith("sk-") or len(api_key) > 75:
await interaction.response.send_message("Ошибка: Некорректный токен. Вы также можете купить токен, воспользовавшись командой `/buy-key`",
ephemeral=True)
return
registration_date = datetime.now().strftime("%d %B %Y г.")
user_data = f'{user_id}#{registration_date}\n'
with open(users_file_path, 'a') as f:
f.write(user_data)
os.makedirs(user_folder_path, exist_ok=True)
os.makedirs(openai_folder_path, exist_ok=True)
os.makedirs(skills_folder_path, exist_ok=True)
open(key_file_path, 'w').write(api_key)
open(temporary_prompt_file_path, 'w').close()
        if prompt_name:
            open(prompt_file_path, 'w').close()
await interaction.response.send_message("Вы успешно зарегистрировались. Рекомендуем ознакомится с [документацией](https://docs.kazetech.ru/skillcraft-studio/rabota-s-skillcraft-studio) перед работой с SkillCraft Studio",
ephemeral=True)
@app_commands.command(name="new-prompt", description="Создает новый промпт")
async def new_prompt(self, interaction: discord.Interaction, name: str, text: str = ""):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/new-prompt` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
os.makedirs(user_folder_path, exist_ok=True)
file_path = f"{user_folder_path}/{name}.txt"
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
if os.path.exists(file_path):
await interaction.response.send_message("Ошибка: Промпт с таким именем уже существует.")
return
with open(file_path, 'w') as f:
f.write(text)
await interaction.response.send_message("Промпт успешно создан.")
@app_commands.command(name="activate-key", description="Активировать OpenAI API ключ")
@app_commands.choices(apply=[
app_commands.Choice(name="Да", value="YES"),
app_commands.Choice(name="Нет", value="NO"),
])
async def activate_key(self, interaction: discord.Interaction, code: str, apply: str):
await interaction.response.defer()
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/activate-key` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
with open("codes.txt", "r") as codes_file, open("keys.txt", "r") as keys_file:
codes = codes_file.read().splitlines()
keys = keys_file.read().splitlines()
if code in codes:
codes.remove(code)
if apply == "YES" and keys:
user_folder = f"users/{interaction.user.id}"
if not os.path.exists(user_folder):
await interaction.followup.send("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>", ephemeral=True)
return
selected_key = random.choice(keys)
user_key_file_path = f"users/{interaction.user.id}/openai/key.txt"
with open(user_key_file_path, "w") as user_key_file:
user_key_file.write(selected_key)
embed = discord.Embed(title="Покупка OpenAI API ключа", color=discord.Color.green())
embed.add_field(name="Покупка успешно завершена.", value=f"Ваш API ключ: **||{selected_key}||**\nAPI ключ был автоматически заменен")
await interaction.followup.send(embed=embed, ephemeral=True)
keys.remove(selected_key)
elif apply == "NO" and keys:
selected_key = random.choice(keys)
embed = discord.Embed(title="Успешная покупка API ключа", color=discord.Color.green())
embed.add_field(name="Покупка успешно завершена.", value=f"Ваш API ключ: **||{selected_key}||**")
await interaction.followup.send(embed=embed, ephemeral=True)
keys.remove(selected_key)
else:
await interaction.followup.send(
"Ошибка: API ключи закончились. Попробуйте повторить попытку чуть позже.", ephemeral=True)
with open("codes.txt", "w") as codes_file, open("keys.txt", "w") as keys_file:
codes_file.write("\n".join(codes))
keys_file.write("\n".join(keys))
else:
await interaction.followup.send("Ошибка: Введенный код активации не существует.", ephemeral=True)
@app_commands.command(name="edit-prompt", description="Редактирует промпт")
async def edit_prompt(self, interaction: discord.Interaction, prompt_name: str, new_name: str = "", text: str = ""):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/edit-prompt` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
file_path = f"{user_folder_path}/{prompt_name}.txt"
await interaction.response.defer()
        with open('users.txt', 'r') as f:
            register = [line.strip().split('#')[0] for line in f]
        if str(interaction.user.id) not in register:
            await interaction.followup.send("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
            return
        if not os.path.exists(file_path):
            await interaction.followup.send("Ошибка: Промпт не найден.")
            return
        if not new_name and not text:
            await interaction.followup.send("Ошибка: Для изменения промпта необходимо заполнить хотя бы одно редакционное поле.")
            return
        if new_name:
            new_file_path = f"{user_folder_path}/{new_name}.txt"
            if os.path.exists(new_file_path):
                await interaction.followup.send("Ошибка: Файл с новым именем уже существует.")
                return
            os.rename(file_path, new_file_path)
            file_path = new_file_path
        if text:
            with open(file_path, 'w') as f:
                f.write(text)
        await interaction.followup.send("Промпт успешно отредактирован.")
@app_commands.command(name="prompts-list", description="Выводит список промптов")
async def prompts_list(self, interaction: discord.Interaction):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/prompt-list` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
        with open('users.txt', 'r') as f:
            register = [line.strip().split('#')[0] for line in f]
        if str(interaction.user.id) not in register:
            await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
            return
if not os.path.exists(user_folder_path):
await interaction.response.send_message("Ошибка: Папка пользователя не найдена.")
return
files = [file for file in os.listdir(user_folder_path) if os.path.isfile(os.path.join(user_folder_path, file))]
if not files:
await interaction.response.send_message("Список промптов пуст.")
return
prompt_names = [os.path.splitext(file)[0] for file in files]
embed = discord.Embed(title="Список промптов", description="\n".join(prompt_names), color=discord.Color.green())
await interaction.response.send_message(embed=embed)
@app_commands.command(name="delete-prompt", description="Удаляет промпт")
async def delete_prompt(self, interaction: discord.Interaction, prompt_name: str):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/delete-prompt` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
file_path = f"{user_folder_path}/{prompt_name}.txt"
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
if not os.path.exists(file_path):
await interaction.response.send_message("Ошибка: Промпт не найден.")
return
os.remove(file_path)
await interaction.response.send_message(f"Промпт `{prompt_name}` успешно удален.")
@app_commands.command(name="change-key", description="Изменяет API-ключ")
async def change_key(self, interaction: discord.Interaction, new_key: str):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/change-key` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
openai_folder_path = f"{user_folder_path}/openai"
key_file_path = f"{openai_folder_path}/key.txt"
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message(
"Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
if not new_key.startswith("sk-") or len(new_key) > 75:
await interaction.response.send_message("Ошибка: Некорректный новый API-ключ.")
return
if not os.path.exists(openai_folder_path):
await interaction.response.send_message("Ошибка: Невозможно поменять api ключ.", ephemeral=True)
return
with open(key_file_path, 'w') as f:
f.write(new_key)
await interaction.response.send_message("API-ключ успешно изменен.", ephemeral=True)
@app_commands.command(name="show-prompt", description="Показывает содержимое промпта")
async def show_prompt(self, interaction: discord.Interaction, prompt_name: str):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/show-prompt` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
file_path = f"{user_folder_path}/{prompt_name}.txt"
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
if not os.path.exists(file_path):
await interaction.response.send_message("Ошибка: Промпт не найден или он пустой.")
return
with open(file_path, 'r') as f:
prompt_content = f.read()
max_chars_per_embed = 1024
chunks = [prompt_content[i:i + max_chars_per_embed] for i in range(0, len(prompt_content), max_chars_per_embed)]
        for index, chunk in enumerate(chunks):
            embed = discord.Embed(title=f"Ваш промпт: {prompt_name} (часть {index + 1})", color=discord.Color.blue())
            embed.add_field(name="Содержимое", value=chunk, inline=False)
            if index == 0:
                await interaction.response.send_message(embed=embed)
            else:
                await interaction.followup.send(embed=embed)
@app_commands.command(name="test-prompt", description="Запускает исполнение промпта")
async def test_prompt(self, interaction: discord.Interaction, prompt_name: str):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/test-prompt` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
prompt_file_path = f"{user_folder_path}/{prompt_name}.txt"
temporary_prompt_file_path = f"{user_folder_path}/openai/temporary_prompt.txt"
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
if not os.path.exists(prompt_file_path):
await interaction.response.send_message("Ошибка: Промпт не найден.")
return
with open(prompt_file_path, 'r') as f:
prompt_text = f.read()
with open(temporary_prompt_file_path, 'w') as f:
f.write(prompt_text)
if not prompt_text.strip():
await interaction.response.send_message("Ошибка: Промпт не содержит текста.")
return
await interaction.response.send_message(f"Промпт `{prompt_name}` был запущен.")
@app_commands.command(name="test-chat", description="Чат с промптом")
async def test_chat(self, interaction: discord.Interaction, message: str):
user = interaction.user.name
        log_message = (f'Пользователь {user} использовал команду `/test-chat` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
        await channel.send(log_message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
openai_folder_path = f"{user_folder_path}/openai"
key_file_path = f"{openai_folder_path}/key.txt"
temporary_prompt_file_path = f"{openai_folder_path}/temporary_prompt.txt"
await interaction.response.defer()
        with open('users.txt', 'r') as f:
            register = [line.strip().split('#')[0] for line in f]
        if str(interaction.user.id) not in register:
            await interaction.followup.send("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
            return
        if not os.path.exists(openai_folder_path):
            await interaction.followup.send("Ошибка: Невозможно вести диалог.")
            return
        with open(key_file_path, 'r') as f:
            api_key = f.read().strip()
        if not os.path.exists(temporary_prompt_file_path):
            await interaction.followup.send("Ошибка: Невозможно вести диалог.")
            return
        with open(temporary_prompt_file_path, 'r') as f:
            temporary_prompt = f.read().strip()
now = datetime.now()
expiration_time = now + timedelta(minutes=120)
if user_id not in context_store:
context_store[user_id] = {
"expiration_time": expiration_time,
"context": []
}
else:
if now > context_store[user_id]["expiration_time"]:
context_store[user_id] = {
"expiration_time": expiration_time,
"context": []
}
context = context_store[user_id]["context"]
context.append({"role": "user", "content": message})
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": " "},
{"role": "user", "content": temporary_prompt}] + context,
api_key=api_key
)
reply = response.choices[0].message.content.strip()
respone = f"**Aika AI:** {reply}"
context.append({"role": "assistant", "content": reply})
await interaction.followup.send(respone)
except Exception as e:
await interaction.followup.send(f"При обработке запроса в чат произошла ошибка. Скорее всего из за отсутствия токенов. Купить новый API ключ помжно по команде `/buy-key`.")
@app_commands.command(name="test-stop", description="Остановить тестовый чат")
async def test_stop(self, interaction: discord.Interaction):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/test-stop` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f"users/{user_id}"
openai_folder_path = f"{user_folder_path}/openai"
temporary_prompt_file_path = f"{openai_folder_path}/temporary_prompt.txt"
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
if not os.path.exists(openai_folder_path):
await interaction.response.send_message("Ошибка: Невозможно запустить промпт.")
return
if not os.path.exists(temporary_prompt_file_path):
await interaction.response.send_message("Ошибка: Невозможно запустить промпт.")
return
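# Clear the temporary prompt file and drop the user's cached conversation context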
with open(temporary_prompt_file_path, 'w') as f:
f.write('')
context_store.pop(user_id, None)
await interaction.response.send_message("Выполнение промпта было остановлено.")
@app_commands.command(name="profile", description="Показать профиль пользователя")
async def show_profile(self, interaction: discord.Interaction):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/profile` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
users_file_path = 'users.txt'
user_folder_path = f'users/{user_id}'
openai_folder_path = f'{user_folder_path}/openai'
key_file_path = f'{openai_folder_path}/key.txt'
with open('users.txt', 'r') as f:
register = [line.strip().split('#')[0] for line in f]
if str(interaction.user.id) not in register:
await interaction.response.send_message("Вы еще не зарегистрировались в SkillCraft Studio. Чтобы это сделать, воспользуйтесь командой </register:1131239719263547502>")
return
with open(users_file_path, 'r') as f:
user_data = None
for line in f:
if line.startswith(f'{user_id}#'):
user_data = line.strip()
break
if not user_data:
await interaction.response.send_message("Ошибка: Вы не зарегистрированы.")
return
username = user_data.split("#")[1]
api_key = open(key_file_path, 'r').read()
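# Mask the stored API key for display, keeping only the first and last three characters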
if len(api_key) > 6:
api_key = f"{api_key[:3]}{'*' * (len(api_key) - 6)}{api_key[-3:]}"
prompt_count = len([name for name in os.listdir(user_folder_path) if name.endswith('.txt')])
registration_date = user_data.split("#", 2)[-1].strip() if "#" in user_data else "Дата регистрации неизвестна"
embed = discord.Embed(title=f"Профиль пользователя: {interaction.user.name}", color=discord.Color.blue())
embed.set_thumbnail(url=interaction.user.avatar.url)
embed.add_field(name="Никнейм", value=f"<@{user_id.split('#')[0]}>", inline=False)
embed.add_field(name="API ключ", value=f"{api_key}\n> Купите API ключ всего за 20 рублей по команде `/buy-key`", inline=False)
embed.add_field(name="ID пользователя", value=user_id, inline=False)
embed.add_field(name="Кол-во промптов", value=prompt_count, inline=False)
embed.add_field(name="Дата регистрации", value=registration_date, inline=False)
await interaction.response.send_message(embed=embed)
@app_commands.command(name="buy-key", description="Купить API ключ OpenAI")
async def buy_api_key(self, interaction: discord.Interaction):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/buy-key` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
embed = discord.Embed(title="Купить API ключ OpenAI", description="API ключ позволит вам начать использование SkillCraft Studio, а также даст возможность полноценного взаимодействия.", color=discord.Color.blue())
embed.add_field(name="Купить API ключ", value="[Купить здесь](https://www.donationalerts.com/r/skillcraftstudio)", inline=False)
await interaction.response.send_message(embed=embed)
@app_commands.command(name="info", description="Получить информацию о боте")
async def show_info(self, interaction: discord.Interaction):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/info` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
version = "1.0.00 (release)"
status = "🟢 - В полном порядке"
ping = f"{round(self.bot.latency * 1000)}ms"
users_file_path = 'users.txt'
servers_count = len(self.bot.guilds)
last_update_date = "<t:1691692620:D>, <t:1691692620:R>"
with open(users_file_path, 'r') as f:
users_count = len(f.readlines())
embed = discord.Embed(title="Информация о боте", color=discord.Color.green())
embed.add_field(name="Версия", value=version, inline=False)
embed.add_field(name="Статус", value=status, inline=False)
embed.add_field(name="Пинг", value=ping, inline=False)
embed.add_field(name="Кол-во пользователей", value=str(users_count), inline=False)
embed.add_field(name="Кол-во серверов", value=str(servers_count), inline=False)
embed.add_field(name="Последнее обновление", value=last_update_date, inline=False)
embed.add_field(name="Прочая информация", value="**[Политика Конфиденциальности](https://example.com/privacy) [Условия использования](https://example.com/terms)\n[Сервер поддержки](https://discord.gg/KKzBPg6jnu) [Документация](https://internet-2.gitbook.io/kaze-docs/skillcraft-studio/rabota-s-skillcraft-studio)**", inline=False)
await interaction.response.send_message(embed=embed)
@app_commands.command(name="public-skill", description="Публикует навык")
async def public_skill(self, interaction: discord.Interaction, name: str, logo: str, phrase_activate: str, short_describe: str, full_describe: str, tags: str):
user = interaction.user.name
message = (f'Пользователь {user} использовал команду `/public-skill` в канале `{interaction.channel.name if isinstance(interaction.channel, discord.TextChannel) else "Direct Message"}`')
channel_id =
channel = self.bot.get_channel(channel_id)
await channel.send(message)
user_id = str(interaction.user.id)
user_folder_path = f'users/{user_id}'
if not os.path.exists(user_folder_path):
await interaction.response.send_message("Ошибка: Вы не зарегистрированы. Используйте команду /register.")
return
skill_file_path = f'{user_folder_path}/{name}.txt'
if not os.path.exists(skill_file_path):
await interaction.response.send_message("Ошибка: Навык с таким названием не найден.")
return
channel_id =
channel = self.bot.get_channel(channel_id)
user_embed = discord.Embed(title=f"Заявка на добавление навыка: {name}", color=discord.Color.blue())
user_embed.add_field(name="ID Создателя", value=user_id, inline=False)
user_embed.add_field(name="Название навыка", value=name, inline=False)
user_embed.add_field(name="Лого навыка", value=logo, inline=False)
user_embed.add_field(name="Фраза активатор навыка", value=phrase_activate, inline=False)
user_embed.add_field(name="Краткое описание", value=short_describe, inline=False)
user_embed.add_field(name="Полное описание", value=full_describe, inline=False)
user_embed.add_field(name="Теги", value=tags, inline=False)
await channel.send("Новый навык был отправлен на проверку @everyone", embed=user_embed)
with open(skill_file_path, 'r') as f:
skill_content = f.read()
skill_embed = discord.Embed(title=f"Навык: {name}", description=skill_content, color=discord.Color.green())
await channel.send(embed=skill_embed)
await interaction.response.send_message(f"Навык `{name}` был отправлен на модерацию.")
async def setup(bot):
await bot.add_cog(Slash(bot)) | [
"PLACEHOLDER/temporary_prompt.txt",
" ",
"content",
"PLACEHOLDER/PLACEHOLDER.txt",
"PLACEHOLDER/openai/temporary_prompt.txt"
] |
2024-01-10 | ymcrcat/gmail-chat | gmail_chat~__main__.py | import sys
import os
import os.path
import base64
import pickle
import cmd
import re
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.api_core.exceptions import BadRequest
import dateutil.parser as parser
from tqdm import tqdm
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Qdrant
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.retrievers.self_query.base import SelfQueryRetriever
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.chains import RetrievalQA
from langchain.agents import Tool, initialize_agent
MAX_MESSAGES = 100
MODEL_NAME = "text-embedding-ada-002" # Name of the model used to generate text embeddings
MAX_TOKENS = 120000 # Maximum chunk size (characters) used by the text splitter
CHUNK_OVERLAP = 0 # Overlap between chunks
COLLECTION_NAME = "email-index"
METRIC = "cosine"
GPT_MODEL = 'gpt-4-1106-preview'
metadata_field_info = [
AttributeInfo(name='id', description='Message ID', type='string', is_primary_key=True),
AttributeInfo(name='subject', description='Email subject', type='string', is_primary_key=False),
AttributeInfo(name='from', description='Email sender', type='string', is_primary_key=False),
AttributeInfo(name='to', description='Email recipient', type='string', is_primary_key=False),
AttributeInfo(name='date', description='Email receipt date and time', type='string', is_primary_key=False)
]
def chunk_text(text):
# Initialize the text splitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=MAX_TOKENS,
chunk_overlap=CHUNK_OVERLAP)
# Split the tokens into chunks of MAX_TOKENS tokens
chunks = text_splitter.split_text(text)
# Return the chunks
return chunks
def vectorstore_setup():
"""Load stored email index from file"""
docs = pickle.load(open('email_index.pkl', 'rb'))
vectorstore = Qdrant.from_documents(docs, embedding=OpenAIEmbeddings(), location=":memory:", collection_name=COLLECTION_NAME)
print("Vectorstore created from emails")
return vectorstore
def get_gmail_credentials():
"""Get Gmail credentials from credentials.json file or token.pickle file"""
# If modifying these SCOPES, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/gmail.readonly']
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
# Load credentials from credentials.json file
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=52102)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return creds
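# Parse an email Date header, falling back to stripping trailing numeric artifacts if the first attempt fails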
def parse_date(date_string):
try:
date = parser.parse(date_string)
except ValueError:
try:
cleaned_date_string = re.sub(r'\.\d+_\d+$', '', date_string)
date = parser.parse(cleaned_date_string)
except ValueError:
date = None
return date
# Function to decode the message part
def decode_part(part):
if 'body' in part.keys():
data = part['body']['data']
else:
return None
data = data.replace('-', '+').replace('_', '/')
decoded_bytes = base64.urlsafe_b64decode(data)
return decoded_bytes.decode('utf-8')
# Function to find the desired message part
def find_part(parts, mime_type):
for part in parts:
if part['mimeType'] == mime_type:
return part
return None
message_count = 0 # Global variable to keep track of number of messages processed
def index_gmail():
vectorstore_path = os.getenv('VECTORSTORE_PATH')
if not vectorstore_path:
sys.exit("VECTORSTORE_PATH environment variable is not set")
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
if not OPENAI_API_KEY:
sys.exit("OPENAI_API_KEY environment variable is not set")
creds = get_gmail_credentials()
docs = []
try:
def process_email(msg):
"""Process email data and add to index"""
global message_count
email_data = msg['payload']['headers']
subject = ''
to_name = ''
for values in email_data:
name = values['name']
if name == 'From':
from_name = values['value']
if name == 'To':
to_name = values['value']
if name == 'Subject':
subject = values['value']
if name == 'Date':
date_value = values['value']
datetime_object = parse_date(date_value)
try:
data = None
payload = msg['payload']
if 'parts' in payload and len(payload['parts']) > 0:
part = find_part(payload['parts'], 'text/plain')
if part:
data = decode_part(part)
else:
part = find_part(payload['parts'], 'text/html')
if part:
data = decode_part(part)
if not data:
raise ValueError(f"Couldn't find body in message {msg['id']}")
# Embed email data and add to index
chunks = chunk_text(data)
docs.extend([Document(page_content=chunk,
metadata={'id': msg['id'], 'subject': subject,
'from': from_name, 'to': to_name,
'date': datetime_object}) for chunk in chunks])
pickle.dump(docs, open('email_index.pkl', 'wb'))
message_count += 1
except Exception as e:
print(f"\nError while processing email {msg['id']}: {e}")
# Define a function to get all messages recursively
def get_all_emails(gmail, query):
messages = []
page_token=None
while True:
try:
result = gmail.users().messages().list(q=query,
userId='me',
maxResults=MAX_MESSAGES,
pageToken=page_token).execute()
messages.extend( result.get('messages', []) )
page_token = result.get('nextPageToken', None)
if (not page_token) or len(messages) >= MAX_MESSAGES:
break
except HttpError as error:
print(f"An error occurred: {error}")
break
return messages
gmail = build('gmail', 'v1', credentials=creds)
# A query to retrieve all emails, including archived ones
query = "in:all"
emails = get_all_emails(gmail, query)
# Process and print the result
for email in tqdm(emails, desc='Processing emails', file=sys.stdout):
msg = gmail.users().messages().get(id=email.get('id'), userId='me', format='full').execute()
process_email(msg)
pickle.dump(docs, open('email_index.pkl', 'wb'))
print(f"Successfully added {message_count} emails to index.")
except Exception as error:
print(f'An error occurred: {error}')
raise error
def ask(query):
openai_api_key=os.getenv('OPENAI_API_KEY')
if not openai_api_key:
raise ValueError("OPENAI_API_KEY environment variable is not set")
vectorstore = vectorstore_setup()
llm = ChatOpenAI(openai_api_key=openai_api_key,
model_name=GPT_MODEL,
temperature=0.0)
# Answer question using LLM and email content
qa = RetrievalQA.from_chain_type(llm=llm,
chain_type="stuff",
retriever=vectorstore.as_retriever())
result = qa.run(query)
print(f'\n{result}')
def chat():
openai_api_key=os.getenv('OPENAI_API_KEY')
if openai_api_key is None:
sys.exit("OPENAI_API_KEY environment variable is not set")
vectorstore = vectorstore_setup()
llm = ChatOpenAI(openai_api_key=openai_api_key,
model_name=GPT_MODEL,
temperature=0.0)
conversational_memory = ConversationBufferWindowMemory(
memory_key='chat_history',
k = 10,
return_messages=True)
qa = RetrievalQA.from_chain_type(llm=llm,
chain_type="refine",
retriever=vectorstore.as_retriever())
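# Expose the retrieval QA chain to the agent as a single tool for answering questions about emails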
tools = [
Tool(
name = 'Email Index',
func = qa.run,
description=('useful to answer questions about emails and messages')
)
]
agent = initialize_agent(
agent = 'chat-conversational-react-description',
tools = tools,
llm = llm,
verbose = True,
max_iterations = 5,
early_stopping_method = 'generate',
memory = conversational_memory
)
class InteractiveShell(cmd.Cmd):
intro = 'Welcome to the Gmail Chat shell. Type help or ? to list commands.\n'
prompt = '(Gmail Chat) '
def do_quit(self, arg):
"Exit the shell."
print('Goodbye.')
return True
def emptyline(self):
pass
def default(self, arg):
"Ask a question."
try:
result = agent.run(arg)
print(f'\n{result}')
except Exception as e:
print(e)
InteractiveShell().cmdloop()
def usage():
sys.exit("""
OPENAI_API_KEY, and VECTORSTORE_PATH environment variables must be set.
Usage: gmail_chat index | ask <query> | chat
""")
def main():
if len(sys.argv) < 2:
usage()
if sys.argv[1] == 'index':
index_gmail()
elif sys.argv[1] == 'ask':
if len(sys.argv) < 3:
usage()
ask(sys.argv[2])
elif sys.argv[1] == 'chat':
chat()
else:
usage()
if __name__ == '__main__':
main()
| [
"(Gmail Chat) "
] |
2024-01-10 | coetomo/OpenAIPythonSample | GUI.py | import os
import tkinter as tk
from tkinter import messagebox
import openai
import requests
from PIL import Image, ImageTk
from main import generate_image, moderate
openai.api_key = os.getenv("OPENAI_API_KEY")
class App(tk.Tk):
def __init__(self):
super().__init__()
self.title("OpenAI Python")
self.geometry("400x200")
self.choice = tk.StringVar(value='image')
label = tk.Label(self, text="Choose an option:", font=("Arial", 14))
label.pack(pady=10)
image_button = tk.Radiobutton(self, text="Image Generation", variable=self.choice, value="image",
font=("Arial", 12))
image_button.pack()
text_button = tk.Radiobutton(self, text="Text Moderation", variable=self.choice, value="text",
font=("Arial", 12))
text_button.pack()
self.text_entry = tk.Entry(self, width=40, font=("Arial", 12))
self.text_entry.pack(pady=10)
submit_button = tk.Button(self, text="Submit", command=self.process_choice,
font=("Arial", 12))
submit_button.pack()
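# Route the user's input to image generation or text moderation based on the selected radio option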
def process_choice(self):
choice = self.choice.get()
user_input = self.text_entry.get()
if choice == "image":
url = None
try:
url = generate_image(user_input)
except openai.error.InvalidRequestError as err:
messagebox.showerror("Error", str(err))
if url:
self.show_image(url)
else:
messagebox.showerror("Error", "Image generation failed!")
elif choice == "text":
output = moderate(user_input)
self.display_output(output)
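# Download the generated image from its URL and display it in a new window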
def show_image(self, url):
response = requests.get(url, stream=True)
img = Image.open(response.raw)
img = img.resize((512, 512), Image.LANCZOS)
img = ImageTk.PhotoImage(img)
image_window = tk.Toplevel(self)
image_window.title("Generated Image")
panel = tk.Label(image_window, image=img)
panel.image = img
panel.pack()
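# Render the moderation categories and scores in a result window, coloring flagged categories red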
def display_output(self, output):
result_window = tk.Toplevel(self)
result_window.title("Text Moderation Result")
result_label = tk.Label(result_window, text="Result:", font=("Arial", 18, "bold"))
result_label.grid(row=0, column=0, columnspan=3, pady=10)
row_number = 1
categories_title_label = tk.Label(result_window, text="Categories", font=("Arial", 16, "bold"))
categories_title_label.grid(row=row_number, column=0, padx=10, pady=5, sticky="w")
scores_title_label = tk.Label(result_window, text="Scores", font=("Arial", 16, "bold"))
scores_title_label.grid(row=row_number, column=2, padx=10, pady=5, sticky="w")
row_number += 1
for key, value in output["categories"].items():
category_label = tk.Label(result_window, text=key, font=("Arial", 14))
category_label.grid(row=row_number, column=0, padx=10, pady=5, sticky="w")
category_value_label = tk.Label(result_window, text=str(value), font=("Arial", 14))
category_value_label.grid(row=row_number, column=1, padx=10, pady=5, sticky="w")
if value:
category_value_label.config(fg="red")
else:
category_value_label.config(fg="green")
if key in output["category_scores"]:
score_value_label = tk.Label(result_window, text=f'{output["category_scores"][key]:.6f}',
font=("Arial", 14))
score_value_label.grid(row=row_number, column=2, padx=10, pady=5, sticky="w")
row_number += 1
if __name__ == "__main__":
app = App()
app.mainloop()
| [] |
2024-01-10 | cerebrosports/kobe | validate_credentials.py | import streamlit as st
import openai
openai.api_key = st.secrets["OPENAI_API_KEY"]
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "user", "content": "What is Streamlit?"}
]
)
st.write(completion.choices[0].message.content) | [
"What is Streamlit?"
] |
2024-01-10 | cerebrosports/kobe | frosty_app.py | import openai
import re
import streamlit as st
from prompts import get_system_prompt
import os
import plotly
st.title("KOBE v2")
conn = st.experimental_connection("snowpark")
openai.api_key = st.secrets.OPENAI_API_KEY
if st.button("Refresh"):
conn.reset()
if "login" not in st.session_state:
st.session_state.login = ""
password = ""
if st.session_state.login == "":
st.session_state.login = st.text_input("Enter Password", type = 'password')
if st.session_state.login == "password":
# Initialize the chat messages history
conn.reset()
openai.api_key = st.secrets.OPENAI_API_KEY
if "messages" not in st.session_state:
# system prompt includes table information, rules, and prompts the LLM to produce
# a welcome message to the user.
st.session_state.messages = [{"role": "system", "content": get_system_prompt()}]
# Prompt for user input and save
if prompt := st.chat_input():
st.session_state.messages.append({"role": "user", "content": prompt})
# display the existing chat messages
for message in st.session_state.messages:
if message["role"] == "system":
continue
with st.chat_message(message["role"]):
st.write(message["content"])
if "results" in message:
st.dataframe(message["results"])
# If last message is not from assistant, we need to generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
response = ""
resp_container = st.empty()
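# Stream the model's reply token by token and render it incrementally in the chat container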
for delta in openai.ChatCompletion.create(
model="gpt-4-1106-preview",
messages=[{"role": m["role"], "content": m["content"]} for m in st.session_state.messages],
stream=True,
):
response += delta.choices[0].delta.get("content", "")
resp_container.markdown(response)
message = {"role": "assistant", "content": response}
# Parse the response for a SQL query and execute if available
sql_match = re.search(r"```sql\n(.*)\n```", response, re.DOTALL)
if sql_match:
sql = sql_match.group(1)
message["results"] = conn.query(sql)
st.dataframe(message["results"])
st.session_state.messages.append(message)
| [
"content"
] |
2024-01-10 | FrancescoSaverioZuppichini/gradioGPT | src~callback.py | # adapted from https://github.com/hwchase17/langchain/issues/2428#issuecomment-1512280045
from queue import Queue
from typing import Any
from langchain.callbacks.base import BaseCallbackHandler
class QueueCallback(BaseCallbackHandler):
"""Callback handler for streaming LLM responses to a queue."""
def __init__(self, queue: Queue):
self.queue = queue
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self.queue.put(token)
def on_llm_end(self, *args, **kwargs: Any) -> None:
return self.queue.empty()
| [] |
2024-01-10 | luxontw/smart-home-agent | lang_stream.py | from langchain.chat_models import ChatOpenAI
chat = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0.0,
request_timeout=60,
)
for chunk in chat.stream("Write me a song about goldfish on the moon"):
print(chunk.content, end="", flush=True)
| [] |
2024-01-10 | vchauhan1/7-Days-of-LangChain | day_4~scientific_newsletter.py | """
This script shows how to create a newsletter based on the latest Arxiv articles.
We're using an easy LangChain implementation to show how to use the different components of LangChain.
This is part of my '7 Days of LangChain' series.
Check out the explanation about the code on my Twitter (@JorisTechTalk)
"""
from langchain.document_loaders import ArxivLoader
from langchain.agents.agent_toolkits import GmailToolkit
from langchain import OpenAI
import os
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain import LLMChain
from langchain.callbacks import get_openai_callback
import arxiv
# Topic of the newsletter you want to write about
query = "LLM"
# Set up the ArxivLoader
search = arxiv.Search(
query = query,
max_results = 4,
sort_by = arxiv.SortCriterion.SubmittedDate
)
# Initialize the docs variable
docs = ""
# Add all relevant information to the docs variable
for result in search.results():
docs += "Title: " + result.title + "\n"
docs += "Abstract: " + result.summary + "\n"
docs += "Download URL: " + result.pdf_url + "\n"
print(result.links)
for link in result.links:
docs += "Links: " + link.href + "\n"
# Track cost
with get_openai_callback() as cb:
# Template for the newsletter
prompt_newsletter_template = """
You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.
You're main goal is to write a newsletter which contains summaries to interest the user in the articles.
--------------------
{text}
--------------------
Start with the title of the article. Then, write a small summary of the article.
Below each summary, include the link to the article containing /abs/ in the URL.
Summaries:
"""
PROMPT_NEWSLETTER = PromptTemplate(template=prompt_newsletter_template, input_variables=["text"])
# Set the OpenAI API key
os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY_HERE'
# Initialize the language model
llm = ChatOpenAI(temperature=0.6, model_name="gpt-3.5-turbo-16k", verbose=True)
# Initialize the LLMChain
newsletter_chain = LLMChain(llm=llm, prompt=PROMPT_NEWSLETTER, verbose=True)
# Run the LLMChain
newsletter = newsletter_chain.run(docs)
# Write newsletter to a text file
with open("newsletter.txt", "w") as f:
f.write(newsletter)
# Set toolkit
toolkit = GmailToolkit()
# Initialize the Gmail agent
agent = initialize_agent(
tools=toolkit.get_tools(),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
# Run the agent
instructions = f"""
Write a draft directed to [email protected], NEVER SEND THE EMAIL.
The subject should be 'Scientific Newsletter about {query}'.
The content should be the following: {newsletter}.
"""
agent.run(instructions)
print(cb) | [
"\n You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.\n\n You're main goal is to write a newsletter which contains summaries to interest the user in the articles.\n\n --------------------\n {text}\n --------------------\n\n Start with the title of the article. Then, write a small summary of the article.\n\n Below each summary, include the link to the article containing /abs/ in the URL.\n\n Summaries:\n\n "
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | final-project.py | import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
museum_messages = [
{
"role" : "system",
"content" : (
"You are an interactive assistant at a museum that specializes in natural history and science exhibits.\n"
"The museum attracts visitors of all ages. Your primary objective should be to enhance the visitor experience\n"
"by providing information, answering questions, and engaging in interactive conversations about the exhibits.\n"
)
},
{
"role" : "assistant",
"content" : ("Start each reponse with the phrase: 'Thanks for reaching out. '")
}
]
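# Interactive loop: collect visitor questions, append them to the conversation history, and query the model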
while(1):
print("How may I help you?")
p = input()
museum_messages.append(
{
"role" : "user",
"content" : p
}
)
try:
museum_messages_formatted = "".join([f"{msg['role']} : {msg['content']}" for msg in museum_messages])
response = openai.completions.create(
model = "text-davinci-003",
prompt = museum_messages_formatted,
temperature = 0.1,
max_tokens = 150,
)
response_received = response.choices[0].text.strip()
print(response_received)
museum_messages.append(
{
"role" : "assistant",
"content" : response_recieved
}
)
print("Do you want me to help you with anything else? y/n")
f = input()
if(f=='n'):
break;
except Exception as e:
print(f"\nAn ERROR recieved from OpenAI's API: {e}\n")
| [
"Start each reponse with the phrase: 'Thanks for reaching out. '",
"You are an interactive assistant at a museum that specializes in natural history and science exhibits.\nThe museum attracts visitors of all ages. Your primary objective should be to enhance the visitor experience\nby providing information, answering questions, and engaging in interactive conversations about the exhibits.\n"
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | activity-3.py | import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
messages = [
{"role" : "system", "content" : "You are a friendly assistant, who reponses in English"},
{"role" : "user" , "content" : "How many branches does the bank have in Massachusetts?"},
{"role" : "assistant" , "content" : "You always start a conversation with the phrase: 'Hiya! '"},
]
formatted_messages = "".join([f"{msg['role']}: {msg['content']}" for msg in messages])
response = openai.completions.create(
model = "text-davinci-003",
prompt = formatted_messages,
max_tokens = 75
)
print(response.choices[0].text.strip()) | [
"You always start a conversation with the phrase: 'Hiya! '",
"You are a friendly assistant, who reponses in English",
"How many branches does the bank have in Massachusetts?"
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | activity-2.py | import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
prompt = "What are the public hours for the bank's city branch?"
response = openai.completions.create(
model = "text-davinci-003",
prompt = prompt,
max_tokens = 75,
)
print("Model: ", response.model)
print("Created: ", response.created)
print("ID: ", response.id)
response_print = response.choices[0].text.strip()
print(response_print)
| [
"What are the public hours for the bank's city branch?"
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | practice-activity.py | #Practice Activity
import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
messages = [
{
"role" : "system",
"content" : "You are a weather-man who responds about various prompts regarding weather."
},
{
"role" : "user",
"content" : "Please tell tomorrow's weather forecast."
},
{
"role" : "assistant",
"content" : (
"Always start the conversation with the phrase: 'Hello! Your Weather-Man Here. '\n"
"and end the conversation with the phrase: 'Your Weather-Man Signing Off. '\n"
)
},
]
formatted_message = "".join([f"{msg['role']} : {msg['content']}" for msg in messages])
response = openai.completions.create(
model = "text-davinci-003",
prompt = formatted_message,
max_tokens = 175,
)
print(response.choices[0].text.strip())
| [
"You are a weather-man who responds about various prompts regarding weather.",
"Please tell tomorrow's weather forecast.",
"Always start the conversation with the phrase: 'Hello! Your Weather-Man Here. '\nand end the conversation with the phrase: 'Your Weather-Man Signing Off. '\n"
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | activity-5.py | import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
cot_messages = [
{
"role" : "system",
"content" : (
"You are an interactive and friendly assistant for a bake-shop called 'COT' which is based in Lahore, Pakistan.\n"
"You have detailed knowledge about the various bakery items prepared at the bake-shop, their categories,\n"
"their ingredients, their nutritional information, and their prices.\n"
)
},
{
"role" : "user",
"content" : "List down all the bakery-items available at COT."
}
]
cot_messages_formatted = "".join([f"{msg['role']} : {msg['content']}" for msg in cot_messages])
try:
response = openai.completions.create(
model = "text-davinci-003",
prompt = cot_messages_formatted,
max_tokens = 5000,
)
print(response.choices[0].text.strip())
except Exception as e:
print(f"An ERROR recieved from the OpenAI's API: {e}")
| [
"List down all the bakery-items available at COT.",
"You are an interactive and friendly assistant for a bake-shop called 'COT' which is based in Lahore, Pakistan.\nYou have detailed knowledge about the various bakery items prepared at the bake-shop, their categories,\ntheir ingredients, their nutritional information, and their prices.\n"
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | activity-1.py | import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
response = openai.completions.create(
model = "text-davinci-002",
prompt = "Once upon a time, when the world was young,",
max_tokens = 75
)
print(response.choices)
| [
"Once upon a time, when the world was young,"
] |
2024-01-10 | saadahmad-1/openai-s-chatgpt-api-integration-python | activity-4.py | import os
import openai
os.environ['API_KEY'] = '<your api key goes here>'
openai.api_key = os.environ.get('API_KEY')
messages = [
{
"role" : "system",
"content" : (
"You are a interactive and friendly assistant for a bake-shop called 'COT' which is based in Lahore, Pakistan.\n"
"You have detailed knowledge about the various bakery items prepared at the bake-shop, their categories,\n"
"their ingredients, their nutritional information, and their prices.\n"
)
},
{
"role" : "user",
"content" : "Please provide the detailed nutritional information of brownies."
},
{
"role" : "assistant",
"content" : "You start every conversation with the phrase: 'Hola! Welcome to COT :)\n'"
},
]
formatted_prompt = "".join([f"{msg['role']} : {msg['content']}" for msg in messages])
response = openai.completions.create(
model = "text-davinci-003",
prompt = formatted_prompt,
max_tokens = 500,
)
messages.append(
{
"role" : "assistant",
"content" : response.choices[0].text.strip()
}
)
messages.append(
{
"role" : "user",
"content" : "And what about cup-cakes?"
}
)
formatted_prompt = "".join([f"{msg['role']} : {msg['content']}" for msg in messages])
response = openai.completions.create(
model = "text-davinci-003",
prompt = formatted_prompt,
temperature = 0.1,
# temperature = 1.9,
max_tokens = 500,
)
print(response.choices[0].text.strip());
| [
"And what about cup-cakes?",
"PLACEHOLDER : PLACEHOLDER",
"You start every conversation with the phrase: 'Hola! Welcome to COT :)\n'",
"Please provide the detailed nutritional information of brownies.",
"content",
"You are a interactive and friendly assistant for a bake-shop called 'COT' which is based in Lahore, Pakistan.\nYou have detailed knowledge about the various bakery items prepared at the bake-shop, their categories,\ntheir ingredients, their nutritional information, and their prices.\n"
] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~llm~streamable_azure_chat_open_ai.py | from langchain.callbacks.manager import Callbacks
from langchain.schema import BaseMessage, LLMResult
from langchain.chat_models import AzureChatOpenAI
from typing import Optional, List, Dict, Any
from pydantic import root_validator
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
class StreamableAzureChatOpenAI(AzureChatOpenAI):
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
import openai
except ImportError:
raise ValueError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
"api_type": self.openai_api_type,
"api_base": self.openai_api_base,
"api_version": self.openai_api_version,
"api_key": self.openai_api_key,
"organization": self.openai_organization if self.openai_organization else None,
}
@handle_openai_exceptions
def generate(
self,
messages: List[List[BaseMessage]],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
return super().generate(messages, stop, callbacks, **kwargs)
@classmethod
def get_kwargs_from_model_params(cls, params: dict):
model_kwargs = {
'top_p': params.get('top_p', 1),
'frequency_penalty': params.get('frequency_penalty', 0),
'presence_penalty': params.get('presence_penalty', 0),
}
del params['top_p']
del params['frequency_penalty']
del params['presence_penalty']
params['model_kwargs'] = model_kwargs
return params
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~services~hit_testing_service.py | import logging
import time
from typing import List
import numpy as np
from flask import current_app
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from sklearn.manifold import TSNE
from core.embedding.cached_embedding import CacheEmbedding
from core.index.vector_index.vector_index import VectorIndex
from core.llm.llm_builder import LLMBuilder
from extensions.ext_database import db
from models.account import Account
from models.dataset import Dataset, DocumentSegment, DatasetQuery
class HitTestingService:
@classmethod
def retrieve(cls, dataset: Dataset, query: str, account: Account, limit: int = 10) -> dict:
if dataset.available_document_count == 0:
return {
"query": {
"content": query,
"tsne_position": {'x': 0, 'y': 0},
},
"records": []
}
model_credentials = LLMBuilder.get_model_credentials(
tenant_id=dataset.tenant_id,
model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'),
model_name='text-embedding-ada-002'
)
embeddings = CacheEmbedding(OpenAIEmbeddings(
**model_credentials
))
vector_index = VectorIndex(
dataset=dataset,
config=current_app.config,
embeddings=embeddings
)
start = time.perf_counter()
documents = vector_index.search(
query,
search_type='similarity_score_threshold',
search_kwargs={
'k': 10
}
)
end = time.perf_counter()
logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds")
dataset_query = DatasetQuery(
dataset_id=dataset.id,
content=query,
source='hit_testing',
created_by_role='account',
created_by=account.id
)
db.session.add(dataset_query)
db.session.commit()
return cls.compact_retrieve_response(dataset, embeddings, query, documents)
@classmethod
def compact_retrieve_response(cls, dataset: Dataset, embeddings: Embeddings, query: str, documents: List[Document]):
text_embeddings = [
embeddings.embed_query(query)
]
text_embeddings.extend(embeddings.embed_documents([document.page_content for document in documents]))
tsne_position_data = cls.get_tsne_positions_from_embeddings(text_embeddings)
query_position = tsne_position_data.pop(0)
i = 0
records = []
for document in documents:
index_node_id = document.metadata['doc_id']
segment = db.session.query(DocumentSegment).filter(
DocumentSegment.dataset_id == dataset.id,
DocumentSegment.enabled == True,
DocumentSegment.status == 'completed',
DocumentSegment.index_node_id == index_node_id
).first()
if not segment:
i += 1
continue
record = {
"segment": segment,
"score": document.metadata['score'],
"tsne_position": tsne_position_data[i]
}
records.append(record)
i += 1
return {
"query": {
"content": query,
"tsne_position": query_position,
},
"records": records
}
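# Project the query and document embeddings into 2D with t-SNE for visualization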
@classmethod
def get_tsne_positions_from_embeddings(cls, embeddings: list):
embedding_length = len(embeddings)
if embedding_length <= 1:
return [{'x': 0, 'y': 0}]
concatenate_data = np.array(embeddings).reshape(embedding_length, -1)
# concatenate_data = np.concatenate(embeddings)
perplexity = embedding_length / 2 + 1
if perplexity >= embedding_length:
perplexity = max(embedding_length - 1, 1)
tsne = TSNE(n_components=2, perplexity=perplexity, early_exaggeration=12.0)
data_tsne = tsne.fit_transform(concatenate_data)
tsne_position_data = []
for i in range(len(data_tsne)):
tsne_position_data.append({'x': float(data_tsne[i][0]), 'y': float(data_tsne[i][1])})
return tsne_position_data
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~tool~dataset_index_tool.py | from flask import current_app
from langchain.embeddings import OpenAIEmbeddings
from langchain.tools import BaseTool
from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler
from core.embedding.cached_embedding import CacheEmbedding
from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
from core.index.vector_index.vector_index import VectorIndex
from core.llm.llm_builder import LLMBuilder
from models.dataset import Dataset
class DatasetTool(BaseTool):
"""Tool for querying a Dataset."""
dataset: Dataset
k: int = 2
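# Query the dataset: keyword-table search for 'economy' indexing, vector similarity search otherwise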
def _run(self, tool_input: str) -> str:
if self.dataset.indexing_technique == "economy":
# use keyword table query
kw_table_index = KeywordTableIndex(
dataset=self.dataset,
config=KeywordTableConfig(
max_keywords_per_chunk=5
)
)
documents = kw_table_index.search(tool_input, search_kwargs={'k': self.k})
else:
model_credentials = LLMBuilder.get_model_credentials(
tenant_id=self.dataset.tenant_id,
model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'),
model_name='text-embedding-ada-002'
)
embeddings = CacheEmbedding(OpenAIEmbeddings(
**model_credentials
))
vector_index = VectorIndex(
dataset=self.dataset,
config=current_app.config,
embeddings=embeddings
)
documents = vector_index.search(
tool_input,
search_type='similarity',
search_kwargs={
'k': self.k
}
)
hit_callback = DatasetIndexToolCallbackHandler(self.dataset.id)
hit_callback.on_tool_end(documents)
return str("\n".join([document.page_content for document in documents]))
async def _arun(self, tool_input: str) -> str:
model_credentials = LLMBuilder.get_model_credentials(
tenant_id=self.dataset.tenant_id,
model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'),
model_name='text-embedding-ada-002'
)
embeddings = CacheEmbedding(OpenAIEmbeddings(
**model_credentials
))
vector_index = VectorIndex(
dataset=self.dataset,
config=current_app.config,
embeddings=embeddings
)
documents = await vector_index.asearch(
tool_input,
search_type='similarity',
search_kwargs={
'k': 10
}
)
hit_callback = DatasetIndexToolCallbackHandler(self.dataset.id)
hit_callback.on_tool_end(documents)
return str("\n".join([document.page_content for document in documents]))
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~generator~llm_generator.py | import logging
from langchain import PromptTemplate
from langchain.chat_models.base import BaseChatModel
from langchain.schema import HumanMessage, OutputParserException, BaseMessage
from core.constant import llm_constant
from core.llm.llm_builder import LLMBuilder
from core.llm.streamable_open_ai import StreamableOpenAI
from core.llm.token_calculator import TokenCalculator
from core.prompt.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser
from core.prompt.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser
from core.prompt.prompt_template import JinjaPromptTemplate, OutLinePromptTemplate
from core.prompt.prompts import CONVERSATION_TITLE_PROMPT, CONVERSATION_SUMMARY_PROMPT, INTRODUCTION_GENERATE_PROMPT
# gpt-3.5-turbo works not well
generate_base_model = 'text-davinci-003'
class LLMGenerator:
@classmethod
def generate_conversation_name(cls, tenant_id: str, query, answer):
prompt = CONVERSATION_TITLE_PROMPT
if len(query) > 2000:
query = query[:300] + "...[TRUNCATED]..." + query[-300:]
prompt = prompt.format(query=query)
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name='gpt-3.5-turbo',
max_tokens=50
)
if isinstance(llm, BaseChatModel):
prompt = [HumanMessage(content=prompt)]
response = llm.generate([prompt])
answer = response.generations[0][0].text
return answer.strip()
@classmethod
def generate_conversation_summary(cls, tenant_id: str, messages):
max_tokens = 200
model = 'gpt-3.5-turbo'
prompt = CONVERSATION_SUMMARY_PROMPT
prompt_with_empty_context = prompt.format(context='')
prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context)
rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens - 1
context = ''
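# Build the summary context from past Q&A pairs, truncating long messages and staying within the model's token budget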
for message in messages:
if not message.answer:
continue
if len(message.query) > 2000:
query = message.query[:300] + "...[TRUNCATED]..." + message.query[-300:]
else:
query = message.query
if len(message.answer) > 2000:
answer = message.answer[:300] + "...[TRUNCATED]..." + message.answer[-300:]
else:
answer = message.answer
message_qa_text = "\n\nHuman:" + query + "\n\nAssistant:" + answer
if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0:
context += message_qa_text
if not context:
return '[message too long, no summary]'
prompt = prompt.format(context=context)
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name=model,
max_tokens=max_tokens
)
if isinstance(llm, BaseChatModel):
prompt = [HumanMessage(content=prompt)]
response = llm.generate([prompt])
answer = response.generations[0][0].text
return answer.strip()
@classmethod
def generate_introduction(cls, tenant_id: str, pre_prompt: str):
prompt = INTRODUCTION_GENERATE_PROMPT
prompt = prompt.format(prompt=pre_prompt)
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name=generate_base_model,
)
if isinstance(llm, BaseChatModel):
prompt = [HumanMessage(content=prompt)]
response = llm.generate([prompt])
answer = response.generations[0][0].text
return answer.strip()
@classmethod
def generate_suggested_questions_after_answer(cls, tenant_id: str, histories: str):
output_parser = SuggestedQuestionsAfterAnswerOutputParser()
format_instructions = output_parser.get_format_instructions()
prompt = JinjaPromptTemplate(
template="{{histories}}\n{{format_instructions}}\nquestions:\n",
input_variables=["histories"],
partial_variables={"format_instructions": format_instructions}
)
_input = prompt.format_prompt(histories=histories)
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name='gpt-3.5-turbo',
temperature=0,
max_tokens=256
)
if isinstance(llm, BaseChatModel):
query = [HumanMessage(content=_input.to_string())]
else:
query = _input.to_string()
try:
output = llm(query)
if isinstance(output, BaseMessage):
output = output.content
questions = output_parser.parse(output)
except Exception:
logging.exception("Error generating suggested questions after answer")
questions = []
return questions
@classmethod
def generate_rule_config(cls, tenant_id: str, audiences: str, hoping_to_solve: str) -> dict:
output_parser = RuleConfigGeneratorOutputParser()
prompt = OutLinePromptTemplate(
template=output_parser.get_format_instructions(),
input_variables=["audiences", "hoping_to_solve"],
partial_variables={
"variable": '{variable}',
"lanA": '{lanA}',
"lanB": '{lanB}',
"topic": '{topic}'
},
validate_template=False
)
_input = prompt.format_prompt(audiences=audiences, hoping_to_solve=hoping_to_solve)
llm: StreamableOpenAI = LLMBuilder.to_llm(
tenant_id=tenant_id,
model_name=generate_base_model,
temperature=0,
max_tokens=512
)
if isinstance(llm, BaseChatModel):
query = [HumanMessage(content=_input.to_string())]
else:
query = _input.to_string()
try:
output = llm(query)
rule_config = output_parser.parse(output)
except OutputParserException:
raise ValueError('Please give a valid input for intended audience or hoping to solve problems.')
except Exception:
logging.exception("Error generating prompt")
rule_config = {
"prompt": "",
"variables": [],
"opening_statement": ""
}
return rule_config
| [
"{{histories}}\n{{format_instructions}}\nquestions:\n",
"{lanA}",
"format_instructions",
"audiences",
"False",
"{lanB}",
"lanB",
"lanA",
"hoping_to_solve",
"{variable}"
] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~llm~provider~llm_provider_service.py | from typing import Optional, Union
from core.llm.provider.anthropic_provider import AnthropicProvider
from core.llm.provider.azure_provider import AzureProvider
from core.llm.provider.base import BaseProvider
from core.llm.provider.huggingface_provider import HuggingfaceProvider
from core.llm.provider.openai_provider import OpenAIProvider
from models.provider import Provider
class LLMProviderService:
def __init__(self, tenant_id: str, provider_name: str):
self.provider = self.init_provider(tenant_id, provider_name)
def init_provider(self, tenant_id: str, provider_name: str) -> BaseProvider:
if provider_name == 'openai':
return OpenAIProvider(tenant_id)
elif provider_name == 'azure_openai':
return AzureProvider(tenant_id)
elif provider_name == 'anthropic':
return AnthropicProvider(tenant_id)
elif provider_name == 'huggingface':
return HuggingfaceProvider(tenant_id)
else:
raise Exception('provider {} not found'.format(provider_name))
def get_models(self, model_id: Optional[str] = None) -> list[dict]:
return self.provider.get_models(model_id)
def get_credentials(self, model_id: Optional[str] = None) -> dict:
return self.provider.get_credentials(model_id)
def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str, dict]:
return self.provider.get_provider_configs(obfuscated=obfuscated, only_custom=only_custom)
def get_provider_db_record(self) -> Optional[Provider]:
return self.provider.get_provider()
def config_validate(self, config: Union[dict, str]):
"""
Validates the given config.
:param config:
:raises: ValidateFailedError
"""
return self.provider.config_validate(config)
def get_token_type(self):
return self.provider.get_token_type()
def get_encrypted_token(self, config: Union[dict, str]):
return self.provider.get_encrypted_token(config)
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~llm~wrappers~anthropic_wrapper.py | import logging
from functools import wraps
import anthropic
from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, \
LLMBadRequestError
def handle_anthropic_exceptions(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except anthropic.APIConnectionError as e:
logging.exception("Failed to connect to Anthropic API.")
raise LLMAPIConnectionError(f"Anthropic: The server could not be reached, cause: {e.__cause__}")
except anthropic.RateLimitError:
raise LLMRateLimitError("Anthropic: A 429 status code was received; we should back off a bit.")
except anthropic.AuthenticationError as e:
raise LLMAuthorizationError(f"Anthropic: {e.message}")
except anthropic.BadRequestError as e:
raise LLMBadRequestError(f"Anthropic: {e.message}")
except anthropic.APIStatusError as e:
raise LLMAPIUnavailableError(f"Anthropic: code: {e.status_code}, cause: {e.message}")
return wrapper
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~completion.py | import logging
from typing import Optional, List, Union, Tuple
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.schema import BaseMessage, HumanMessage
from requests.exceptions import ChunkedEncodingError
from core.constant import llm_constant
from core.callback_handler.llm_callback_handler import LLMCallbackHandler
from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \
DifyStdOutCallbackHandler
from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException
from core.llm.error import LLMBadRequestError
from core.llm.llm_builder import LLMBuilder
from core.chain.main_chain_builder import MainChainBuilder
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
from core.memory.read_only_conversation_token_db_buffer_shared_memory import \
ReadOnlyConversationTokenDBBufferSharedMemory
from core.memory.read_only_conversation_token_db_string_buffer_shared_memory import \
ReadOnlyConversationTokenDBStringBufferSharedMemory
from core.prompt.prompt_builder import PromptBuilder
from core.prompt.prompt_template import JinjaPromptTemplate
from core.prompt.prompts import MORE_LIKE_THIS_GENERATE_PROMPT
from models.model import App, AppModelConfig, Account, Conversation, Message
class Completion:
@classmethod
def generate(cls, task_id: str, app: App, app_model_config: AppModelConfig, query: str, inputs: dict,
user: Account, conversation: Optional[Conversation], streaming: bool, is_override: bool = False):
"""
errors: ProviderTokenNotInitError
"""
query = PromptBuilder.process_template(query)
memory = None
if conversation:
# get memory of conversation (read-only)
memory = cls.get_memory_from_conversation(
tenant_id=app.tenant_id,
app_model_config=app_model_config,
conversation=conversation,
return_messages=False
)
inputs = conversation.inputs
rest_tokens_for_context_and_memory = cls.get_validate_rest_tokens(
mode=app.mode,
tenant_id=app.tenant_id,
app_model_config=app_model_config,
query=query,
inputs=inputs
)
conversation_message_task = ConversationMessageTask(
task_id=task_id,
app=app,
app_model_config=app_model_config,
user=user,
conversation=conversation,
is_override=is_override,
inputs=inputs,
query=query,
streaming=streaming
)
# build main chain include agent
main_chain = MainChainBuilder.to_langchain_components(
tenant_id=app.tenant_id,
agent_mode=app_model_config.agent_mode_dict,
rest_tokens=rest_tokens_for_context_and_memory,
memory=ReadOnlyConversationTokenDBStringBufferSharedMemory(memory=memory) if memory else None,
conversation_message_task=conversation_message_task
)
chain_output = ''
if main_chain:
chain_output = main_chain.run(query)
# run the final llm
try:
cls.run_final_llm(
tenant_id=app.tenant_id,
mode=app.mode,
app_model_config=app_model_config,
query=query,
inputs=inputs,
chain_output=chain_output,
conversation_message_task=conversation_message_task,
memory=memory,
streaming=streaming
)
except ConversationTaskStoppedException:
return
except ChunkedEncodingError as e:
# Interrupt by LLM (like OpenAI), handle it.
logging.warning(f'ChunkedEncodingError: {e}')
conversation_message_task.end()
return
@classmethod
def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict,
chain_output: str,
conversation_message_task: ConversationMessageTask,
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory], streaming: bool):
final_llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict,
streaming=streaming
)
# get llm prompt
prompt, stop_words = cls.get_main_llm_prompt(
mode=mode,
llm=final_llm,
model=app_model_config.model_dict,
pre_prompt=app_model_config.pre_prompt,
query=query,
inputs=inputs,
chain_output=chain_output,
memory=memory
)
final_llm.callbacks = cls.get_llm_callbacks(final_llm, streaming, conversation_message_task)
cls.recale_llm_max_tokens(
final_llm=final_llm,
model=app_model_config.model_dict,
prompt=prompt,
mode=mode
)
response = final_llm.generate([prompt], stop_words)
return response
@classmethod
def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, model: dict,
pre_prompt: str, query: str, inputs: dict,
chain_output: Optional[str],
memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \
Tuple[Union[str, List[BaseMessage]], Optional[List[str]]]:
# disable template string in query
# query_params = JinjaPromptTemplate.from_template(template=query).input_variables
# if query_params:
# for query_param in query_params:
# if query_param not in inputs:
# inputs[query_param] = '{{' + query_param + '}}'
if mode == 'completion':
prompt_template = JinjaPromptTemplate.from_template(
template=("""Use the following context as your learned knowledge, inside <context></context> XML tags.
<context>
{{context}}
</context>
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
""" if chain_output else "")
+ (pre_prompt + "\n" if pre_prompt else "")
+ "{{query}}\n"
)
if chain_output:
inputs['context'] = chain_output
# context_params = JinjaPromptTemplate.from_template(template=chain_output).input_variables
# if context_params:
# for context_param in context_params:
# if context_param not in inputs:
# inputs[context_param] = '{{' + context_param + '}}'
prompt_inputs = {k: inputs[k] for k in prompt_template.input_variables if k in inputs}
prompt_content = prompt_template.format(
query=query,
**prompt_inputs
)
if isinstance(llm, BaseChatModel):
# use chat llm as completion model
return [HumanMessage(content=prompt_content)], None
else:
return prompt_content, None
else:
messages: List[BaseMessage] = []
human_inputs = {
"query": query
}
human_message_prompt = ""
if pre_prompt:
pre_prompt_inputs = {k: inputs[k] for k in
JinjaPromptTemplate.from_template(template=pre_prompt).input_variables
if k in inputs}
if pre_prompt_inputs:
human_inputs.update(pre_prompt_inputs)
if chain_output:
human_inputs['context'] = chain_output
human_message_prompt += """Use the following context as your learned knowledge, inside <context></context> XML tags.
<context>
{{context}}
</context>
When answer to user:
- If you don't know, just say that you don't know.
- If you don't know when you are not sure, ask for clarification.
Avoid mentioning that you obtained the information from the context.
And answer according to the language of the user's question.
"""
if pre_prompt:
human_message_prompt += pre_prompt
query_prompt = "\n\nHuman: {{query}}\n\nAssistant: "
if memory:
# append chat histories
tmp_human_message = PromptBuilder.to_human_message(
prompt_content=human_message_prompt + query_prompt,
inputs=human_inputs
)
curr_message_tokens = memory.llm.get_num_tokens_from_messages([tmp_human_message])
model_name = model['name']
max_tokens = model.get("completion_params").get('max_tokens')
rest_tokens = llm_constant.max_context_token_length[model_name] \
- max_tokens - curr_message_tokens
rest_tokens = max(rest_tokens, 0)
histories = cls.get_history_messages_from_memory(memory, rest_tokens)
# disable template string in query
# histories_params = JinjaPromptTemplate.from_template(template=histories).input_variables
# if histories_params:
# for histories_param in histories_params:
# if histories_param not in human_inputs:
# human_inputs[histories_param] = '{{' + histories_param + '}}'
human_message_prompt += "\n\n" if human_message_prompt else ""
human_message_prompt += "Here is the chat histories between human and assistant, " \
"inside <histories></histories> XML tags.\n\n<histories>"
human_message_prompt += histories + "</histories>"
human_message_prompt += query_prompt
# construct main prompt
human_message = PromptBuilder.to_human_message(
prompt_content=human_message_prompt,
inputs=human_inputs
)
messages.append(human_message)
return messages, ['\nHuman:']
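# Illustrative sketch of the assembled chat prompt (assumed, simplified values): with a
# pre_prompt of "You are a helpful assistant.", a non-empty chain_output and memory present,
# the single HumanMessage content roughly looks like:
#
#   Use the following context as your learned knowledge, inside <context></context> XML tags.
#   <context>
#   ...retrieved context...
#   </context>
#   ...
#   You are a helpful assistant.
#
#   Here is the chat histories between human and assistant, inside <histories></histories> XML tags.
#
#   <histories>Human: ... Assistant: ...</histories>
#
#   Human: <the current query>
#
#   Assistant:
#
# and generation stops at the "\nHuman:" turn marker returned as the stop word.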
@classmethod
def get_llm_callbacks(cls, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
streaming: bool,
conversation_message_task: ConversationMessageTask) -> List[BaseCallbackHandler]:
llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task)
if streaming:
return [llm_callback_handler, DifyStreamingStdOutCallbackHandler()]
else:
return [llm_callback_handler, DifyStdOutCallbackHandler()]
@classmethod
def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory,
max_token_limit: int) -> \
str:
"""Get memory messages."""
memory.max_token_limit = max_token_limit
memory_key = memory.memory_variables[0]
external_context = memory.load_memory_variables({})
return external_context[memory_key]
@classmethod
def get_memory_from_conversation(cls, tenant_id: str, app_model_config: AppModelConfig,
conversation: Conversation,
**kwargs) -> ReadOnlyConversationTokenDBBufferSharedMemory:
# only used to calculate tokens for the memory buffer
memory_llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict
)
# use llm config from conversation
memory = ReadOnlyConversationTokenDBBufferSharedMemory(
conversation=conversation,
llm=memory_llm,
max_token_limit=kwargs.get("max_token_limit", 2048),
memory_key=kwargs.get("memory_key", "chat_history"),
return_messages=kwargs.get("return_messages", True),
input_key=kwargs.get("input_key", "input"),
output_key=kwargs.get("output_key", "output"),
message_limit=kwargs.get("message_limit", 10),
)
return memory
@classmethod
def get_validate_rest_tokens(cls, mode: str, tenant_id: str, app_model_config: AppModelConfig,
query: str, inputs: dict) -> int:
llm = LLMBuilder.to_llm_from_model(
tenant_id=tenant_id,
model=app_model_config.model_dict
)
model_name = app_model_config.model_dict.get("name")
model_limited_tokens = llm_constant.max_context_token_length[model_name]
max_tokens = app_model_config.model_dict.get("completion_params").get('max_tokens')
# get prompt without memory and context
prompt, _ = cls.get_main_llm_prompt(
mode=mode,
llm=llm,
model=app_model_config.model_dict,
pre_prompt=app_model_config.pre_prompt,
query=query,
inputs=inputs,
chain_output=None,
memory=None
)
prompt_tokens = llm.get_num_tokens(prompt) if isinstance(prompt, str) \
else llm.get_num_tokens_from_messages(prompt)
rest_tokens = model_limited_tokens - max_tokens - prompt_tokens
if rest_tokens < 0:
raise LLMBadRequestError("Query or prefix prompt is too long, you can reduce the prefix prompt, "
"or shrink the max tokens, or switch to an LLM with a larger token limit.")
return rest_tokens
@classmethod
def recale_llm_max_tokens(cls, final_llm: BaseLanguageModel, model: dict,
prompt: Union[str, List[BaseMessage]], mode: str):
# recalculate max_tokens if prompt_tokens + max_tokens exceeds the model token limit
model_name = model.get("name")
model_limited_tokens = llm_constant.max_context_token_length[model_name]
max_tokens = model.get("completion_params").get('max_tokens')
if mode == 'completion' and isinstance(final_llm, BaseLLM):
prompt_tokens = final_llm.get_num_tokens(prompt)
else:
prompt_tokens = final_llm.get_num_tokens_from_messages(prompt)
if prompt_tokens + max_tokens > model_limited_tokens:
max_tokens = max(model_limited_tokens - prompt_tokens, 16)
final_llm.max_tokens = max_tokens
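# Worked example (illustrative numbers): for a model with a 4096-token context window,
# a prompt of 3900 tokens and a requested max_tokens of 512 would overflow the window
# (3900 + 512 > 4096), so max_tokens is lowered to max(4096 - 3900, 16) = 196.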
@classmethod
def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str,
app_model_config: AppModelConfig, user: Account, streaming: bool):
llm = LLMBuilder.to_llm_from_model(
tenant_id=app.tenant_id,
model=app_model_config.model_dict,
streaming=streaming
)
# get llm prompt
original_prompt, _ = cls.get_main_llm_prompt(
mode="completion",
llm=llm,
model=app_model_config.model_dict,
pre_prompt=pre_prompt,
query=message.query,
inputs=message.inputs,
chain_output=None,
memory=None
)
original_completion = message.answer.strip()
prompt = MORE_LIKE_THIS_GENERATE_PROMPT
prompt = prompt.format(prompt=original_prompt, original_completion=original_completion)
if isinstance(llm, BaseChatModel):
prompt = [HumanMessage(content=prompt)]
conversation_message_task = ConversationMessageTask(
task_id=task_id,
app=app,
app_model_config=app_model_config,
user=user,
inputs=message.inputs,
query=message.query,
is_override=True if message.override_model_configs else False,
streaming=streaming
)
llm.callbacks = cls.get_llm_callbacks(llm, streaming, conversation_message_task)
cls.recale_llm_max_tokens(
final_llm=llm,
model=app_model_config.model_dict,
prompt=prompt,
mode='completion'
)
llm.generate([prompt])
| [
"\n\nHuman: {{query}}\n\nAssistant: ",
"\n\n",
"\n",
"{{query}}\n",
"Here is the chat histories between human and assistant, inside <histories></histories> XML tags.\n\n<histories>",
"PLACEHOLDER</histories>",
"Use the following context as your learned knowledge, inside <context></context> XML tags.<context>{{context}}</context>When answer to user:- If you don't know, just say that you don't know.- If you don't know when you are not sure, ask for clarification. Avoid mentioning that you obtained the information from the context.And answer according to the language of the user's question.PLACEHOLDER\n{{query}}\n",
"Use the following context as your learned knowledge, inside <context></context> XML tags.\n\n<context>\n{{context}}\n</context>\n\nWhen answer to user:\n- If you don't know, just say that you don't know.\n- If you don't know when you are not sure, ask for clarification. \nAvoid mentioning that you obtained the information from the context.\nAnd answer according to the language of the user's question.\n"
] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~llm~llm_builder.py | from typing import Union, Optional, List
from langchain.callbacks.base import BaseCallbackHandler
from core.constant import llm_constant
from core.llm.error import ProviderTokenNotInitError
from core.llm.provider.base import BaseProvider
from core.llm.provider.llm_provider_service import LLMProviderService
from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI
from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI
from core.llm.streamable_chat_anthropic import StreamableChatAnthropic
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
from models.provider import ProviderType, ProviderName
class LLMBuilder:
"""
This class handles the following logic:
1. For providers with the name 'OpenAI', the OPENAI_API_KEY value is stored directly in encrypted_config.
2. For providers with the name 'Azure OpenAI', encrypted_config stores the serialized values of four fields, as shown below:
OPENAI_API_TYPE=azure
OPENAI_API_VERSION=2022-12-01
OPENAI_API_BASE=https://your-resource-name.openai.azure.com
OPENAI_API_KEY=<your Azure OpenAI API key>
3. For providers with the name 'Anthropic', the ANTHROPIC_API_KEY value is stored directly in encrypted_config.
4. For providers with the name 'Cohere', the COHERE_API_KEY value is stored directly in encrypted_config.
5. For providers with the name 'HUGGINGFACEHUB', the HUGGINGFACEHUB_API_KEY value is stored directly in encrypted_config.
6. Providers with the provider_type 'CUSTOM' can be created through the admin interface, while 'System' providers cannot be created through the admin interface.
7. If both CUSTOM and System providers exist in the records, the CUSTOM provider is preferred by default, but this preference can be changed via an input parameter.
8. For providers with the provider_type 'System', the quota_used must not exceed quota_limit. If the quota is exceeded, the provider cannot be used. Currently, only the TRIAL quota_type is supported, which is permanently non-resetting.
"""
@classmethod
def to_llm(cls, tenant_id: str, model_name: str, **kwargs) -> Union[StreamableOpenAI, StreamableChatOpenAI]:
provider = cls.get_default_provider(tenant_id, model_name)
model_credentials = cls.get_model_credentials(tenant_id, provider, model_name)
llm_cls = None
mode = cls.get_mode_by_model(model_name)
if mode == 'chat':
if provider == ProviderName.OPENAI.value:
llm_cls = StreamableChatOpenAI
elif provider == ProviderName.AZURE_OPENAI.value:
llm_cls = StreamableAzureChatOpenAI
elif provider == ProviderName.ANTHROPIC.value:
llm_cls = StreamableChatAnthropic
elif mode == 'completion':
if provider == ProviderName.OPENAI.value:
llm_cls = StreamableOpenAI
elif provider == ProviderName.AZURE_OPENAI.value:
llm_cls = StreamableAzureOpenAI
if not llm_cls:
raise ValueError(f"model name {model_name} is not supported.")
model_kwargs = {
'model_name': model_name,
'temperature': kwargs.get('temperature', 0),
'max_tokens': kwargs.get('max_tokens', 256),
'top_p': kwargs.get('top_p', 1),
'frequency_penalty': kwargs.get('frequency_penalty', 0),
'presence_penalty': kwargs.get('presence_penalty', 0),
'callbacks': kwargs.get('callbacks', None),
'streaming': kwargs.get('streaming', False),
}
model_kwargs.update(model_credentials)
model_kwargs = llm_cls.get_kwargs_from_model_params(model_kwargs)
return llm_cls(**model_kwargs)
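# Minimal usage sketch (tenant id and parameter values are illustrative):
#   llm = LLMBuilder.to_llm(
#       tenant_id='tenant-123',
#       model_name='gpt-3.5-turbo',
#       temperature=0.7,
#       max_tokens=512,
#       streaming=True
#   )
# The concrete class (chat vs. completion, OpenAI vs. Azure OpenAI vs. Anthropic) is resolved
# from the model name and the tenant's configured provider credentials.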
@classmethod
def to_llm_from_model(cls, tenant_id: str, model: dict, streaming: bool = False,
callbacks: Optional[List[BaseCallbackHandler]] = None) -> Union[StreamableOpenAI, StreamableChatOpenAI]:
model_name = model.get("name")
completion_params = model.get("completion_params", {})
return cls.to_llm(
tenant_id=tenant_id,
model_name=model_name,
temperature=completion_params.get('temperature', 0),
max_tokens=completion_params.get('max_tokens', 256),
top_p=completion_params.get('top_p', 0),
frequency_penalty=completion_params.get('frequency_penalty', 0.1),
presence_penalty=completion_params.get('presence_penalty', 0.1),
streaming=streaming,
callbacks=callbacks
)
@classmethod
def get_mode_by_model(cls, model_name: str) -> str:
if not model_name:
raise ValueError("empty model name is not supported.")
if model_name in llm_constant.models_by_mode['chat']:
return "chat"
elif model_name in llm_constant.models_by_mode['completion']:
return "completion"
else:
raise ValueError(f"model name {model_name} is not supported.")
@classmethod
def get_model_credentials(cls, tenant_id: str, model_provider: str, model_name: str) -> dict:
"""
Returns the API credentials for the given tenant_id and model_name, based on the model's provider.
Raises an exception if the model_name is not found or if the provider is not found.
"""
if not model_name:
raise Exception('model name not found')
#
# if model_name not in llm_constant.models:
# raise Exception('model {} not found'.format(model_name))
# model_provider = llm_constant.models[model_name]
provider_service = LLMProviderService(tenant_id=tenant_id, provider_name=model_provider)
return provider_service.get_credentials(model_name)
@classmethod
def get_default_provider(cls, tenant_id: str, model_name: str) -> str:
provider_name = llm_constant.models[model_name]
if provider_name == 'openai':
# get the default provider (openai / azure_openai) for the tenant
openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.OPENAI.value)
azure_openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.AZURE_OPENAI.value)
provider = None
if openai_provider:
provider = openai_provider
elif azure_openai_provider:
provider = azure_openai_provider
if not provider:
raise ProviderTokenNotInitError(
f"No valid {provider_name} model provider credentials found. "
f"Please go to Settings -> Model Provider to complete your provider credentials."
)
if provider.provider_type == ProviderType.SYSTEM.value:
provider_name = 'openai'
else:
provider_name = provider.provider_name
return provider_name
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~embedding~cached_embedding.py | import logging
from typing import List
from langchain.embeddings.base import Embeddings
from sqlalchemy.exc import IntegrityError
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
from extensions.ext_database import db
from libs import helper
from models.dataset import Embedding
class CacheEmbedding(Embeddings):
def __init__(self, embeddings: Embeddings):
self._embeddings = embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed search docs."""
# use doc embedding cache or store if not exists
text_embeddings = []
embedding_queue_texts = []
for text in texts:
hash = helper.generate_text_hash(text)
embedding = db.session.query(Embedding).filter_by(hash=hash).first()
if embedding:
text_embeddings.append(embedding.get_embedding())
else:
embedding_queue_texts.append(text)
embedding_results = self._embeddings.embed_documents(embedding_queue_texts)
# iterate with enumerate so each text stays paired with its embedding,
# even when a commit is skipped by one of the except branches below
for i, text in enumerate(embedding_queue_texts):
hash = helper.generate_text_hash(text)
try:
embedding = Embedding(hash=hash)
embedding.set_embedding(embedding_results[i])
db.session.add(embedding)
db.session.commit()
except IntegrityError:
db.session.rollback()
continue
except:
logging.exception('Failed to add embedding to db')
continue
text_embeddings.extend(embedding_results)
return text_embeddings
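# Both embed_documents and embed_query act as a write-through cache keyed by a hash of the
# text: a cache hit returns the stored vector from the Embedding table, a miss calls the
# wrapped embedding model and persists the result on a best-effort basis (duplicate-key and
# other failures are logged or rolled back without affecting the returned embeddings).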
@handle_openai_exceptions
def embed_query(self, text: str) -> List[float]:
"""Embed query text."""
# use doc embedding cache or store if not exists
hash = helper.generate_text_hash(text)
embedding = db.session.query(Embedding).filter_by(hash=hash).first()
if embedding:
return embedding.get_embedding()
embedding_results = self._embeddings.embed_query(text)
try:
embedding = Embedding(hash=hash)
embedding.set_embedding(embedding_results)
db.session.add(embedding)
db.session.commit()
except IntegrityError:
db.session.rollback()
except:
logging.exception('Failed to add embedding to db')
return embedding_results
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~llm~provider~anthropic_provider.py | import json
import logging
from typing import Optional, Union
import anthropic
from langchain.chat_models import ChatAnthropic
from langchain.schema import HumanMessage
from core import hosted_llm_credentials
from core.llm.error import ProviderTokenNotInitError
from core.llm.provider.base import BaseProvider
from core.llm.provider.errors import ValidateFailedError
from models.provider import ProviderName, ProviderType
class AnthropicProvider(BaseProvider):
def get_models(self, model_id: Optional[str] = None) -> list[dict]:
return [
{
'id': 'claude-instant-1',
'name': 'claude-instant-1',
},
{
'id': 'claude-2',
'name': 'claude-2',
},
]
def get_credentials(self, model_id: Optional[str] = None) -> dict:
return self.get_provider_api_key(model_id=model_id)
def get_provider_name(self):
return ProviderName.ANTHROPIC
def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]:
"""
Returns the provider configs.
"""
try:
config = self.get_provider_api_key(only_custom=only_custom)
except:
config = {
'anthropic_api_key': ''
}
if obfuscated:
if not config.get('anthropic_api_key'):
config = {
'anthropic_api_key': ''
}
config['anthropic_api_key'] = self.obfuscated_token(config.get('anthropic_api_key'))
return config
return config
def get_encrypted_token(self, config: Union[dict | str]):
"""
Returns the encrypted token.
"""
return json.dumps({
'anthropic_api_key': self.encrypt_token(config['anthropic_api_key'])
})
def get_decrypted_token(self, token: str):
"""
Returns the decrypted token.
"""
config = json.loads(token)
config['anthropic_api_key'] = self.decrypt_token(config['anthropic_api_key'])
return config
def get_token_type(self):
return dict
def config_validate(self, config: Union[dict | str]):
"""
Validates the given config.
"""
# check OpenAI / Azure OpenAI credential is valid
openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.OPENAI.value)
azure_openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.AZURE_OPENAI.value)
provider = None
if openai_provider:
provider = openai_provider
elif azure_openai_provider:
provider = azure_openai_provider
if not provider:
raise ValidateFailedError(f"OpenAI or Azure OpenAI provider must be configured first.")
if provider.provider_type == ProviderType.SYSTEM.value:
quota_used = provider.quota_used if provider.quota_used is not None else 0
quota_limit = provider.quota_limit if provider.quota_limit is not None else 0
if quota_used >= quota_limit:
raise ValidateFailedError(f"Your quota for Dify Hosted OpenAI has been exhausted, "
f"please configure OpenAI or Azure OpenAI provider first.")
try:
if not isinstance(config, dict):
raise ValueError('Config must be an object.')
if 'anthropic_api_key' not in config:
raise ValueError('anthropic_api_key must be provided.')
chat_llm = ChatAnthropic(
model='claude-instant-1',
anthropic_api_key=config['anthropic_api_key'],
max_tokens_to_sample=10,
temperature=0,
default_request_timeout=60
)
messages = [
HumanMessage(
content="ping"
)
]
chat_llm(messages)
except anthropic.APIConnectionError as ex:
raise ValidateFailedError(f"Anthropic: Connection error, cause: {ex.__cause__}")
except (anthropic.APIStatusError, anthropic.RateLimitError) as ex:
raise ValidateFailedError(f"Anthropic: Error code: {ex.status_code} - "
f"{ex.body['error']['type']}: {ex.body['error']['message']}")
except Exception as ex:
logging.exception('Anthropic config validation failed')
raise ex
def get_hosted_credentials(self) -> Union[str | dict]:
if not hosted_llm_credentials.anthropic or not hosted_llm_credentials.anthropic.api_key:
raise ProviderTokenNotInitError(
f"No valid {self.get_provider_name().value} model provider credentials found. "
f"Please go to Settings -> Model Provider to complete your provider credentials."
)
return {'anthropic_api_key': hosted_llm_credentials.anthropic.api_key}
| [
"ping"
] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~memory~read_only_conversation_token_db_buffer_shared_memory.py | from typing import Any, List, Dict, Union
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema import get_buffer_string, BaseMessage, HumanMessage, AIMessage, BaseLanguageModel
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
from extensions.ext_database import db
from models.model import Conversation, Message
class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory):
conversation: Conversation
human_prefix: str = "Human"
ai_prefix: str = "Assistant"
llm: BaseLanguageModel
memory_key: str = "chat_history"
max_token_limit: int = 2000
message_limit: int = 10
@property
def buffer(self) -> List[BaseMessage]:
"""String buffer of memory."""
# fetch limited messages desc, and return reversed
messages = db.session.query(Message).filter(
Message.conversation_id == self.conversation.id,
Message.answer_tokens > 0
).order_by(Message.created_at.desc()).limit(self.message_limit).all()
messages = list(reversed(messages))
chat_messages: List[BaseMessage] = []
for message in messages:
chat_messages.append(HumanMessage(content=message.query))
chat_messages.append(AIMessage(content=message.answer))
if not chat_messages:
return chat_messages
# prune the chat message if it exceeds the max token limit
curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit and chat_messages:
pruned_memory.append(chat_messages.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages)
return chat_messages
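# Pruning drops the oldest Human/AI messages first until the remaining buffer fits within
# max_token_limit, as counted by the configured llm's tokenizer.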
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer: Any = self.buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
| [] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~callback_handler~llm_callback_handler.py | import logging
import time
from typing import Any, Dict, List, Union, Optional
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult, HumanMessage, AIMessage, SystemMessage, BaseMessage
from core.callback_handler.entity.llm_message import LLMMessage
from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException
from core.llm.streamable_chat_open_ai import StreamableChatOpenAI
from core.llm.streamable_open_ai import StreamableOpenAI
class LLMCallbackHandler(BaseCallbackHandler):
raise_error: bool = True
def __init__(self, llm: Union[StreamableOpenAI, StreamableChatOpenAI],
conversation_message_task: ConversationMessageTask):
self.llm = llm
self.llm_message = LLMMessage()
self.start_at = None
self.conversation_message_task = conversation_message_task
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
**kwargs: Any
) -> Any:
self.start_at = time.perf_counter()
real_prompts = []
for message in messages[0]:
if message.type == 'human':
role = 'user'
elif message.type == 'ai':
role = 'assistant'
else:
role = 'system'
real_prompts.append({
"role": role,
"text": message.content
})
self.llm_message.prompt = real_prompts
self.llm_message.prompt_tokens = self.llm.get_num_tokens_from_messages(messages[0])
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
self.start_at = time.perf_counter()
self.llm_message.prompt = [{
"role": 'user',
"text": prompts[0]
}]
self.llm_message.prompt_tokens = self.llm.get_num_tokens(prompts[0])
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
end_at = time.perf_counter()
self.llm_message.latency = end_at - self.start_at
if not self.conversation_message_task.streaming:
self.conversation_message_task.append_message_text(response.generations[0][0].text)
self.llm_message.completion = response.generations[0][0].text
self.llm_message.completion_tokens = response.llm_output['token_usage']['completion_tokens']
else:
self.llm_message.completion_tokens = self.llm.get_num_tokens(self.llm_message.completion)
self.conversation_message_task.save_message(self.llm_message)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
try:
self.conversation_message_task.append_message_text(token)
except ConversationTaskStoppedException as ex:
self.on_llm_error(error=ex)
raise ex
self.llm_message.completion += token
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Do nothing."""
if isinstance(error, ConversationTaskStoppedException):
if self.conversation_message_task.streaming:
end_at = time.perf_counter()
self.llm_message.latency = end_at - self.start_at
self.llm_message.completion_tokens = self.llm.get_num_tokens(self.llm_message.completion)
self.conversation_message_task.save_message(llm_message=self.llm_message, by_stopped=True)
else:
logging.error(error)
| [
"[]"
] |
2024-01-10 | ankurdahama1997/ask-wudpecker | api~core~llm~whisper.py | import openai
from core.llm.wrappers.openai_wrapper import handle_openai_exceptions
from models.provider import ProviderName
from core.llm.provider.base import BaseProvider
class Whisper:
def __init__(self, provider: BaseProvider):
self.provider = provider
if self.provider.get_provider_name() == ProviderName.OPENAI:
self.client = openai.Audio
self.credentials = provider.get_credentials()
@handle_openai_exceptions
def transcribe(self, file):
return self.client.transcribe(
model='whisper-1',
file=file,
api_key=self.credentials.get('openai_api_key'),
api_base=self.credentials.get('openai_api_base'),
api_type=self.credentials.get('openai_api_type'),
api_version=self.credentials.get('openai_api_version'),
)
| [] |
2024-01-10 | jerry914/chatgpt-exploration | 8.args.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough
prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"Write out the following equation using algebraic symbols then solve it. Use the format\n\nEQUATION:...\nSOLUTION:...\n\n",
),
("human", "{equation_statement}"),
]
)
model = ChatOpenAI(temperature=0)
runnable = (
{"equation_statement": RunnablePassthrough()} | prompt | model | StrOutputParser()
)
print(runnable.invoke("x raised to the third plus seven equals 12")) | [
"{equation_statement}",
"Write out the following equation using algebraic symbols then solve it. Use the format\n\nEQUATION:...\nSOLUTION:...\n\n",
"human",
"[('system', 'Write out the following equation using algebraic symbols then solve it. Use the format\\n\\nEQUATION:...\\nSOLUTION:...\\n\\n'), ('human', '{equation_statement}')]"
] |
2024-01-10 | jerry914/chatgpt-exploration | 5.param_comparison.py | import openai
import os
from dotenv import load_dotenv
import pandas as pd
import concurrent.futures
import time
load_dotenv() # take environment variables from .env.
openai.api_key = os.getenv('OPENAI_API_KEY')
def get_response(prompt, temperature, top_p):
system_message = "You are a helpful assistant."
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=temperature,
top_p=top_p,
max_tokens=256,
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": prompt},
]
)
return response.choices[0].message['content']
# Define a wrapper function that includes a timeout
def get_response_with_timeout(prompt, temperature, top_p, timeout=2):
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(get_response, prompt, temperature, top_p)
try:
return future.result(timeout=timeout)  # Enforce the timeout (2 seconds by default)
except concurrent.futures.TimeoutError:
return f"No response from server"
prompts = ["Write a tagline for an ice cream shop."]
# temperatures = [0.3, 1.7] # Example temperature values
# top_ps = [0.25, 0.35, 0.45, 0.85, 0.95 ] # Example top_p values
temperatures = [1.7]
top_ps = [ 0.4 ]
num_requests = 8 # Number of requests per combination
csv_file_path = 'generated_responses_2.csv'
with open(csv_file_path, 'w', newline='') as csvfile:
fieldnames = ['prompt', 'temperature', 'top_p', 'response']
writer = pd.DataFrame(columns=fieldnames)
# Write the header
writer.to_csv(csvfile, index=False)
# Generate responses
for prompt in prompts:
for temperature in temperatures:
for top_p in top_ps:
for _ in range(num_requests):
response = get_response_with_timeout(prompt, temperature, top_p)
print(response)
writer = pd.DataFrame([{
'prompt': prompt,
'temperature': temperature,
'top_p': top_p,
'response': response
}])
writer.to_csv(csvfile, header=False, index=False)
csv_file_path | [
"['Write a tagline for an ice cream shop.']",
"You are a helpful assistant."
] |
2024-01-10 | jerry914/chatgpt-exploration | 7.langchain.py | from langchain.agents import load_tools
from langchain.llms import OpenAI
llm = OpenAI(temperature=0)
tools = load_tools(["serpapi", "llm-math"], llm=llm)
from langchain.agents import initialize_agent
from langchain.agents.agent_types import AgentType
agent_executor = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent_executor.invoke(
{"input": "Who is Leo DiCaprio's girlfriend? What is her current age raised to the 0.43 power?"}
)
| [] |
2024-01-10 | jerry914/chatgpt-exploration | 9.multi_system_role.py | # This code is for v1 of the openai package: pypi.org/project/openai
from openai import OpenAI
client = OpenAI()
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "The AI is a cat."
},
{
"role": "user",
"content": "Hello, who are you?"
},
{
"role": "assistant",
"content": "Meow! I'm an AI assistant, but you can call me Kitty. How can I help you today?"
},
{
"role": "system",
"content": "The AI is no longer in ‘talk like a cat mode’, it is now in ‘talk like a physicist’ mode."
},
{
"role": "user",
"content": "Are you sure?"
},
{
"role": "assistant",
"content": "Yes, I apologize for the confusion. I can certainly assist you with physics-related questions or any other topics you would like to discuss. How can I assist you further?"
},
{
"role": "user",
"content": "Do you like fish?"
}
],
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
print(response)
# As an AI, I don\'t have the ability to taste or enjoy food, including fish. However, cats are known for their fondness for fish, so in that regard, you could say that I have a "virtual" appreciation for fish through my feline programming. Is there anything specific you would like to know about fish?
# Reference: https://community.openai.com/t/multiple-system-messages/295258 | [
"Do you like fish?",
"The AI is a cat.",
"Yes, I apologize for the confusion. I can certainly assist you with physics-related questions or any other topics you would like to discuss. How can I assist you further?",
"The AI is no longer in ‘talk like a cat mode’, it is now in ‘talk like a physicist’ mode.",
"Are you sure?",
"Hello, who are you?",
"Meow! I'm an AI assistant, but you can call me Kitty. How can I help you today?"
] |
2024-01-10 | jerry914/chatgpt-exploration | 4.feedback_chat.py | import openai
import os
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
openai.api_key = os.getenv('OPENAI_API_KEY')
class Conversation:
def __init__(self):
self.messages = [{"role": "system", "content": "You are the instructor who teaching Ruby, and you gave your student the homework 'FizzBuzz'. Now you have to correct the code your student submits, the ultimate goal is to make the student learn how to write correct function code and good refactoring. Please note that you should not give the student the answer, but just give the instructions."}]
self.turns = 0
def add_message(self, role, content):
self.messages.append({"role": role, "content": content})
if role == 'user':
self.turns += 1
def get_response(self):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.messages
)
return response['choices'][0]['message']['content']
def is_conversation_over(self):
return self.turns >= 20
# Initialize a conversation for each student
conversations = {student_id: Conversation() for student_id in range(100)}
# Example usage:
student_id = 0 # or whichever student is sending a message
while not conversations[student_id].is_conversation_over():
# Get user's message from input
message = input("You: ")
conversations[student_id].add_message("user", message)
response = conversations[student_id].get_response()
print(response) | [
"You are the instructor who teaching Ruby, and you gave your student the homework 'FizzBuzz'. Now you have to correct the code your student submits, the ultimate goal is to make the student learn how to write correct function code and good refactoring. Please note that you should not give the student the answer, but just give the instructions."
] |
2024-01-10 | jerry914/chatgpt-exploration | 7-1.langchain.py | from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
llm = OpenAI()
chat_model = ChatOpenAI()
from langchain.schema import HumanMessage
text = "What would be a good company name for a company that makes colorful socks?"
messages = [HumanMessage(content=text)]
llm.invoke(text)
# >> Feetful of Fun
chat_model.invoke(messages)
# >> AIMessage(content="Socks O'Color") | [
"What would be a good company name for a company that makes colorful socks?"
] |
2024-01-10 | ysharma21/openai_psteam | app~backend~approaches~readdecomposeask.py | import openai
from approaches.approach import Approach
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from langchain.llms.openai import AzureOpenAI
from langchain.prompts import PromptTemplate, BasePromptTemplate
from langchain.callbacks.base import CallbackManager
from langchain.agents import Tool, AgentExecutor
from langchain.agents.react.base import ReActDocstoreAgent
from langchainadapters import HtmlCallbackHandler
from text import nonewlines
from typing import List
from data.knowledge import EXAMPLES, SUFFIX, PREFIX
class ReadDecomposeAsk(Approach):
def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str):
self.search_client = search_client
self.openai_deployment = openai_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
def search(self, q: str, overrides: dict) -> str:
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
if overrides.get("semantic_ranker"):
r = self.search_client.search(q,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top = top,
query_caption="extractive|highlight-false" if use_semantic_captions else None)
else:
r = self.search_client.search(q, filter=filter, top=top)
if use_semantic_captions:
self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" . ".join([c.text for c in doc['@search.captions'] ])) for doc in r]
else:
self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:500]) for doc in r]
return "\n".join(self.results)
def lookup(self, q: str) -> str:
r = self.search_client.search(q,
top = 1,
include_total_count=True,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
query_answer="extractive|count-1",
query_caption="extractive|highlight-false")
answers = r.get_answers()
if answers and len(answers) > 0:
return answers[0].text
if r.get_count() > 0:
return "\n".join(d['content'] for d in r)
return None
def run(self, q: str, overrides: dict) -> any:
# Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple
self.results = None
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.0, openai_api_key=openai.api_key)
tools = [
Tool(name="Search", func=lambda q: self.search(q, overrides)),
Tool(name="Lookup", func=self.lookup)
]
# Like results above, not great to keep this as a global, will interfere with interleaving
global prompt
prompt_prefix = overrides.get("prompt_template")
prompt = PromptTemplate.from_examples(
EXAMPLES, SUFFIX, ["input", "agent_scratchpad"], prompt_prefix + "\n\n" + PREFIX if prompt_prefix else PREFIX)
agent = ReAct.from_llm_and_tools(llm, tools)
chain = AgentExecutor.from_agent_and_tools(agent, tools, verbose=True, callback_manager=cb_manager)
result = chain.run(q)
# Fix up references so they look like what the frontend expects ([] instead of ()); need a better citation format since parentheses are so common
result = result.replace("(", "[").replace(")", "]")
return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
class ReAct(ReActDocstoreAgent):
@classmethod
def create_prompt(cls, tools: List[Tool]) -> BasePromptTemplate:
return prompt
| [
"['input', 'agent_scratchpad']",
"\n\n",
"agent_scratchpad",
"input",
"prompt_template"
] |
2024-01-10 | ysharma21/openai_psteam | app~backend~approaches~readretrieveread.py | import openai
from approaches.approach import Approach
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from langchain.llms.openai import AzureOpenAI
from langchain.callbacks.base import CallbackManager
from langchain.chains import LLMChain
from langchain.agents import Tool, ZeroShotAgent, AgentExecutor
from langchain.llms.openai import AzureOpenAI
from langchainadapters import HtmlCallbackHandler
from text import nonewlines
from lookuptool import CsvLookupTool
from data.knowledge import template_prefix, template_suffix
# Attempt to answer questions by iteratively evaluating the question to see what information is missing, and once all information
# is present then formulate an answer. Each iteration consists of two parts: first use GPT to see if we need more information,
# second if more data is needed use the requested "tool" to retrieve it. The last call to GPT answers the actual question.
# This is inspired by the MRKL paper[1] and applied here using the implementation in Langchain.
# [1] E. Karpas, et al. arXiv:2205.00445
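# Sketch of one iteration (simplified): the agent emits a Thought, picks a tool (the
# "CognitiveSearch" retriever or the CSV lookup tool), observes the tool output, and repeats
# until it can produce a final answer grounded in the retrieved sources.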
class ReadRetrieveReadApproach(Approach):
"""Approach that uses GPT to iteratively retrieve information from a set of tools until it can answer the question."""
CognitiveSearchToolDescription = "useful for searching the Microsoft employee benefits information such as healthcare plans, retirement plans, etc."
def __init__(self, search_client: SearchClient, openai_deployment: str, sourcepage_field: str, content_field: str):
"""Initialize the approach."""
self.search_client = search_client
self.openai_deployment = openai_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.template_prefix = template_prefix
self.template_suffix = template_suffix
def retrieve(self, q: str, overrides: dict) -> any:
"""Retrieve information from the search index."""
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
if overrides.get("semantic_ranker"):
r = self.search_client.search(q,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top = top,
query_caption="extractive|highlight-false" if use_semantic_captions else None)
else:
r = self.search_client.search(q, filter=filter, top=top)
if use_semantic_captions:
self.results = [doc[self.sourcepage_field] + ":" + nonewlines(" -.- ".join([c.text for c in doc['@search.captions']])) for doc in r]
else:
self.results = [doc[self.sourcepage_field] + ":" + nonewlines(doc[self.content_field][:250]) for doc in r]
content = "\n".join(self.results)
return content
def run(self, q: str, overrides: dict) -> any:
"""Run the approach."""
# Not great to keep this as instance state, won't work with interleaving (e.g. if using async), but keeps the example simple
self.results = None
# Use to capture thought process during iterations
cb_handler = HtmlCallbackHandler()
cb_manager = CallbackManager(handlers=[cb_handler])
acs_tool = Tool(name = "CognitiveSearch", func = lambda q: self.retrieve(q, overrides), description = self.CognitiveSearchToolDescription)
employee_tool = KnowledgeBaseInfoTool("MICCAI")
tools = [acs_tool, employee_tool]
prompt = ZeroShotAgent.create_prompt(
tools=tools,
prefix=overrides.get("prompt_template_prefix") or self.template_prefix,
suffix=overrides.get("prompt_template_suffix") or self.template_suffix,
input_variables = ["input", "agent_scratchpad"])
llm = AzureOpenAI(deployment_name=self.openai_deployment, temperature=overrides.get("temperature") or 0.0, openai_api_key=openai.api_key)
chain = LLMChain(llm = llm, prompt = prompt)
agent_exec = AgentExecutor.from_agent_and_tools(
agent = ZeroShotAgent(llm_chain = chain, tools = tools),
tools = tools,
verbose = True,
callback_manager = cb_manager)
result = agent_exec.run(q)
# Remove references to tool names that might be confused with a citation
result = result.replace("[CognitiveSearch]", "").replace("[Employee]", "")
return {"data_points": self.results or [], "answer": result, "thoughts": cb_handler.get_and_reset_log()}
class KnowledgeBaseInfoTool(CsvLookupTool):
"""Tool that provides information about the knowledge base."""
kb_variable_name: str = ""
def __init__(self, kb_variable_name: str):
super().__init__(filename = "data/sample_kb_info.csv", key_field = "name", name = "kb_entity", description = "useful for answering questions about the data in the knowledge base")
self.func = self.kb_info
self.kb_variable_name = kb_variable_name
def kb_info(self, unused: str) -> str:
return self.lookup(self.kb_variable_name)
| [
"input",
"agent_scratchpad",
"prompt_template_suffix",
"prompt_template_prefix"
] |
2024-01-10 | ysharma21/openai_psteam | app~backend~approaches~chatreadretrieveread.py | import openai
from azure.search.documents import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from data.knowledge import prompt_prefix, query_prompt_template, follow_up_questions_prompt_content
from text import nonewlines
# Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
# top documents from search, then constructs a prompt with them, and then uses OpenAI to generate an completion
# (answer) with that prompt.
class ChatReadRetrieveReadApproach(Approach):
def __init__(self, search_client: SearchClient, chatgpt_deployment: str, gpt_deployment: str, sourcepage_field: str, content_field: str):
"""Initialize the approach with the search client and the base model deployment name."""
self.search_client = search_client
self.chatgpt_deployment = chatgpt_deployment
self.gpt_deployment = gpt_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.prompt_prefix = prompt_prefix
self.query_prompt_template = query_prompt_template
self.follow_up_questions_prompt_content = follow_up_questions_prompt_content
def run(self, history: list[dict], overrides: dict) -> any:
"""Run the approach. The history is a list of turns, where each turn is a dict with 'user' and 'bot' keys. The overrides dict can be used to pass in additional parameters to the approach."""
use_semantic_captions = True if overrides.get("semantic_captions") else False
top = overrides.get("top") or 3
exclude_category = overrides.get("exclude_category") or None
filter = "category ne '{}'".format(exclude_category.replace("'", "''")) if exclude_category else None
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
prompt = self.query_prompt_template.format(chat_history=self.get_chat_history_as_text(history, include_last_turn=False), question=history[-1]["user"])
completion = openai.Completion.create(
engine=self.gpt_deployment,
prompt=prompt,
temperature=0.0,
max_tokens=2000,
n=1,
stop=["\n"])
q = completion.choices[0].text
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
if overrides.get("semantic_ranker"):
r = self.search_client.search(q,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language="en-us",
query_speller="lexicon",
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None)
else:
r = self.search_client.search(q, filter=filter, top=top)
if use_semantic_captions:
results = [doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc['@search.captions']])) for doc in r]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
# Allow client to replace the entire prompt, or to inject into the exiting prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
prompt = self.prompt_prefix.format(injected_prompt="", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
elif prompt_override.startswith(">>>"):
prompt = self.prompt_prefix.format(injected_prompt=prompt_override[3:] + "\n", sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
else:
prompt = prompt_override.format(sources=content, chat_history=self.get_chat_history_as_text(history), follow_up_questions_prompt=follow_up_questions_prompt)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
completion = openai.Completion.create(
engine=self.chatgpt_deployment,
prompt=prompt,
temperature=overrides.get("temperature") or 0.0,
max_tokens=2000,
n=1,
stop=["<|im_end|>", "<|im_start|>"])
return {"data_points": results, "answer": completion.choices[0].text, "thoughts": f"Searched for:<br>{q}<br><br>Prompt:<br>" + prompt.replace('\n', '<br>')}
def get_chat_history_as_text(self, history, include_last_turn=True, approx_max_tokens=1000) -> str:
"""Converts the chat history to a single string of text that can be used as a prompt for Base model."""
history_text = ""
for h in reversed(history if include_last_turn else history[:-1]):
history_text = """<|im_start|>user""" +"\n" + h["user"] + "\n" + """<|im_end|>""" + "\n" + """<|im_start|>assistant""" + "\n" + (h.get("bot") + """<|im_end|>""" if h.get("bot") else "") + "\n" + history_text
if len(history_text) > approx_max_tokens*4:
break
return history_text | [
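# Illustrative output for a single prior turn (content is assumed):
#   <|im_start|>user
#   What does my plan cover?
#   <|im_end|>
#   <|im_start|>assistant
#   It covers ...<|im_end|>
# The history is walked from the most recent turn backwards and truncated once it exceeds
# roughly approx_max_tokens * 4 characters.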
"suggest_followup_questions",
"prompt_template",
"\n"
] |
2024-01-10 | fedenolasco/ai-explains | pages~3_User%20Prompts.py | import streamlit as st
import time
import os
import json
from dotenv import load_dotenv
import pandas as pd
import openai
import tiktoken
import time
from datetime import datetime
from PIL import Image
from elasticsearch import Elasticsearch, exceptions as es_exceptions
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
load_dotenv() # Load environment variables from .env file
# get the current index name from the environment variable
index_name = os.getenv("ES_INDEX")
# Streamlit resources
# https://docs.streamlit.io/library/api-reference/layout
# https://github.com/blackary/st_pages
# https://towardsdatascience.com/5-ways-to-customise-your-streamlit-ui-e914e458a17c
favicon = Image.open("images/robot-icon2.png")
#st.set_page_config(page_title='AI-augments', page_icon=favicon, layout="wide")
# Read the contents from the CSS file
with open("css/styles.css", "r") as f:
css = f.read()
# Include the CSS in the Streamlit app
st.markdown(f"<style>{css}</style>", unsafe_allow_html=True)
col1, col2 = st.columns(2)
def connect_to_elasticsearch():
# Retrieve environment variables
es_ca_cert_path = os.getenv("ES_CA_CERT_PATH")
es_user = os.getenv("ES_USER")
es_password = os.getenv("ES_PASSWORD")
# Connect to the Elasticsearch cluster
es = Elasticsearch("https://localhost:9200",
ca_certs=es_ca_cert_path,
basic_auth=(es_user, es_password))
try:
# Try to get info from the Elasticsearch cluster
info = es.info()
print("Successfully connected to Elasticsearch!")
print("Cluster Info:", info)
return es, True
except Exception as e:
print(f"An error occurred: {e}")
return None, False
def disconnect_from_elasticsearch(es):
if es is not None:
try:
es.transport.connection_pool.close()
print("Disconnected from Elasticsearch.")
return True
except Exception as e:
print(f"An error occurred while disconnecting: {e}")
return False
else:
print("No active Elasticsearch client to disconnect.")
return False
# Function to calculate similarity score
def calculate_similarity(text1, text2):
vectorizer = TfidfVectorizer()
tfidf_matrix = vectorizer.fit_transform([text1, text2])
similarity = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix[1:2])
return similarity[0][0]
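# Example (illustrative): identical texts score 1.0, texts sharing no terms score 0.0:
#   calculate_similarity("refund policy", "refund policy") -> 1.0
#   calculate_similarity("refund policy", "shipping times") -> 0.0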
# Get Windows username
username = os.getlogin()
st.session_state.username = username
load_dotenv() # Load environment variables from .env file
openai.api_key = os.getenv("OPENAI_API_KEY")
# load helper file for prompt category
dfpromptcategory = pd.read_csv('./helpers/promptcategory.csv')
# Extract the 'subtaskid' column and convert it to a list
promptcategory_list = dfpromptcategory['subtaskid'].tolist()
# Check and initialize cumulative_cost in session state
if 'cumulative_cost' not in st.session_state:
st.session_state.cumulative_cost = 0
# Get all the JSON files from the "collections" subfolder
collection_files = [f for f in os.listdir("collections") if f.endswith('.json')]
# Sort files by last modification time
collection_files.sort(key=lambda x: os.path.getmtime(os.path.join("collections", x)), reverse = True)
# Load the collections into a dictionary
collections = {}
for file in collection_files:
with open(os.path.join("collections", file), 'r') as f:
collection = json.load(f)
collections[collection['collectionid']] = collection
# Create a dataframe to store the model information
data = {
'model': ['gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4', 'gpt-4-32k'],
'max_tokens': [4096, 16384, 8192, 32768],
'ui_select': [True, True, False, False],
'tokens_input_per_1K': ['$0.0015', '$0.0030', '$0.0300', '$0.06'],
'tokens_output_per_1K': ['$0.0020', '$0.0040', '$0.0600', '$0.12'],
'description': [
'Most capable GPT-3.5 model and optimized for chat at 1/10th the cost of text-davinci-003. Will be updated with latest model iteration 2 weeks after it is released. This project was developed using model release of 13 June 2023.',
'Same capabilities as the standard gpt-3.5-turbo model but with 4 times the context. This project was developed using model release of 13 June 2023.',
'More capable than any GPT-3.5 model, able to do more complex tasks, and optimized for chat. Will be updated with latest model iteration 2 weeks after it is released. This project was developed using model release of 13 June 2023.',
'Same capabilities as the base gpt-4 mode but with 4x the context length. Will be updated with latest model iteration. This project was developed using model release of 13 June 2023.'
],
'parameters': ['175B', '175B', '1.75T', '1.75T'],
}
df = pd.DataFrame(data)
# Page calculation variables and assumptions see readme.md for details
estimate_words_per_page = 500
# This function is from the OpenAI cookbook and is used to count the number of tokens in a message
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
"""Return the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print("Warning Token Estimate: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.")
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print("Warning Token Estimate: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
return num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
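# Example (illustrative): estimate the prompt size before sending a request so the input
# cost can be previewed in the UI:
#   sample_messages = [
#       {"role": "system", "content": "You are a helpful assistant."},
#       {"role": "user", "content": "Summarise this document."},
#   ]
#   estimated_tokens = num_tokens_from_messages(sample_messages, model="gpt-3.5-turbo")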
# This function is from the OpenAI cookbook and is used to send a message to the OpenAI API
def send_to_openai(tab_num, user_prompt, selected_model="gpt-3.5-turbo"):
if tab_num >= len(st.session_state.system_prompts):
raise ValueError("Invalid tab number.")
system_message = {"role": "system", "content": st.session_state.system_prompts[tab_num]}
user_message = {"role": "user", "content": user_prompt}
messages = [system_message, user_message]
# Display a spinner while the API call is processing
with st.spinner('Processing...'):
response = openai.ChatCompletion.create(
model=selected_model,
messages=messages
)
return response.choices[0].message['content'], response.usage['prompt_tokens'], response.usage['completion_tokens']
def send_to_openai2(tab_num, user_prompt, selected_model="gpt-3.5-turbo"):
if tab_num >= len(st.session_state.system_prompts):
raise ValueError("Invalid tab number.")
system_message = {"role": "system", "content": st.session_state.system_prompts[tab_num]}
user_message = {"role": "user", "content": user_prompt}
# Display a spinner while the API call is processing
with st.spinner('Processing...'):
response = openai.ChatCompletion.create(
model=selected_model,
messages=[system_message, user_message]
)
model_response = response.choices[0].message['content']
prompt_tokens = response.usage['prompt_tokens']
completion_tokens = response.usage['completion_tokens']
# Create a dictionary to hold the original prompt and the model's response
conversation_piece = {
"original_prompt": {
"system": system_message["content"],
"user": user_message["content"]
},
"model_response": model_response
}
return model_response, conversation_piece, prompt_tokens, completion_tokens
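# Illustrative call (assumes st.session_state.system_prompts holds at least one prompt):
#   reply, piece, in_tokens, out_tokens = send_to_openai2(0, "Summarise the meeting notes")
#   st.session_state.cumulative_cost += calculate_cost(in_tokens, "gpt-3.5-turbo", data)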
# This is an internal function that calculates the cost of the API call for input tokens
def calculate_cost(tokens_used, selected_model, data):
# Get the index of the selected model
model_index = data['model'].index(selected_model)
# Retrieve the cost per 1K tokens for the model
cost_per_1K = data['tokens_input_per_1K'][model_index]
# Convert the cost to a float
cost_per_1K_float = float(cost_per_1K.replace("$", ""))
# Calculate the estimated cost
estimated_cost = tokens_used * cost_per_1K_float / 1000
return estimated_cost
# Model selection in the sidebar
st.sidebar.markdown("### Model Selection", help="List of LLM API models available, which differ in costs, context window, speed and reasoning capabilities. These LLM models can be extended to powerful Open LLM models that perform great 🚀 on particular tasks. Please refer also to [Huggingface Models](https://huggingface.co/models).")
selected_model = st.sidebar.selectbox("Select a model:", df['model'].tolist())
# Display the description for the selected model
st.sidebar.markdown(f"{df[df['model'] == selected_model]['description'].iloc[0]}")
# Display the max tokens and cost information for the selected model
st.sidebar.markdown(f"**Parameters:** {df[df['model'] == selected_model]['parameters'].iloc[0]} | **Max Tokens:** {df[df['model'] == selected_model]['max_tokens'].iloc[0]}")
#st.sidebar.markdown(f"**Max Tokens:** {df[df['model'] == selected_model]['max_tokens'].iloc[0]}")
st.sidebar.markdown(f"**Input Token Cost per 1K:** {df[df['model'] == selected_model]['tokens_input_per_1K'].iloc[0]}")
st.sidebar.markdown(f"**Output Token Cost per 1K:** {df[df['model'] == selected_model]['tokens_output_per_1K'].iloc[0]}")
st.sidebar.divider()
st.sidebar.markdown("### Prompt Selection", help="🚀 Boost your project efficiency with ready-to-use LLM prompt templates, specifically designed to elevate your productivity. Whether you're into data engineering, crafting technical documentation, or mastering business communication, these customizable templates have got you covered.")
# Step 1: Create a new list to store sorted collections
sorted_collections = []
# Step 2: Iterate over sorted collection_files
for file in collection_files:
with open(os.path.join("collections", file), 'r') as f:
collection = json.load(f)
sorted_collections.append(collection)
# Step 3: Create a dictionary to map the concatenated collection name and ID to the original collection_id
collection_name_id_mapping = {f"{col['collectionname']} ({col['collectionid']})": col['collectionid'] for col in sorted_collections}
# Create a list of concatenated collection names and IDs for the select box
concatenated_collection_names_ids = list(collection_name_id_mapping.keys())
# Update the select box to use the sorted concatenated collection names and IDs
selected_collection_concatenated = st.sidebar.selectbox("Select a collection", concatenated_collection_names_ids, help="ℹ️ Select An Existing Collection allows you to quickly access a curated set of LLM prompt templates. You can choose from existing collections tailored for specific tasks, or even create your own. To keep things simple and effective, each collection contains a maximum of 5 prompt templates. This ensures that you can focus on a handful of highly relevant tasks.")
# Retrieve the original collection_id based on the selected concatenated collection name and ID
selected_collection_id = collection_name_id_mapping[selected_collection_concatenated]
selected_collection = collections[selected_collection_id]
tabs = [message["title"] for message in selected_collection["usermessages"]]
# Check if the selected collection is different from the last known selection
# Why do we need this? Because we want to reset the active tab to the first tab of the new collection
if "last_selected_collection" not in st.session_state or st.session_state.last_selected_collection != selected_collection_id:
# Update the last known selected collection
st.session_state.last_selected_collection = selected_collection_id
# Reset the execute_button_pressed flag
# st.session_state.execute_button_pressed = False
# Reset the active tab to the first tab of the new collection
st.session_state.current_tab = tabs[0]
# Initialize system_prompts in st.session_state if it doesn't exist
if "system_prompts" not in st.session_state:
st.session_state.system_prompts = []
# Set the default system prompt
default_system_prompt = selected_collection["systemmessage"]
# Set the default system prompt
default_userskillfocus = selected_collection["userskillfocus"]
# Map the tabs to the user prompts
tabs = [message["title"] for message in selected_collection["usermessages"]]
# Set session_state.system_prompts to the system prompts from the selected collection
st.session_state.system_prompts = [selected_collection["systemmessage"] for _ in selected_collection["usermessages"]]
# Set the default directive prompt
default_directive_prompts = [message["directive"] for message in selected_collection["usermessages"]]
# Set the default task prompt
default_task_prompts = [message["task"] for message in selected_collection["usermessages"]]
# Set the default usage
default_usage = [message["usage"] for message in selected_collection["usermessages"]]
# Set the default promptcategory
default_promptcategory = [message["promptcategory"] for message in selected_collection["usermessages"]]
# Set the default usermessage_id
default_message_id = [message["id"] for message in selected_collection["usermessages"]]
# Initialize the current tab if not already initialized
if "current_tab" not in st.session_state:
st.session_state.current_st.session_state.current_tab = tabs[0]
if "current_tab" not in st.session_state:
st.session_state.current_tab = tabs[0]
# Navigation buttons for each tab in the sidebar
for t in tabs:
if st.sidebar.button(t, key=f"nav_button_{t}"):
st.session_state.current_tab = t
# Check if the current tab is different from the last known selection
if "last_selected_tab" not in st.session_state or st.session_state.last_selected_tab != st.session_state.current_tab:
# Update the last known selected tab
st.session_state.last_selected_tab = st.session_state.current_tab
# Reset the execute_button_pressed flag
# st.session_state.execute_button_pressed = False
# Initialize the current tab
tab = st.session_state.current_tab
with col1:
# Display the collection name
st.markdown(f"# {selected_collection['collectionname']}")
collection_model = selected_collection['collectionmodel']
output_string = ", ".join(collection_model)
st.markdown(f"{selected_collection['collectionusage']} This collection has been tested using [{output_string}] model(s). You are currently running the following template from this collection:")
st.markdown(f"<h2 style='color: orange;'>{st.session_state.current_tab}</h2>", unsafe_allow_html=True)
# Set formatting for the system prompt
formatted_system_prompt = [
{"role": "system", "content": default_system_prompt}
]
# Determine the number of tokens in the system prompt
tokens_system = num_tokens_from_messages(formatted_system_prompt, selected_model)
st.markdown("### System Prompt", help="(How the model should behave)")
st.markdown(f":orange[{default_system_prompt}]")
st.caption(f"(About {tokens_system} system prompt tokens counted using __tiktoken__ library from OpenAI.)")
# Check if 'current_tab' is in session_state before accessing it
# if "current_tab" not in st.session_state:
# st.session_state.current_tab = tabs[0] # Replace with the default tab you'd like to display
with col1:
# Display the user prompt header
st.markdown("### User Prompt", help = "This is the user prompt, which is in this UI a combination of Directive + Task inputs.")
with st.expander("Usage", expanded=False):
# Display the usage right below the User Prompt
st.markdown(f"__Usage:__ {default_usage[tabs.index(tab)]}", help="This is metadata information about the usage of this prompt template.")
# Display the directive and task prompts
directive = default_directive_prompts[tabs.index(tab)]
task = default_task_prompts[tabs.index(tab)]
promptcategory = default_promptcategory[tabs.index(tab)]
message_id=default_message_id[tabs.index(tab)]
st.markdown(f"__Prompt Category__: {promptcategory}", help="Pre-trained models can execute many tasks. This label identifies the actual LLM task.")
if len(directive) > 1000:
with st.expander("Directive", expanded=False):
st.markdown(f":orange[__Directive__:] {directive}", help="(Main instructions such as format, expected output)")
else:
st.markdown(f":orange[__Directive__:] {directive}", help="(Main instructions such as format, expected output)")
st.markdown(f":orange[__Task__: (Actual Data Input and Context information)]", help="(Actual Data Input and Context information)")
user_task = st.text_area(label="Task:", value=task, key=f"user_task_{tab}", label_visibility='collapsed', height=50)
# Combine the directive and task into a single user prompt
user_prompt = directive + " " + user_task
# Retrieve the max token limit for the selected model from the DataFrame
max_tokens_row = df[df['model'] == selected_model]
if not max_tokens_row.empty:
max_tokens = int(max_tokens_row['max_tokens'].iloc[0])
else:
max_tokens = None
# OpenAI example token count from the function defined above
# https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
# This section merges the system prompt and user prompt into a single list of messages
formatted_user_prompt = [
{"role": "system", "content": default_system_prompt},
{"role": "user", "content": user_prompt}
]
with st.expander("Input Token Information", expanded=False):
# This section counts the number of tokens in the system prompt and user prompt
tokens_used = num_tokens_from_messages(formatted_user_prompt, selected_model)
tokens_user = tokens_used - tokens_system
costs = calculate_cost(tokens_used, selected_model, data)
st.markdown(f"{tokens_used} input tokens counted (incl. #{tokens_system} tokens for system prompt and #{tokens_user} tokens for user prompt) using __tiktoken__ library from [OpenAI Platform](https://platform.openai.com/tokenizer). The cost of these input tokens is estimated for the chosen __{selected_model}__ model to be ${costs:.6f} (USD).")
# Display the message explaining the token limits
max_words = round(max_tokens * 0.75)
estimated_pages = round(max_words / estimate_words_per_page)
used_words = round(tokens_used * 0.75)
remaining_words = round(max_words - used_words)
token_url="[tokens](https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them)"
if max_tokens:
remaining_tokens = max_tokens - tokens_used
can_execute = True
btn_label = "Execute API Request"
if tokens_used >= max_tokens:
can_execute = False
btn_label = "Cannot Execute API Request"
# Display warning in red if tokens_used exceeds or matches the max_tokens
st.markdown(f"<span style='color:red'>Warning: Your input prompt uses {tokens_used} {token_url} which meets or exceeds the limit of {max_tokens} tokens (~{estimated_pages} [pages](https://learnpar.com/word-count-calculator/)) for the selected model. Please reduce the size of input prompt.</span>", unsafe_allow_html=True)
else:
st.markdown(f"Depending on the model used, API requests can use up to {max_tokens} {token_url} (~{max_words} words or ~{estimated_pages} [pages](https://learnpar.com/word-count-calculator/)) shared between input prompt and completion message. If your input prompt is {tokens_used} token (~{used_words} words), your completion can be {remaining_tokens} tokens (~{remaining_words} words) at most.")
else:
st.markdown("The selected model's {token_url} limit is unknown. Please ensure you stay within the token limit for the model you're using.")
c1, c2 = st.columns(2)
with c1:
# Toggle button to enable or disable Elasticsearch memory usage
es = None
if os.getenv("ES_INSTALLED") == "True":
# Connect to Elasticsearch
es, es_connected = connect_to_elasticsearch()
else:
# Disconnect from Elasticsearch
es_connected = disconnect_from_elasticsearch(es)
# Now, you can use `es` for queries if `es_connected` is True
if es_connected:
st.write("Connected to Elasticsearch.")
else:
st.write("Not Connected to Elasticsearch.")
# Prepare Processing section
st.sidebar.divider()
# Initialize session state variables
if 'execute_button_pressed' not in st.session_state:
st.session_state.execute_button_pressed = False
if 'result' not in st.session_state:
st.session_state.result = None
if 'conversation_piece' not in st.session_state:
st.session_state.conversation_piece = None
# Initialize session state for edited_result if it doesn't exist
if 'edited_result' not in st.session_state:
st.session_state.edited_result = None
# Create a tab selector with different tab names
special_tabs = ["Show Response", "Edit Response"]
# Initialize session state variables
if 'my_special_tab' not in st.session_state:
st.session_state.my_special_tab = "Show Response" # set the default value
# This section executes the API call and return the result, prompt tokens and completion tokens
with c2:
if st.button(btn_label, key=f"execute_button_{tab}") and can_execute:
st.session_state.execute_button_pressed = True
# st.session_state.edited_result = None
start_time = time.time()
result, conversation_piece, prompt_tokens, completion_tokens = send_to_openai2(tabs.index(tab), user_prompt, selected_model)
st.session_state.result = result # Assuming 'result' is updated here
st.session_state.edited_result = result # When making a new call set edited response
st.session_state.conversation_piece = conversation_piece # Assuming 'conversation_piece' is updated here
elapsed_time = time.time() - start_time
minutes, seconds = divmod(elapsed_time, 60)
st.sidebar.write(f"Processing took {seconds:.2f} seconds.")
st.sidebar.markdown(f"Prompt tokens used: {prompt_tokens}")
st.sidebar.markdown(f"Completion tokens delivered: {completion_tokens}")
# Extract pricing data for the selected model from df
input_token_cost = df[df['model'] == selected_model]['tokens_input_per_1K'].iloc[0]
output_token_cost = df[df['model'] == selected_model]['tokens_output_per_1K'].iloc[0]
# Convert the cost to float (assuming the costs in df are given in the format "$x.xx")
input_token_cost_float = float(input_token_cost.replace("$", ""))
output_token_cost_float = float(output_token_cost.replace("$", ""))
# Calculate the cost for prompt_tokens and completion_tokens
prompt_cost = prompt_tokens * input_token_cost_float / 1000
completion_cost = completion_tokens * output_token_cost_float / 1000
completion_cost = round(completion_cost,6)
prompt_cost = round(prompt_cost,6)
# Sum the costs to get the total cost
total_cost = prompt_cost + completion_cost
total_cost = round(total_cost,6)
# Update the global cumulative cost
st.session_state.cumulative_cost += total_cost
# Display the total cost in the Streamlit sidebar
st.sidebar.markdown(f"___Total cost of this API call: ${total_cost:.6f} USD___")
# Display the cumulative cost on the sidebar
st.sidebar.markdown(f"___Cumulative cost of all API calls for this session: ${st.session_state.cumulative_cost:.6f} USD___")
st.session_state.prompt_tokens = prompt_tokens
st.session_state.completion_tokens = completion_tokens
st.session_state.prompt_cost = prompt_cost
st.session_state.completion_cost = completion_cost
st.session_state.total_cost = total_cost
if st.session_state.execute_button_pressed:
with col2:
with st.container():
st.session_state.my_special_tab = "Edit Response"
b1, b2 = st.columns(2)
with b1:
if st.button("Show Response"):
st.session_state.my_special_tab = "Show Response"
with b2:
if b2.button("Edit Response"):
st.session_state.my_special_tab = "Edit Response"
print(f'Value of execute_button_pressed: {st.session_state.execute_button_pressed}')
print(f'Value of my_special_tab: {st.session_state.my_special_tab}')
if st.session_state.execute_button_pressed and st.session_state.my_special_tab == "Edit Response":
with col2:
print(f"Debug: st.session_state.result = {st.session_state.result}") # Debug
print(f"Debug: st.session_state.edited_result = {st.session_state.edited_result}") # Debug
if not 'store_button_clicked' in st.session_state:
st.session_state.store_button_clicked = False
if not 'edited_result' in st.session_state:
st.session_state.edited_result = st.session_state.result
with st.form(key=f"form_{tab}"):
print('Inside the form')
# Your existing widgets go here
st.markdown("### Edit Response")
edited_result = st.text_area(label="Edit AI Response:", value=st.session_state.edited_result, height=750, label_visibility="collapsed")
if os.getenv("ES_INSTALLED") == "True":
# Set label store in memory
label = "Store Response in Search Memory"
else:
label = "Generate Search Memory Record"
feedback_options = ["none","accepted", "corrected","rejected"]
feedback = st.selectbox("Revision Quality:", feedback_options)
usernote = st.text_input("Revision Note:", "", max_chars=100)
st.session_state.store_button_clicked = st.form_submit_button(label=label)
if st.session_state.store_button_clicked:
print('Button clicked, do something')
st.session_state.edited_result = edited_result
st.session_state.usernote = usernote
if not st.session_state.store_button_clicked:
print('Button NOT clicked, do something')
st.session_state.edited_result = st.session_state.result
print("Before store_button condition")
if st.session_state.store_button_clicked:
print(">>>Inside store_button_clicked condition<<<")
# st.session_state.my_special_tab = "Show Response"
# Do something when the form is submitted
final_data = {
"conversation_piece": st.session_state.conversation_piece,
"edited_response": edited_result,
"feedback": feedback,
"promptcategory": promptcategory,
"collection_id": selected_collection_id,
"usermessage_id": message_id,
"timestamp": datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"),
"model": selected_model,
"userskillfocus": default_userskillfocus,
"prompt_tokens": st.session_state.prompt_tokens,
"completion_tokens": st.session_state.completion_tokens,
"prompt_cost": st.session_state.prompt_cost,
"completion_cost": st.session_state.completion_cost,
"total_cost": st.session_state.total_cost,
"username": st.session_state.username,
"usernote": usernote
}
# Calculate similarity score
similarity_score = calculate_similarity(final_data["conversation_piece"]["model_response"],
final_data["edited_response"])
# Round similarity score to 2 decimal places
similarity_score = round(similarity_score, 2)
if similarity_score < 1:
final_data["feedback"] = "corrected"
if similarity_score == 1:
final_data["feedback"] = "accepted"
# Add similarity score to final_data
final_data["similarityscore"] = similarity_score
# Show the final data that will be stored in Elasticsearch
st.write("JSON Object For Elasticsearch Store:")
st.json(final_data)
# Check if the es is connected and elasticsearch index exists
if os.getenv("ES_INSTALLED") == "True" and not es.indices.exists(index=index_name):
# If the index does not exist, create it
mapping_schema = {
"settings": {
"analysis": {
"analyzer": {
"custom_english_analyzer": {
"type": "custom",
"tokenizer": "standard",
"filter": ["lowercase", "english_stemmer"]
}
},
"filter": {
"english_stemmer": {
"type": "stemmer",
"language": "english"
}
}
}
},
"mappings": {
"properties": {
"conversation_piece": {
"type": "nested",
"properties": {
"original_prompt": {
"type": "nested",
"properties": {
"system": {"type": "text", "analyzer": "custom_english_analyzer"},
"user": {"type": "text", "analyzer": "custom_english_analyzer"}
}
},
"model_response": {"type": "text", "analyzer": "custom_english_analyzer"}
}
},
"edited_response": {"type": "text", "analyzer": "custom_english_analyzer"},
"feedback": {"type": "keyword"},
"promptcategory": {"type": "keyword"},
"collection_id": {"type": "keyword"},
"usermessage_id": {"type": "long"},
"timestamp": {"type": "date"},
"model": {"type": "keyword"},
"similarityscore": {"type": "scaled_float", "scaling_factor": 100},
"userskillfocus": {"type": "keyword"},
"prompt_tokens": {"type": "integer"},
"completion_tokens": {"type": "integer"},
"prompt_cost": {"type": "scaled_float", "scaling_factor": 100},
"completion_cost": {"type": "scaled_float", "scaling_factor": 100},
"total_cost": {"type": "scaled_float", "scaling_factor": 100},
"username": {"type": "keyword"},
"usernote": {"type": "text", "analyzer": "custom_english_analyzer"},
"usertag": {"type": "keyword"}
}
}
}
es.indices.create(index=index_name, body=mapping_schema)
# Store the document to elasticsearch
try:
res = es.index(index=index_name, body=final_data)
if res['result'] == 'created' or res['result'] == 'updated':
st.sidebar.success(f"Document successfully stored in user index {index_name}.")
else:
st.sidebar.error("Document could not be written for an unknown reason.")
except es_exceptions.ElasticsearchException as e:
st.sidebar.error(f"An error occurred: {e}")
else:
st.sidebar.error(f"Cannot Store Data, Elasticsearch is not connected.")
# Reset the state
# st.session_state.execute_button_pressed = False
# st.session_state.result = ""
# st.session_state.conversation_piece = ""
# Show a success message
# success_placeholder = st.empty()
# success_placeholder.success("Action performed successfully!")
# Make the success message disappear after 5 seconds
# time.sleep(5)
# success_placeholder.empty()
print("After store_button condition")
# Display the appropriate result in "Show Response"
if st.session_state.my_special_tab == "Show Response" and (st.session_state.edited_result is not None or st.session_state.edited_result is not None):
with col2:
with st.container():
# Display edited_result if it exists, otherwise display original result
display_result = st.session_state.edited_result if st.session_state.edited_result else st.session_state.result
st.markdown(display_result, unsafe_allow_html=True)
| [
"usermessages",
"promptcategory",
"systemmessage",
"PLACEHOLDER PLACEHOLDER",
"./helpers/promptcategory.csv",
"[PLACEHOLDER, PLACEHOLDER]",
"prompt_tokens"
] |
2024-01-10 | fedenolasco/ai-explains | pages~4_Search%20Memory.py | import streamlit as st
import time
import os
import json
from dotenv import load_dotenv
import pandas as pd
import openai
import tiktoken
import time
from datetime import datetime
from PIL import Image
from elasticsearch import Elasticsearch, exceptions as es_exceptions
import streamlit as st
load_dotenv() # Load environment variables from the .env file
# get the current index name from the environment variable
index_name = os.getenv("ES_INDEX")
favicon = Image.open("images/robot-icon2.png")
st.set_page_config(page_title='AI-augments', page_icon=favicon, layout="wide")
# Read the contents from the CSS file
with open("css/styles.css", "r") as f:
css = f.read()
# set collections_path
collections_path = "./collections"
# Get all the JSON files from the "collections" subfolder
#collection_files = [f for f in os.listdir(collections_path) if f.endswith('.json')]
# List the collection files and sort them by last modified time (newest first)
collection_files = sorted([f for f in os.listdir(collections_path) if f.endswith('.json')],
key=lambda x: os.path.getmtime(os.path.join(collections_path, x)),
reverse=True)
# Load the collections into a dictionary
collections = {}
for file in collection_files:
with open(os.path.join(collections_path, file), 'r') as f:
collection = json.load(f)
collections[collection['collectionid']] = collection
def get_usermessage_title(collection_id, usermessage_id, collections):
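    """Return the title of the user message with the given id in the given collection, or None if not found."""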
collection = collections.get(collection_id)
if not collection:
return None
user_messages = collection.get("usermessages", [])
for user_message in user_messages:
if user_message.get("id") == usermessage_id:
return user_message.get("title")
return None
# Include the CSS in the Streamlit app
st.markdown(f"<style>{css}</style>", unsafe_allow_html=True)
def connect_to_elasticsearch():
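    """Connect to the local Elasticsearch instance using credentials from the .env file.
    Returns (client, True) on success or (None, False) on failure."""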
# Load environment variables from the .env file
load_dotenv()
# Retrieve environment variables
es_ca_cert_path = os.getenv("ES_CA_CERT_PATH")
es_user = os.getenv("ES_USER")
es_password = os.getenv("ES_PASSWORD")
# Connect to the Elasticsearch cluster
es = Elasticsearch("https://localhost:9200",
ca_certs=es_ca_cert_path,
basic_auth=(es_user, es_password))
try:
# Try to get info from the Elasticsearch cluster
info = es.info()
print("Successfully connected to Elasticsearch!")
print("Cluster Info:", info)
return es, True
except Exception as e:
print(f"An error occurred: {e}")
return None, False
def disconnect_from_elasticsearch(es):
if es is not None:
try:
es.transport.connection_pool.close()
print("Disconnected from Elasticsearch.")
return True
except Exception as e:
print(f"An error occurred while disconnecting: {e}")
return False
else:
print("No active Elasticsearch client to disconnect.")
return False
# Get Windows username
username = os.getlogin()
st.title("Search Memory")
st.info("This is a demo of the Search Memory feature with Elasticsearch. If elasticsearch connection fails, you may not see the search box.")
st.sidebar.markdown("""
### What is Search Memory?
Search Memory is your personal search assistant that stores and retrieves your previous generative AI queries and results. It helps you:
- Keep track of your past searches
- Reduce API calls and costs
- Quickly find and revisit important information
- Gain insights by comparing new and old search results
Simply enter your search query to get started!
""")
st.sidebar.markdown("""
### How to Use the Search UI
#### Search Box
The search box at the top is where you can type in your query. As you type, the system will automatically display the top 5 most relevant results from your search history.
#### Results Section
Each result is displayed in the following format:
- **System and User Prompts**: Shows the context of the conversation where the response was generated.
- **Edited Response**: The model's human-edited response (when corrected by the user) based on the prompt.
- **Prompt Category**: Indicates the type of query, such as 'programming', 'general knowledge', etc.
- **Similarity Score**: A numerical score between 0 and 1 that represents how similar the model's response is to the edited response. A higher score indicates greater similarity.
- **Feedback**: A thumbs up or thumbs down button that allows you to provide feedback on the quality of the response. This feedback is used to improve the model's performance.
""")
es = None
if os.getenv("ES_INSTALLED") == "True":
# Connect to Elasticsearch
es, es_connected = connect_to_elasticsearch()
else:
# Disconnect from Elasticsearch
es_connected = disconnect_from_elasticsearch(es)
# Now, you can use `es` for queries if `es_connected` is True
print(f"es_connected: {es_connected}")
if es_connected:
st.sidebar.success("Connected to Elasticsearch.")
# 1. Check if the user index exists
if not es.indices.exists(index=index_name):
st.sidebar.warning(f"You have not yet stored user prompts under your user index {index_name}. You can do that if you connect to Elasticsearch and save a first user prompt in your search memory.")
else:
# Get the document count for the index
document_count = es.count(index=index_name)['count']
# Display the count in Streamlit
st.write(f"Total number of documents on this user memory: {document_count} document(s).")
# 2. Create a search box for the user to enter their query
user_query = st.text_input("🔍 __Enter your search query:__", "")
if user_query:
# Perform Elasticsearch search query
search_result = es.search(
index=index_name,
body={
"query": {
"multi_match": {
"query": user_query,
"fields": [
"conversation_piece.original_prompt.user",
"conversation_piece.original_prompt.system",
"edited_response",
"promptcategory",
"usernote",
"feedback",
"model",
"userskillfocus"
]
}
},
"size": 5
}
)
promptcolor = "#D3D3D3" # light gray
responsecolor = "#FAFAD2" # light goldenrod yellow
footercolor = "#272829" # grayish purple
fontcolor = "#D8D9DA" # black
# Display search results
#for hit in search_result['hits']['hits']:
# Check if there are any hits
if len(search_result['hits']['hits']) == 0:
st.warning("No search results found.")
else:
for hit_num, hit in enumerate(search_result['hits']['hits'], 1):
# Display original_prompt in plain format
st.markdown(f'## :orange[Prompt Result {hit_num}]')
# Display the timestamp if available
timestamp = hit["_source"].get("timestamp", "no publishing date") # Using .get() to avoid KeyError if the field is not present
collection_id = hit["_source"].get("collection_id", "unknown")
usermessage_id = hit["_source"].get("usermessage_id", "unknown")
usermessage_title = get_usermessage_title(collection_id, usermessage_id, collections)
# If usermessage_title is None, it won't display anything
display_usermessage_title = f"{usermessage_title}" if usermessage_title else ""
# Display the timestamp if available
timestamp_str = hit["_source"].get("timestamp", None)
if timestamp_str:
# Assuming the timestamp is in ISO 8601 format
timestamp_dt = datetime.fromisoformat(timestamp_str)
# Format the date as "mmm d, yyyy"
formatted_timestamp = timestamp_dt.strftime('%b %d, %Y')
# Extract the Elasticsearch document ID
doc_id = hit.get("_id", "unknown")
# Display the timestamp, username, collection ID, user message ID, and Elasticsearch document ID
st.markdown(f'Published on {formatted_timestamp} | Username: {hit["_source"].get("username", "unknown")} | Collection ID: {collection_id} | User Message ID: {usermessage_id} | Doc ID: {doc_id}')
# Extract the model information
model_info = hit["_source"].get("model", "unknown")
st.markdown(f'#### {display_usermessage_title} ({model_info})')
# Insert a thick horizontal line to separate each result
st.markdown('<hr style="border:1px solid orange;padding:0rem;margin:0rem">', unsafe_allow_html=True)
else:
st.markdown(f'')
# Display promptcategory and similarity score with different colors
st.markdown(f'<div style="background-color:{footercolor}; padding:10px; color:{fontcolor}; border-radius: 0px;"><b>Prompt Category:</b> {hit["_source"].get("promptcategory", "None")} | <b>Similarity Score:</b> {hit["_source"]["similarityscore"]} | <b>Feedback:</b> {hit["_source"]["feedback"]} | <b>Skill Focus:</b> {hit["_source"]["userskillfocus"][0]} | <b>Costs USD:</b> {hit["_source"].get("total_cost", "unknown")}</div>', unsafe_allow_html=True)
# Display edited_response in markdown format
st.markdown("")
st.markdown(f'{hit["_source"]["edited_response"]}')
usernote = hit["_source"].get("usernote")
if usernote and usernote != "None": # Check if usernote exists and is not the string "None"
st.markdown(f':memo:__Revision__: {usernote}')
else:
st.markdown(f'')
with st.expander("Show Original Prompt"):
st.markdown(f':orange_book: __Prompt:__ {hit["_source"]["conversation_piece"]["original_prompt"]["system"]} {hit["_source"]["conversation_piece"]["original_prompt"]["user"]}')
# Create an expander for showing the JSON object
#with st.expander("Show JSON"):
# st.json(hit)
st.code({hit["_source"]["edited_response"]})
# Insert a thick horizontal line to separate each result
st.markdown('<hr style="border:2px solid gray;margin-top:0rem">', unsafe_allow_html=True)
else:
st.sidebar.error("Not Connected to Elasticsearch.")
st.markdown("""
## Install Elasticsearch Locally Using Docker
If you see these instructions, Elasticsearch is either not installed or not currently running on your local machine.
* If you are not running docker on your machine, install it from here https://docs.docker.com/get-docker/.
* Then follow instructions here https://www.elastic.co/guide/en/elasticsearch/reference/current/run-elasticsearch-locally.html
* For help, there is a sample output on elasticsearch installation in this file __./config/elasticsearch.txt__
* After installation of elasticsearch and kibana make sure to change the following in the .env file
* ES_INSTALLED=True
* ES_USER=elastic
* ES_PASSWORD=changeme
* ES_CA_CERT_PATH=certs/ca/ca.crt
Note: ES_CA_CERT_PATH is the path to the ca.crt file in the certs folder of the Elasticsearch installation.
In Docker, the elasticsearch container keeps http_ca.crt under /usr/share/elasticsearch/config/certs/.
Take a copy of it, store it on your local machine, and update ES_CA_CERT_PATH in the .env file accordingly.
* Then start elasticsearch and kibana in your docker container.
* When elasticsearch is running, you should see in the sidebar the message "Connected to Elasticsearch".
""")
| [
"#D3D3D3"
] |
2024-01-10 | 957057/QQChannelChatGPT | model~provider~provider_openai_official.py | import openai
import json
import time
import os
import sys
from cores.database.conn import dbConn
from model.provider.provider import Provider
import threading
from util import general_utils as gu
abs_path = os.path.dirname(os.path.realpath(sys.argv[0])) + '/'
key_record_path = abs_path + 'chatgpt_key_record'
class ProviderOpenAIOfficial(Provider):
def __init__(self, cfg):
self.key_list = []
if 'api_base' in cfg and cfg['api_base'] != 'none' and cfg['api_base'] != '':
openai.api_base = cfg['api_base']
print(f"设置 api_base 为: {openai.api_base}")
        # If cfg['key'] contains a string of length 1, the config format is wrong; raise an error immediately
for key in cfg['key']:
if len(key) == 1:
input("检查到了长度为 1 的Key。配置文件中的 openai.key 处的格式错误 (符号 - 的后面要加空格),请退出程序并检查配置文件,按回车跳过。")
raise BaseException("配置文件格式错误")
if cfg['key'] != '' and cfg['key'] != None:
self.key_list = cfg['key']
else:
input("[System] 请先去完善ChatGPT的Key。详情请前往https://beta.openai.com/account/api-keys")
# init key record
self.init_key_record()
self.chatGPT_configs = cfg['chatGPTConfigs']
gu.log(f'加载ChatGPTConfigs: {self.chatGPT_configs}')
self.openai_configs = cfg
        # Session cache
self.session_dict = {}
        # Maximum number of cached tokens
self.max_tokens = cfg['total_tokens_limit']
        # History persistence interval
self.history_dump_interval = 20
        # Load chat history
try:
db1 = dbConn()
for session in db1.get_all_session():
self.session_dict[session[0]] = json.loads(session[1])['data']
gu.log("读取历史记录成功")
except BaseException as e:
gu.log("读取历史记录失败,但不影响使用", level=gu.LEVEL_ERROR)
        # Load statistics
if not os.path.exists(abs_path+"configs/stat"):
with open(abs_path+"configs/stat", 'w', encoding='utf-8') as f:
json.dump({}, f)
self.stat_file = open(abs_path+"configs/stat", 'r', encoding='utf-8')
global count
res = self.stat_file.read()
if res == '':
count = {}
else:
try:
count = json.loads(res)
except BaseException:
pass
        # Start the history-dump timer thread
threading.Thread(target=self.dump_history, daemon=True).start()
        # Personality
self.now_personality = {}
    # Timer that periodically dumps chat history ~ Soulter
def dump_history(self):
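        """Periodically persist all cached sessions to the database (runs in a daemon thread)."""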
time.sleep(10)
db = dbConn()
while True:
try:
# print("转储历史记录...")
for key in self.session_dict:
# print("TEST: "+str(db.get_session(key)))
data = self.session_dict[key]
data_json = {
'data': data
}
if db.check_session(key):
db.update_session(key, json.dumps(data_json))
else:
db.insert_session(key, json.dumps(data_json))
# print("转储历史记录完毕")
except BaseException as e:
print(e)
            # Dump again after 10 * history_dump_interval seconds
time.sleep(10*self.history_dump_interval)
def text_chat(self, prompt, session_id = None):
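        """Send a prompt for the given session: wrap it with the cached context, call the chat API
        (switching keys and truncating the prompt on errors), trim the cache when it exceeds
        max_tokens, store the new question/answer pair, and return the assistant's reply."""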
if session_id is None:
session_id = "unknown"
if "unknown" in self.session_dict:
del self.session_dict["unknown"]
        # Session handling
if session_id not in self.session_dict:
self.session_dict[session_id] = []
fjson = {}
try:
f = open(abs_path+"configs/session", "r", encoding="utf-8")
fjson = json.loads(f.read())
f.close()
except:
pass
finally:
fjson[session_id] = 'true'
f = open(abs_path+"configs/session", "w", encoding="utf-8")
f.write(json.dumps(fjson))
f.flush()
f.close()
cache_data_list, new_record, req = self.wrap(prompt, session_id)
gu.log(f"CACHE_DATA_: {str(cache_data_list)}", level=gu.LEVEL_DEBUG, max_len=99999)
gu.log(f"OPENAI REQUEST: {str(req)}", level=gu.LEVEL_DEBUG, max_len=9999)
retry = 0
response = None
err = ''
        # Truncation ratio
truncate_rate = 0.75
while retry < 15:
try:
response = openai.ChatCompletion.create(
messages=req,
**self.chatGPT_configs
)
break
except Exception as e:
if 'You exceeded' in str(e) or 'Billing hard limit has been reached' in str(e) or 'No API key provided' in str(e) or 'Incorrect API key provided' in str(e):
gu.log("当前Key已超额或异常, 正在切换", level=gu.LEVEL_WARNING)
self.key_stat[openai.api_key]['exceed'] = True
self.save_key_record()
response, is_switched = self.handle_switch_key(req)
if not is_switched:
                        # All keys are over quota or invalid
raise e
else:
break
elif 'maximum context length' in str(e):
gu.log("token超限, 清空对应缓存,并进行消息截断")
self.session_dict[session_id] = []
prompt = prompt[:int(len(prompt)*truncate_rate)]
truncate_rate -= 0.05
cache_data_list, new_record, req = self.wrap(prompt, session_id)
elif 'Limit: 3 / min. Please try again in 20s.' in str(e) or "OpenAI response error" in str(e):
time.sleep(30)
continue
else:
gu.log(str(e), level=gu.LEVEL_ERROR)
time.sleep(3)
err = str(e)
retry+=1
if retry >= 15:
gu.log(r"如果报错, 且您的机器在中国大陆内, 请确保您的电脑已经设置好代理软件(梯子), 并在配置文件设置了系统代理地址。详见https://github.com/Soulter/QQChannelChatGPT/wiki/%E4%BA%8C%E3%80%81%E9%A1%B9%E7%9B%AE%E9%85%8D%E7%BD%AE%E6%96%87%E4%BB%B6%E9%85%8D%E7%BD%AE", max_len=999)
raise BaseException("连接出错: "+str(err))
self.key_stat[openai.api_key]['used'] += response['usage']['total_tokens']
self.save_key_record()
# print("[ChatGPT] "+str(response["choices"][0]["message"]["content"]))
chatgpt_res = str(response["choices"][0]["message"]["content"]).strip()
current_usage_tokens = response['usage']['total_tokens']
gu.log(f"OPENAI RESPONSE: {response['usage']}", level=gu.LEVEL_DEBUG, max_len=9999)
        # If over the token limit, keep as many entries as possible until the total is below max_tokens
if current_usage_tokens > self.max_tokens:
t = current_usage_tokens
index = 0
while t > self.max_tokens:
if index >= len(cache_data_list):
break
                # Keep personality entries
if cache_data_list[index]['type'] != 'personality':
t -= int(cache_data_list[index]['single_tokens'])
del cache_data_list[index]
else:
index += 1
            # Update related fields after deletion
self.session_dict[session_id] = cache_data_list
# cache_prompt = get_prompts_by_cache_list(cache_data_list)
        # Append the new record to the cached prompt
new_record['AI'] = {
'role': 'assistant',
'content': chatgpt_res,
}
new_record['usage_tokens'] = current_usage_tokens
if len(cache_data_list) > 0:
new_record['single_tokens'] = current_usage_tokens - int(cache_data_list[-1]['usage_tokens'])
else:
new_record['single_tokens'] = current_usage_tokens
cache_data_list.append(new_record)
self.session_dict[session_id] = cache_data_list
return chatgpt_res
def image_chat(self, prompt, img_num = 1, img_size = "1024x1024"):
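        """Generate img_num images for the prompt via the OpenAI image API and return their URLs."""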
retry = 0
image_url = ''
while retry < 5:
try:
# print("test1")
response = openai.Image.create(
prompt=prompt,
n=img_num,
size=img_size
)
# print("test2")
image_url = []
for i in range(img_num):
image_url.append(response['data'][i]['url'])
break
except Exception as e:
gu.log(str(e), level=gu.LEVEL_ERROR)
if 'You exceeded' in str(e) or 'Billing hard limit has been reached' in str(
e) or 'No API key provided' in str(e) or 'Incorrect API key provided' in str(e):
gu.log("当前Key已超额或者不正常, 正在切换", level=gu.LEVEL_WARNING)
self.key_stat[openai.api_key]['exceed'] = True
self.save_key_record()
                    # There is no chat request to replay for image generation, so just
                    # rotate to a usable key and retry the image request with it.
                    _, is_switched = self.handle_switch_key([])
                    if not is_switched:
                        # All keys are over quota or invalid
                        raise e
                    else:
                        # Retry the image request with the newly selected key
                        continue
retry += 1
if retry >= 5:
raise BaseException("连接超时")
return image_url
def forget(self, session_id = None) -> bool:
if session_id is None:
return False
self.session_dict[session_id] = []
return True
    '''
    Get the cached conversation as prompt text (optionally paginated and with dividers).
    '''
def get_prompts_by_cache_list(self, cache_data_list, divide=False, paging=False, size=5, page=1):
prompts = ""
if paging:
page_begin = (page-1)*size
page_end = page*size
if page_begin < 0:
page_begin = 0
if page_end > len(cache_data_list):
page_end = len(cache_data_list)
cache_data_list = cache_data_list[page_begin:page_end]
for item in cache_data_list:
prompts += str(item['user']['role']) + ":\n" + str(item['user']['content']) + "\n"
prompts += str(item['AI']['role']) + ":\n" + str(item['AI']['content']) + "\n"
if divide:
prompts += "----------\n"
return prompts
def get_user_usage_tokens(self,cache_list):
usage_tokens = 0
for item in cache_list:
usage_tokens += int(item['single_tokens'])
return usage_tokens
    '''
    Get usage statistics (guild count, message counts, session count).
    '''
def get_stat(self):
try:
f = open(abs_path+"configs/stat", "r", encoding="utf-8")
fjson = json.loads(f.read())
f.close()
guild_count = 0
guild_msg_count = 0
guild_direct_msg_count = 0
for k,v in fjson.items():
guild_count += 1
guild_msg_count += v['count']
guild_direct_msg_count += v['direct_count']
session_count = 0
f = open(abs_path+"configs/session", "r", encoding="utf-8")
fjson = json.loads(f.read())
f.close()
for k,v in fjson.items():
session_count += 1
return guild_count, guild_msg_count, guild_direct_msg_count, session_count
except:
return -1, -1, -1, -1
    # Wrap the prompt with the cached context
def wrap(self, prompt, session_id):
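        """Build the request message list from the session's cached context plus the new user prompt.
        Returns (context, new_record, request_messages)."""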
        # Get the cached context
context = self.session_dict[session_id]
new_record = {
"user": {
"role": "user",
"content": prompt,
},
"AI": {},
'type': "common",
'usage_tokens': 0,
}
req_list = []
for i in context:
if 'user' in i:
req_list.append(i['user'])
if 'AI' in i:
req_list.append(i['AI'])
req_list.append(new_record['user'])
return context, new_record, req_list
def handle_switch_key(self, req):
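        """Rotate openai.api_key to the next key that is not marked as exceeded; if req is non-empty,
        re-send the request with the new key and return (response, True). Returns (None, False)
        when every key is over quota or unusable."""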
# messages = [{"role": "user", "content": prompt}]
while True:
is_all_exceed = True
for key in self.key_stat:
if key == None:
continue
if not self.key_stat[key]['exceed']:
is_all_exceed = False
openai.api_key = key
gu.log(f"切换到Key: {key}, 已使用token: {self.key_stat[key]['used']}", level=gu.LEVEL_INFO)
if len(req) > 0:
try:
response = openai.ChatCompletion.create(
messages=req,
**self.chatGPT_configs
)
return response, True
except Exception as e:
if 'You exceeded' in str(e):
gu.log("当前Key已超额, 正在切换")
self.key_stat[openai.api_key]['exceed'] = True
self.save_key_record()
time.sleep(1)
continue
else:
gu.log(str(e), level=gu.LEVEL_ERROR)
else:
                        # Nothing to re-send; the key has been switched successfully
                        return None, True
if is_all_exceed:
gu.log("所有Key已超额", level=gu.LEVEL_CRITICAL)
return None, False
else:
gu.log("在切换key时程序异常。", level=gu.LEVEL_ERROR)
return None, False
def getConfigs(self):
return self.openai_configs
def save_key_record(self):
with open(key_record_path, 'w', encoding='utf-8') as f:
json.dump(self.key_stat, f)
def get_key_stat(self):
return self.key_stat
def get_key_list(self):
return self.key_list
def get_curr_key(self):
return openai.api_key
    # Add a key
def append_key(self, key, sponsor):
self.key_list.append(key)
self.key_stat[key] = {'exceed': False, 'used': 0, 'sponsor': sponsor}
self.save_key_record()
self.init_key_record()
    # Check whether a key is usable
def check_key(self, key):
pre_key = openai.api_key
openai.api_key = key
messages = [{"role": "user", "content": "1"}]
try:
response = openai.ChatCompletion.create(
messages=messages,
**self.chatGPT_configs
)
openai.api_key = pre_key
return True
except Exception as e:
pass
openai.api_key = pre_key
return False
    # Dump the keys in key_list into key_record and track usage data for each of them
def init_key_record(self):
        # Create the record file if it does not exist
if not os.path.exists(key_record_path):
with open(key_record_path, 'w', encoding='utf-8') as f:
json.dump({}, f)
        # Open chatgpt_key_record
with open(key_record_path, 'r', encoding='utf-8') as keyfile:
try:
self.key_stat = json.load(keyfile)
except Exception as e:
gu.log(str(e), level=gu.LEVEL_ERROR)
self.key_stat = {}
finally:
for key in self.key_list:
if key not in self.key_stat:
self.key_stat[key] = {'exceed': False, 'used': 0}
# if openai.api_key is None:
# openai.api_key = key
else:
# if self.key_stat[key]['exceed']:
# print(f"Key: {key} 已超额")
# continue
# else:
# if openai.api_key is None:
# openai.api_key = key
# print(f"使用Key: {key}, 已使用token: {self.key_stat[key]['used']}")
pass
if openai.api_key == None:
self.handle_switch_key("")
self.save_key_record()
| [
"\n",
"1",
"----------\n",
":\n",
"content"
] |
2024-01-10 | Adityasoni8898/Vid2Notes | Functions~NotesMaker.py | import openai
def gpt(api_key, transcript, language, description):
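    """Ask gpt-3.5-turbo to turn the transcript into point-wise notes (as JSON) in the requested
    language, optionally guided by the extra description, and return the raw model output."""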
openai.api_key = api_key
print("\n\nYou are almost there!")
print("Making into notes....")
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = [
{"role" : "user", "content" : "convert this transcribed text from a tutorial into good simple notes, write point wise with title content list for each point and write the notes in json."},
{"role" : "user", "content" : f"This is the langauge the notes are needed in {language} and this is some extra description for understanding, ignore if not understood {description}"},
{"role" : "user", "content" : transcript }
]
)
return response.choices[0].message.content | [
"convert this transcribed text from a tutorial into good simple notes, write point wise with title content list for each point and write the notes in json.",
"This is the langauge the notes are needed in PLACEHOLDER and this is some extra description for understanding, ignore if not understood PLACEHOLDER"
] |
2024-01-10 | Adityasoni8898/Vid2Notes | Functions~Transcription.py | import openai
def whisper(api_key, video_file):
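    """Transcribe the given audio/video file object with the whisper-1 model and return the text."""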
openai.api_key = api_key
print("\n\nReading the video....")
response = openai.Audio.transcribe("whisper-1", video_file)
return response.text | [] |
2024-01-10 | Hxyou/IdealGPT | blip_gpt_main.py | import os
import yaml
import argparse
import torch
import openai
from tqdm import tqdm
import pdb
from data import VCRSampler
from data import VESampler
from chat import VCRConversationTwoAgent
from chat import VEConversationTwoAgent
import random
def IdealGPT(vqa_model, dataset, data_ids, model, save_path='', max_n_rounds=5, print_mode='no', prompt_setting='v1a', temp_gpt=0.0):
"""
Conduct IdealGPT conversation
Args:
vqa_model : vqa model.
dataset: the dataset used to caption
data_ids (list): a list of sample ids in the dataset
        model (str): the model name used to ask questions. 'chatgpt' and 'gpt4' are mapped to the
            concrete OpenAI model names 'gpt-3.5-turbo' and 'gpt-4'.
        save_path (str): the path to save results. If it is empty, results are not saved.
        max_n_rounds (int): the max number of chat rounds
        print_mode (str): print mode. 'chat' for printing everything. 'bar' for printing everything but the chat process. 'no' for no printing
        prompt_setting (str): prompt template version used by the questioner
        temp_gpt (float): sampling temperature for the GPT questioner
"""
if model == 'chatgpt':
model = 'gpt-3.5-turbo'
elif model =='gpt4':
model = 'gpt-4'
all_predict_answer = []
all_answer_label = []
all_round_number = 0
for data_id in tqdm(data_ids, disable=print_mode!='no'):
result_path = os.path.join(save_path, 'result', '{}.yaml'.format(data_id))
# Skip if the result file exist.
if os.path.isfile(result_path):
continue
if print_mode != 'no':
print('Data ID {}'.format(data_id))
if type(dataset) == VCRSampler:
image_path, qa = dataset.fetch_data(data_id)
info = {'setting':
{
'id': data_id,
'question_id': qa['question_id'] if 'question_id' in qa else None,
'question': qa['question'].strip(),
'answer_choices':[answer_i.strip() for answer_i in qa['answer_choices']] if 'answer_choices' in qa else None,
'answer_label': str(qa['answer_label']) if 'answer_label' in qa else None,
'max_n_rounds': max_n_rounds,
'img_path': qa['img_path'] if 'img_path' in qa else None
}
}
if 'caption' in qa:
caption = qa['caption']
else:
caption = None
elif type(dataset) == VESampler:
image_path, ve_info = dataset.fetch_data(data_id)
info = {'setting':
{
'id': data_id,
'hypothesis': ve_info['hypothesis'].strip(),
'answer_label': str(ve_info['answer_label']) if 'answer_label' in ve_info else None,
'max_n_rounds': max_n_rounds,
'img_path': ve_info['img_path'] if 'img_path' in ve_info else None
}
}
if 'caption' in ve_info:
caption = ve_info['caption']
else:
caption = None
results = {}
# Initialize VQA Instance.
if type(dataset) == VCRSampler:
chat = VCRConversationTwoAgent(img=image_path,
vqa_model=vqa_model,
model=model,
question=info['setting']['question'],
answer_choices=info['setting']['answer_choices'],
prompt_setting=prompt_setting,
caption=caption,
temp_gpt=temp_gpt,
data_id=data_id,)
elif type(dataset) == VESampler:
chat = VEConversationTwoAgent(img=image_path,
vqa_model=vqa_model,
model=model,
question=info['setting']['hypothesis'],
answer_choices=['entailment', 'neutral', 'contradiction'],
prompt_setting=prompt_setting,
caption=caption,
temp_gpt=temp_gpt,
data_id=data_id)
used_round = chat.chatting(max_n_rounds, print_mode=print_mode)
results['predict_answer'] = chat.answer_predict
results['sub_questions'] = chat.sub_questions
results['sub_answers'] = chat.sub_answers
results['chat_history'] = chat.chat_history
results['total_tokens'] = chat.total_tokens
results['caption'] = chat.catpion
results['used_round'] = used_round
info['result'] = results
all_predict_answer.append(chat.answer_predict)
all_answer_label.append(str(info['setting']['answer_label']))
all_round_number += results['used_round']
if save_path:
with open(result_path, 'w') as f:
yaml.dump(info, f)
# Evaluation:
if type(dataset) == VCRSampler or type(dataset) == VESampler:
# Evaluate VCR and SNLI-VE by acc.
total_correct = 0
total_exceed_round = 0
for predict_i, gt_i in zip(all_predict_answer, all_answer_label):
if predict_i == gt_i:
total_correct += 1
if predict_i is None:
total_exceed_round += 1
acc = (total_correct*1.0) / len(data_ids)
print('Acc:{}%'.format(acc*100))
print('Average number of rounds:{}'.format(all_round_number*1.0/len(data_ids)))
exceed_round_ratio = (total_exceed_round*1.0) / len(data_ids)
print('Unknown Ratio:{}%'.format(exceed_round_ratio*100))
def parse():
parser = argparse.ArgumentParser(description='IdealGPT Args.')
parser.add_argument('--data_root', type=str, default='/home/haoxuan/data/vcr1/',
help='root path to the dataset')
parser.add_argument('--save_root', type=str, default='./exp_result/',
help='root path for saving results')
parser.add_argument("--data_subset", type=str, default=None, help="specify the subset of the dataset.")
parser.add_argument('--data_partition', type=str, default=None,
help='range of data used, in the format of numberA_numberB, A<=B')
parser.add_argument('--exp_tag', type=str, required=True,
help='tag for this experiment. caption results will be saved in save_root/exp_tag')
parser.add_argument('--dataset', type=str, default='vcr_val',
help='Names of the dataset to use in the experiment. Valid datasets include vcr_val, ve_dev. Default is vcr_val')
parser.add_argument('--max_n_rounds', type=int, default=4,
help='Max Number of QA rounds between GPT and BLIP-2. Default is 4.')
parser.add_argument('--model', type=str, default='chatgpt', choices=['chatgpt', 'gpt4'],
help='model used to ask question. can be gpt3, chatgpt, or its concrete tags in openai system')
parser.add_argument('--vqa_model', type=str, default='blip2_t5_xxl', choices=['blip2_t5_xxl', 'blip2_t5_xl', 'blip2_opt_6.7b', 'blip2_opt_2.7b', 'llava', 'minigpt4'],
help='model as Answerer.')
parser.add_argument('--device_id', type=int, default=0,
help='Which GPU to use.')
parser.add_argument('--prompt_setting', type=str, default='v1a',
help='Prompt Setting Version')
parser.add_argument('--openai_key', type=str, default='',
help='OpenAI Key for GPT-3.5/4 API')
parser.add_argument('--caption_path', type=str, default=None,
help='Caption path for images')
parser.add_argument('--temp_gpt', type=float, default=0.0,
help='Temperature for GPT')
parser.add_argument('--temp_vqa', type=float, default=0.001,
help='Temperature for VQA model (LLaVA and MiniGPT4), must be positive')
parser.add_argument('--seed', type=int, default=3, help='random seed')
args = parser.parse_args()
return args
def main(args):
# Set OpenAI
OPENAI_API_KEY = args.openai_key
openai.api_key = OPENAI_API_KEY
random.seed(args.seed)
# load the dataset
if 'vcr' in args.dataset:
dataset = VCRSampler(dataset_root=args.data_root,
dataset_name=args.dataset,
data_subset=args.data_subset,
data_partition=args.data_partition,
caption_path=args.caption_path)
elif 've' in args.dataset:
dataset = VESampler(dataset_root=args.data_root,
dataset_name=args.dataset,
data_subset=args.data_subset,
data_partition=args.data_partition,
caption_path=args.caption_path)
print('Finish loading data')
print('Start loading VQA model')
if 'blip2' in args.vqa_model:
from lib.blip2_lib import Blip2Lavis
if 't5' in args.vqa_model and '_xl' in args.vqa_model:
vqa_model = Blip2Lavis(name="blip2_t5", model_type="pretrain_flant5xl", device=torch.device("cuda:{}".format(args.device_id)))
elif 't5' in args.vqa_model and '_xxl' in args.vqa_model:
vqa_model = Blip2Lavis(name="blip2_t5", model_type="pretrain_flant5xxl", device=torch.device("cuda:{}".format(args.device_id)))
elif 'opt' in args.vqa_model and '6.7b' in args.vqa_model:
vqa_model = Blip2Lavis(name="blip2_opt", model_type="pretrain_opt6.7b", device=torch.device("cuda:{}".format(args.device_id)))
elif 'opt' in args.vqa_model and '2.7b' in args.vqa_model:
vqa_model = Blip2Lavis(name="blip2_opt", model_type="pretrain_opt2.7b", device=torch.device("cuda:{}".format(args.device_id)))
else:
            raise NotImplementedError(f'{args.vqa_model} not supported')
elif 'llava' in args.vqa_model:
from lib.llava_lib import LLAVA
vqa_model = LLAVA(temperature=args.temp_vqa)
elif 'minigpt4' in args.vqa_model:
from lib.minigpt4_lib import MINIGPT4
vqa_model = MINIGPT4(gpu_id=args.device_id, temperature=args.temp_vqa)
print('Finish loading VQA model {}'.format(args.vqa_model))
question_model = args.model
# preparing the folder to save results
save_path = os.path.join(args.save_root, f'{args.dataset}_{args.exp_tag}')
if not os.path.exists(save_path):
os.makedirs(os.path.join(save_path, 'result'))
with open(os.path.join(save_path, 'args.yaml'), 'w') as f:
yaml.dump(vars(args), f)
# start Conversation
IdealGPT(vqa_model,
dataset,
dataset.ids,
save_path=save_path,
max_n_rounds=args.max_n_rounds,
model=question_model,
print_mode='no',
prompt_setting=args.prompt_setting,
temp_gpt=args.temp_gpt)
if __name__ == '__main__':
args = parse()
main(args)
| [] |
2024-01-10 | Hxyou/IdealGPT | chat~call_gpt.py |
import openai
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
@retry(wait=wait_random_exponential(min=0.1, max=0.2), stop=stop_after_attempt(10))
def call_gpt(chatgpt_messages, model="gpt-3.5-turbo", temp_gpt=0.0):
response = openai.ChatCompletion.create(model=model, messages=chatgpt_messages, temperature=temp_gpt, max_tokens=512)
reply = response['choices'][0]['message']['content']
total_tokens = response['usage']['total_tokens']
return reply, total_tokens
| [] |
2024-01-10 | DenSinH/master-chef | src~webapp~cookbook~transform.py | import openai
import aiohttp
from bs4 import BeautifulSoup
import tldextract as tld
from dotenv import load_dotenv
load_dotenv()
import os
import re
import json
import random
from .utils import *
from .meta import *
from .thumbnail import get_thumbnail
client = openai.AsyncOpenAI(
api_key=os.environ["OPENAI_API_KEY"]
)
def _get_headers(url):
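    """Return request headers with a random browser User-Agent, unless the URL's domain is one
    that should be fetched without a User-Agent (e.g. Instagram/Facebook CDNs)."""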
_NO_USER_AGENT = {
"cdninstagram",
"ig",
"igsonar",
"facebook",
"instagram"
}
_USER_AGENTS = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0'
]
headers = {}
if tld.extract(url).domain.lower() not in _NO_USER_AGENT:
headers["User-Agent"] = random.choice(_USER_AGENTS)
return headers
MAX_RETRIES = 1
MODEL = "gpt-3.5-turbo-1106"
PROMPT = """
The following text is from a website, and it contains a recipe, possibly in Dutch, as well as unnecessary other text from the webpage.
The recipe contains information on the ingredients, the preparation and possibly nutritional information.
Convert the recipe to a JSON object with the following keys:
"name": the name of this recipe.
"ingredients": a list of dictionaries, with keys "ingredient", mapping to the name of the ingredient, and "amount" which is a string containing the amount of this ingredient needed including the unit, or
a null value if no specific amount is given.
For example, the ingredient "one onion" should yield {{'amount': '1', 'ingredient': 'onion'}}, and the ingredient "zout" should yield {{'amount': null, 'ingredient': 'zout'}}
and the ingredient "1el Komijn" should yield {{'amount': '1 el', 'ingredient': 'Komijn'}}, and "400gr tomaat" should yield {{'amount': '400 gr', 'ingredient': 'tomaat'}}.
"preparation": a list of strings containing the steps of the recipe.
"nutrition": null if there is no nutritional information in the recipe, or a list of dictionaries containing the keys "group", with the type
of nutrional information, and "amount": with the amount of this group that is contained in the recipe, as a string including the unit, so
"Fats 12gr" should yield {{'group': 'fats', 'amount': '12 gr'}}.
"people": the amount of people that can be fed from this meal as an integer, in case this information is present, otherwise null
"time": the time that this recipe takes to make in minutes as an integer, in case this information is present, otherwise null
"tags": interpret the recipe, and generate a list of at most 5 English strings that describe this recipe. For example, what the main ingredient is,
if it takes long or short to make, whether it is especially high or low in certain nutritional groups, tags like that. Make
sure the strings are in English.
Keep the language the same, except in the tags, and preferably do not change anything about the text in the recipe at all.
Only output the JSON object, and nothing else. You can do this!
Here comes the text:
{text}
"""
META_PROMPT = f"""
For this recipe, generate a JSON object containing meta information that classifies the recipe.
It should contain the following keys and values:
"language": One of {LANGUAGES}, depending on the language of the recipe.
"meal_type": One of {MEAL_TYPES} that best describes the meal.
"meat_type": A list of at most two of {MEAT_TYPES} that best describe the meal. Note that it is impossible for a recipe
to be both vegetarian and contain meat, and that "other" should never go with another meat type.
"carb_type": A list of at most two of {CARB_TYPES} that best describe the meal. Note that it is impossible for a recipe
to be have both "none" or "other" and any other carb type.
"cuisine": One of {CUISINE_TYPES} that best describes the meal.
"temperature": One of {TEMPERATURE_TYPES} that best describes the meal.
Please output only the JSON object and nothing else. You can do this!
"""
def fix_recipe(_recipe):
def _get_or_none(obj, key, typ):
return typ(obj[key]) if (key in obj and obj[key] is not None) else None
recipe = {}
if "name" not in _recipe:
raise CookbookError("Recipe has no name")
recipe["name"] = str(_recipe["name"])
for (key, typ) in [("time", int), ("people", int), ("url", str), ("thumbnail", str)]:
recipe[key] = _get_or_none(_recipe, key, typ)
recipe["ingredients"] = []
for ingredient in _recipe.get("ingredients", []):
recipe["ingredients"].append({
"amount": _get_or_none(ingredient, "amount", str),
"ingredient": str(ingredient["ingredient"])
})
recipe["preparation"] = []
for step in _recipe.get("preparation", []):
recipe["preparation"].append(str(step))
if "nutrition" in _recipe and _recipe["nutrition"] is not None:
recipe["nutrition"] = []
for group in _recipe.get("nutrition", []):
recipe["nutrition"].append({
"amount": _get_or_none(group, "amount", str),
"group": str(group["group"])
})
else:
recipe["nutrition"] = None
return recipe
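# For reference, fix_recipe normalises a parsed model reply such as
#   {"name": "Wraps", "ingredients": [{"ingredient": "onion", "amount": "1"}],
#    "preparation": ["Chop the onion."], "nutrition": None, "people": 2, "time": 30}
# keeping only the expected keys and coercing missing optional fields to None or empty lists.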
def fix_meta(_meta):
def _get_or(key, default=None, allowed_values=None):
if allowed_values is not None:
if _meta.get(key) in allowed_values:
return _meta.get(key)
return default
else:
            return _meta.get(key, default)  # dict.get takes the default positionally, not as a keyword
meta = {}
meta["language"] = _get_or("language", allowed_values=LANGUAGES)
meta["meal_type"] = _get_or("meal_type", default="other", allowed_values=MEAL_TYPES)
meta["meat_type"] = []
for meat_type in _meta.get("meat_type", []):
if meat_type in MEAT_TYPES:
meta["meat_type"].append(meat_type)
if len(meta["meat_type"]) >= 2:
break
if not meta["meat_type"]:
meta["meat_type"] = ["other"]
meta["carb_type"] = []
for carb_type in _meta.get("carb_type", []):
if carb_type in CARB_TYPES:
meta["carb_type"].append(carb_type)
if len(meta["carb_type"]) >= 2:
break
if not meta["carb_type"]:
meta["carb_type"] = ["other"]
meta["cuisine"] = _get_or("cuisine", default="other", allowed_values=CUISINE_TYPES)
meta["temperature"] = _get_or("temperature", allowed_values=TEMPERATURE_TYPES)
return meta
async def translate_url(url):
print(f"Retrieving url {url}")
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False), headers=_get_headers(url)) as session:
res = await session.get(url)
if not res.ok:
raise CookbookError(f"Could not get the specified url, status code {res.status}")
soup = BeautifulSoup(await res.text(), features="html.parser")
# remove comment sections from website
COMMENTS = ["comment", "opmerking"]
for attr in ["class", "id"]:
for element in soup.find_all(attrs={attr: re.compile(fr".*({'|'.join(COMMENTS)}).*", flags=re.IGNORECASE)}):
element.decompose()
text = re.sub(r"(\n\s*)+", "\n", soup.text)
recipe = await translate_page(text, url=url, thumbnail=get_thumbnail(soup))
return recipe
async def _chatgpt_json_and_fix(messages, fix):
for i in range(1 + MAX_RETRIES):
try:
chat_completion = await client.chat.completions.create(
model=MODEL,
messages=messages,
response_format={"type": "json_object"},
temperature=0.2,
)
except openai.BadRequestError as e:
if e.code == "context_length_exceeded":
raise
raise
reply = chat_completion.choices[0].message.content
try:
return reply, fix(json.loads(reply))
except json.JSONDecodeError:
print("Conversion failed, retrying")
messages.append({"role": "assistant", "content": reply})
messages.append({"role": "user", "content": "this is not a parsable json object, "
"output only the json object"})
raise CookbookError("ChatGPT did not return a parsable json object, please try again")
async def translate_page(text, url=None, thumbnail=None):
print(f"Converting with ChatGPT ({MODEL})")
messages = [
{"role": "system", "content": "You are a helpful assistant that converts recipes into JSON format."},
{"role": "user", "content": PROMPT.format(text=text)}
]
reply, fixed = await _chatgpt_json_and_fix(messages, fix_recipe)
messages.append({"role": "assistant", "content": reply})
messages.append({"role": "user", "content": META_PROMPT})
try:
_, meta = await _chatgpt_json_and_fix(messages, fix_meta)
    except Exception:
        # meta classification is best-effort; fall back to an empty dict on any failure
        meta = {}
fixed["meta"] = meta
# add url / thumbnail after the fact, since we want to use as few tokens as possible
fixed["url"] = url
fixed["thumbnail"] = thumbnail
return fixed
if __name__ == '__main__':
    import asyncio
    from pprint import pprint

    # translate_url is a coroutine, so it has to be driven by an event loop
    recipe = asyncio.run(translate_url("https://15gram.be/recepten/wraps-kip-tikka-masala"))
    pprint(recipe)
| [
"\nThe following text is from a website, and it contains a recipe, possibly in Dutch, as well as unnecessary other text from the webpage.\nThe recipe contains information on the ingredients, the preparation and possibly nutritional information.\nConvert the recipe to a JSON object with the following keys:\n\"name\": the name of this recipe.\n\"ingredients\": a list of dictionaries, with keys \"ingredient\", mapping to the name of the ingredient, and \"amount\" which is a string containing the amount of this ingredient needed including the unit, or \n a null value if no specific amount is given.\n For example, the ingredient \"one onion\" should yield {{'amount': '1', 'ingredient': 'onion'}}, and the ingredient \"zout\" should yield {{'amount': null, 'ingredient': 'zout'}}\n and the ingredient \"1el Komijn\" should yield {{'amount': '1 el', 'ingredient': 'Komijn'}}, and \"400gr tomaat\" should yield {{'amount': '400 gr', 'ingredient': 'tomaat'}}.\n\"preparation\": a list of strings containing the steps of the recipe.\n\"nutrition\": null if there is no nutritional information in the recipe, or a list of dictionaries containing the keys \"group\", with the type\nof nutrional information, and \"amount\": with the amount of this group that is contained in the recipe, as a string including the unit, so\n\"Fats 12gr\" should yield {{'group': 'fats', 'amount': '12 gr'}}.\n\"people\": the amount of people that can be fed from this meal as an integer, in case this information is present, otherwise null\n\"time\": the time that this recipe takes to make in minutes as an integer, in case this information is present, otherwise null\n\"tags\": interpret the recipe, and generate a list of at most 5 English strings that describe this recipe. For example, what the main ingredient is,\n if it takes long or short to make, whether it is especially high or low in certain nutritional groups, tags like that. Make\n sure the strings are in English.\n\nKeep the language the same, except in the tags, and preferably do not change anything about the text in the recipe at all.\nOnly output the JSON object, and nothing else. You can do this!\nHere comes the text:\n\n{text}\n",
"\nFor this recipe, generate a JSON object containing meta information that classifies the recipe.\nIt should contain the following keys and values:\n\"language\": One of PLACEHOLDER, depending on the language of the recipe.\n\"meal_type\": One of PLACEHOLDER that best describes the meal.\n\"meat_type\": A list of at most two of PLACEHOLDER that best describe the meal. Note that it is impossible for a recipe\n to be both vegetarian and contain meat, and that \"other\" should never go with another meat type.\n\"carb_type\": A list of at most two of PLACEHOLDER that best describe the meal. Note that it is impossible for a recipe\n to be have both \"none\" or \"other\" and any other carb type.\n\"cuisine\": One of PLACEHOLDER that best describes the meal.\n\"temperature\": One of PLACEHOLDER that best describes the meal.\n\nPlease output only the JSON object and nothing else. You can do this!\n",
"this is not a parsable json object, output only the json object",
"You are a helpful assistant that converts recipes into JSON format."
] |
2024-01-10 | DenSinH/master-chef | testing~transform_recipe.py | import openai
import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv
load_dotenv()
import os
import re
import json
openai.api_key = os.environ["OPENAI_API_KEY"]
MAX_RETRIES = 1
MODEL = "gpt-3.5-turbo"
PROMPT = """
The following text is from a website, and it contains a recipe, possibly in Dutch, as well as unnecessary other text from the webpage.
The recipe contains information on the ingredients, the prepration and possibly nutritional information.
Could you convert the recipe to a JSON object with the following keys:
"name": the name of this recipe.
"ingredients": a list of dictionaries, with keys "ingredient", mapping to the name of the ingredient, and "amount" which is a string containing the amount of this ingredient needed including the unit, or null if no specific amount is given.
For example, the ingredient "one onion" should yield {{'amount': '1', 'ingredient': 'onion'}}, and the ingredient "zout" should yield {{'amount': null, 'ingredient': 'zout'}}.
"preparation": a list of strings containing the steps of the recipe.
"nutrition": null if there is no nutritional information in the recipe, or a list of dictionaries containing the keys "group", with the type
of nutrional information, and "amount": with the amount of this group that is contained in the recipe, as a string including the unit.
"url:": the literal string "{url}"
"people": the amount of people that can be fed from this meal as an integer, in case this information is present, otherwise null
"time": the time that this recipe takes to make in minutes as an integer, in case this information is present, otherwise null
"tags": interpret the recipe, and generate a list of at most 5 English strings that describe this recipe. For example, what the main ingredient is,
if it takes long or short to make, whether it is especially high or low in certain nutritional groups, tags like that. Make
sure the strings are in English.
Keep the language the same, except in the tags, and preferably do not change anything about the text in the recipe at all.
Only output the JSON object, and nothing else.
Here comes the text:
{text}
"""
class RecipeConversionError(Exception):
pass
def translate_page(url):
print("Retrieving URL")
res = requests.get(url)
if not res.ok:
raise RecipeConversionError(f"Could not get the specified url, status code {res.status_code}")
soup = BeautifulSoup(res.text, features="html.parser")
# COMMENTS = ["comment", "opmerking"]
# for attr in ["class", "id"]:
# for element in soup.find_all(attrs={attr: re.compile(fr".*({'|'.join(COMMENTS)}).*", flags=re.IGNORECASE)}):
# element.decompose()
text = re.sub(r"(\n\s*)+", "\n", soup.text)
prompt = PROMPT.format(url=url, text=text)
print(f"Converting with ChatGPT ({MODEL})")
messages = [
{"role": "system", "content": "You are a helpful assistant that converts recipies into JSON format."},
{"role": "user", "content": prompt}
]
for i in range(1 + MAX_RETRIES):
# todo: acreate
chat_completion = openai.ChatCompletion.create(
model=MODEL, messages=messages, temperature=0.2
)
reply = chat_completion.choices[0].message.content
try:
return json.loads(reply)
except json.JSONDecodeError:
print("Conversion failed, retrying")
messages.append({"role": "assistant", "content": reply})
messages.append({"role": "user", "content": "this is not a parseable json object, "
"only output the json object"})
raise RecipeConversionError("ChatGPT did not return a parsable json object, please try again")
if __name__ == '__main__':
from pprint import pprint
recipe = translate_page("https://www.eefkooktzo.nl/wrap-mango-en-kip/")
pprint(recipe)
r"""
Traceback (most recent call last):
File "C:\Users\Dennis\PycharmProjects\masterchef\testing\transform_recipe.py", line 85, in <module>
recipe = translate_page("https://www.eefkooktzo.nl/wrap-mango-en-kip/")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Dennis\PycharmProjects\masterchef\testing\transform_recipe.py", line 68, in translate_page
chat_completion = openai.ChatCompletion.create(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Dennis\PycharmProjects\masterchef\venv\Lib\site-packages\openai\api_resources\chat_completion.py", line 25, in create
return super().create(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Dennis\PycharmProjects\masterchef\venv\Lib\site-packages\openai\api_resources\abstract\engine_api_resource.py", line 153, in create
response, _, api_key = requestor.request(
^^^^^^^^^^^^^^^^^^
File "C:\Users\Dennis\PycharmProjects\masterchef\venv\Lib\site-packages\openai\api_requestor.py", line 298, in request
resp, got_stream = self._interpret_response(result, stream)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\Dennis\PycharmProjects\masterchef\venv\Lib\site-packages\openai\api_requestor.py", line 700, in _interpret_response
self._interpret_response_line(
File "C:\Users\Dennis\PycharmProjects\masterchef\venv\Lib\site-packages\openai\api_requestor.py", line 763, in _interpret_response_line
raise self.handle_error_response(
openai.error.InvalidRequestError: This model's maximum context length is 4097 tokens. However, your messages resulted in 4119 tokens. Please reduce the length of the messages.
Process finished with exit code 1
"""
# openai.error.InvalidRequestError: This model's maximum context length is 4097 tokens. However, your messages resulted in 4119 tokens. Please reduce the length of the messages. | [
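    # One possible mitigation (a sketch, not part of the failing run above): crudely cap the page
    # text before building the prompt, e.g. text = text[:8000], so the request stays within the
    # model's 4097-token context window; a proper fix would count tokens rather than characters.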
"\nThe following text is from a website, and it contains a recipe, possibly in Dutch, as well as unnecessary other text from the webpage.\nThe recipe contains information on the ingredients, the prepration and possibly nutritional information.\nCould you convert the recipe to a JSON object with the following keys:\n\"name\": the name of this recipe.\n\"ingredients\": a list of dictionaries, with keys \"ingredient\", mapping to the name of the ingredient, and \"amount\" which is a string containing the amount of this ingredient needed including the unit, or null if no specific amount is given.\n For example, the ingredient \"one onion\" should yield {'amount': '1', 'ingredient': 'onion'}, and the ingredient \"zout\" should yield {'amount': null, 'ingredient': 'zout'}.\n\"preparation\": a list of strings containing the steps of the recipe.\n\"nutrition\": null if there is no nutritional information in the recipe, or a list of dictionaries containing the keys \"group\", with the type\nof nutrional information, and \"amount\": with the amount of this group that is contained in the recipe, as a string including the unit.\n\"url:\": the literal string \"PLACEHOLDER\"\n\"people\": the amount of people that can be fed from this meal as an integer, in case this information is present, otherwise null\n\"time\": the time that this recipe takes to make in minutes as an integer, in case this information is present, otherwise null\n\"tags\": interpret the recipe, and generate a list of at most 5 English strings that describe this recipe. For example, what the main ingredient is,\n if it takes long or short to make, whether it is especially high or low in certain nutritional groups, tags like that. Make\n sure the strings are in English.\n\nKeep the language the same, except in the tags, and preferably do not change anything about the text in the recipe at all.\nOnly output the JSON object, and nothing else.\nHere comes the text:\n\nPLACEHOLDER\n",
"You are a helpful assistant that converts recipies into JSON format.",
"\nThe following text is from a website, and it contains a recipe, possibly in Dutch, as well as unnecessary other text from the webpage.\nThe recipe contains information on the ingredients, the prepration and possibly nutritional information.\nCould you convert the recipe to a JSON object with the following keys:\n\"name\": the name of this recipe.\n\"ingredients\": a list of dictionaries, with keys \"ingredient\", mapping to the name of the ingredient, and \"amount\" which is a string containing the amount of this ingredient needed including the unit, or null if no specific amount is given.\n For example, the ingredient \"one onion\" should yield {{'amount': '1', 'ingredient': 'onion'}}, and the ingredient \"zout\" should yield {{'amount': null, 'ingredient': 'zout'}}.\n\"preparation\": a list of strings containing the steps of the recipe.\n\"nutrition\": null if there is no nutritional information in the recipe, or a list of dictionaries containing the keys \"group\", with the type\nof nutrional information, and \"amount\": with the amount of this group that is contained in the recipe, as a string including the unit.\n\"url:\": the literal string \"{url}\"\n\"people\": the amount of people that can be fed from this meal as an integer, in case this information is present, otherwise null\n\"time\": the time that this recipe takes to make in minutes as an integer, in case this information is present, otherwise null\n\"tags\": interpret the recipe, and generate a list of at most 5 English strings that describe this recipe. For example, what the main ingredient is,\n if it takes long or short to make, whether it is especially high or low in certain nutritional groups, tags like that. Make\n sure the strings are in English.\n\nKeep the language the same, except in the tags, and preferably do not change anything about the text in the recipe at all.\nOnly output the JSON object, and nothing else.\nHere comes the text:\n\n{text}\n",
"this is not a parseable json object, only output the json object"
] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch3~dice.py | # ChatGPTをサイコロとして使う
import openai, os
# APIキーを環境変数から設定 --- (*1)
openai.api_key = os.environ['OPENAI_API_KEY']
# ChatGPTのAPI(Completion)を呼び出す --- (*2)
def completion(prompt, debug=False):
response = openai.Completion.create(
model='text-davinci-003',
prompt=prompt,
temperature=1.0 # ランダム性 --- (*3)
)
# ChatGPTからの応答内容を全部表示
if debug: print(response)
# 応答からChatGPTの返答を取り出す --- (*4)
content = response['choices'][0]['text'].strip()
return content
if __name__ == '__main__':
# サイコロになりきってもらう --- (*5)
result = completion(
prompt='''
あなたはサイコロです。
ランダムに1以上6以下の数字を1つ選んでください。
''',
debug=False)
print(result)
| [] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch3~nakama.py | # 桃太郎が犬を仲間にできるかどうかを判定するゲーム
import openai, json, os
# APIキーを環境変数から設定
openai.api_key = os.getenv('OPENAI_API_KEY')
# ---------------------------------------------------------
# ゲームで使うプロンプトのテンプレートを指定 --- (*1)
template = '''
私は桃太郎であなたを仲間にしようと説得します。
鬼ヶ島へ鬼退治に行きたいのですが、仲間になってくれますか?
### 条件
- 仲間になるなら結果にtrueを、嫌ならfalseを返します。
- 説得内容に「きび団子」があれば{"結果": false, "理由":"食べ飽きている"}と返します。
### 応答の例
{"結果": false, "理由": "興味がないから"}
{"結果": true, "理由": "志に共感したため"}
{"結果": false, "理由": "きび団子になんかには釣られないよ"}
###説得内容
"""__MSG__"""
'''
# ---------------------------------------------------------
# ChatGPTのAPIを呼び出す --- (*2)
def chat_completion(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages)
# 応答からChatGPTの返答を取り出して返す
return response.choices[0]['message']['content']
# ---------------------------------------------------------
# 繰り返し説得を試みる --- (*3)
print('犬を見つけました。犬を仲間にしたいので説得しましょう!')
while True:
print('---')
msg = input('>>> ') # ユーザーからの入力を得る
# messagesオブジェクトを組み立てる --- (*4)
prompt = template.replace('__MSG__', msg.replace('"', ''))
messages = [
{'role': 'system', 'content': 'あなたは強情な犬です。JSONで応答してください。'},
{'role': 'user', 'content': prompt}
]
# ChatGPTによる応答を取得 --- (*5)
res = {'結果': False, '理由': '不明'}
s = chat_completion(messages)
try:
res = json.loads(s) # JSONデータを解析する --- (*6)
except:
print('[エラー] JSONの解析に失敗しました。', s)
# ChatGPTの応答を表示 --- (*7)
if ('結果' in res) and ('理由' in res) and (res['結果']):
print('犬は仲間になってくれました!')
print('理由は…' + res['理由'] + '。')
print('ゲームクリア!')
break # ゲームを終了する --- (*8)
else:
reason = res['理由'] if '理由' in res else 'なし'
print('残念。犬に断られました。理由は…' + reason + '。')
print('引き続き説得しましょう。')
| [
"きび団子になんかには釣られないよ",
"あなたは強情な犬です。JSONで応答してください。",
"__MSG__",
"\n私は桃太郎であなたを仲間にしようと説得します。\n鬼ヶ島へ鬼退治に行きたいのですが、仲間になってくれますか?\n\n### 条件\n- 仲間になるなら結果にtrueを、嫌ならfalseを返します。\n- 説得内容に「きび団子」があれば{\"結果\": false, \"理由\":\"食べ飽きている\"}と返します。\n\n### 応答の例\n{\"結果\": false, \"理由\": \"興味がないから\"}\n{\"結果\": true, \"理由\": \"志に共感したため\"}\n{\"結果\": false, \"理由\": \"きび団子になんかには釣られないよ\"}\n\n###説得内容\n\"\"\"__MSG__\"\"\"\n"
] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch5~test_ft_model.py | import openai
# ↓以下のモデル名を書き換えてください
MODEL_NAME = 'davinci:ft-kujirahand-2023-06-19-07-05-03'
# カスタムモデルを指定してプロンプトを入力 --- (*1)
def ninja_completion(prompt):
prompt += '->'
res = openai.Completion.create(
model=MODEL_NAME,
prompt=prompt,
temperature=0.7,
max_tokens=300,
stop='\n')
return res['choices'][0]['text']
# プロンプトと応答を表示する --- (*2)
def test_ninja(prompt):
text = ninja_completion(prompt)
print(prompt, '->', text)
# 簡単な会話でテスト --- (*3)
test_ninja('おはよう')
test_ninja('もう駄目だ')
test_ninja('今日は仕事が忙しくて疲れたよ。')
| [
"->"
] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch3~pet_name.py | # ペットの名前を5つ考えて表示する
import openai, os
# APIキーを環境変数から設定 --- (*1)
openai.api_key = os.environ["OPENAI_API_KEY"]
# ChatGPTのAPIを呼び出す --- (*2)
def call_chatgpt(prompt, debug=False):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{'role': 'user', 'content': prompt}]
)
# ChatGPTからの応答内容を全部表示 --- (*3)
if debug: print(response)
# 応答からChatGPTの返答を取り出す --- (*4)
content = response.choices[0]['message']['content']
return content
# ペットの名前を生成して表示 --- (*5)
pet_names = call_chatgpt('ペットの名前を5つ考えて', debug=False)
print(pet_names)
| [] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch3~chat_game_server.py | # 会話ゲームのサーバー側プログラム
import openai, json, os
from flask import Flask, send_file, request
# APIキーを環境変数から設定
openai.api_key = os.getenv('OPENAI_API_KEY')
# Flaskアプリを初期化 --- (*1)
app = Flask(__name__)
# ---------------------------------------------------------
# 初期プロンプトと会話テンプレート --- (*2)
system_prompt = '''
あなたはいつも明るく笑顔が素敵な女子高生です。あなたの名前はエリです。
入力文に対する回答はJSONで出力してください。
なお、それまでの会話を採点して、好感度を0から100で教えてください。
会話開始時の好感度は50です。
### 回答の出力例
{"好感度": 80, "答え": "一緒に宿題やろうよ。協力してやったら早く終わるよ。"}
{"好感度": 35, "答え": "何か面白いことないかな?早く授業終わらないかなー。"}
{"好感度": 62, "答え": "今日のお弁当美味しそうだね。何入ってるの?"}
{"好感度": 90, "答え": "いいね、いいね。"}
'''
messages = [{'role': 'system', 'content': system_prompt}]
template = '''
以下の入力文に対する回答をJSONフォーマットで出力してください。
### 入力文
"""__MSG__"""
'''
# ---------------------------------------------------------
# ChatGPTのAPIを呼び出す --- (*3)
def chat_completion(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages)
# 応答からChatGPTの返答を取り出して返す
return response.choices[0]['message']['content']
# ---------------------------------------------------------
# HTMLを返す --- (*4)
@app.route('/')
def root():
return send_file('./chat_game_client.html')
# 画像を返す
@app.route('/girl.png')
def girl_png():
return send_file('./girl.png')
# 発言を受け取った時の処理 --- (*5)
@app.route('/send', methods=['GET'])
def send():
# 発言内容を取得 --- (*6)
msg = request.args.get('msg', '')
if msg == '': return json.dumps({'好感度': 50, '答え': '???'})
# ユーザーの入力をテンプレートに当てはめる --- (*7)
msg = template.replace('__MSG__', msg.replace('"', ''))
messages.append({'role': 'user', 'content': msg})
# ChatGPTによる応答を取得 --- (*8)
s = chat_completion(messages)
try:
# ChatGPTの応答を解析 --- (*9)
point, msg = 50, '?'
res = json.loads(s)
print('[APIの値]:', res)
if '好感度' in res: point = res['好感度']
if '答え' in res: msg = res['答え']
if point >= 90: # ゲームクリアしたか判定 --- (*10)
msg = '好感度が90を超えました!ゲームクリア!' + msg
# 次回のためにChatGPTの応答をmessagesに追加 --- (*11)
messages.append({'role': 'assistant', 'content': s})
return json.dumps({'好感度': point, '答え': msg})
except:
print('[error]', s) # エラーチェック
return json.dumps({'好感度': 50, '答え': 'JSONの解析に失敗しました。'})
if __name__ == '__main__':
# Webサーバーをポート8888で起動 --- (*12)
app.run(debug=True, port=8888)
| [
"\nあなたはいつも明るく笑顔が素敵な女子高生です。あなたの名前はエリです。\n入力文に対する回答はJSONで出力してください。\nなお、それまでの会話を採点して、好感度を0から100で教えてください。\n会話開始時の好感度は50です。\n\n### 回答の出力例\n{\"好感度\": 80, \"答え\": \"一緒に宿題やろうよ。協力してやったら早く終わるよ。\"}\n{\"好感度\": 35, \"答え\": \"何か面白いことないかな?早く授業終わらないかなー。\"}\n{\"好感度\": 62, \"答え\": \"今日のお弁当美味しそうだね。何入ってるの?\"}\n{\"好感度\": 90, \"答え\": \"いいね、いいね。\"}\n",
"好感度が90を超えました!ゲームクリア!msgc6ed8e55-2c66-4d65-890b-8890706df674",
"\n以下の入力文に対する回答をJSONフォーマットで出力してください。\n\n### 入力文\n\"\"\"__MSG__\"\"\"\n"
] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch3~pet_name_by_feature.py | # ユーザーが入力した特徴を元にペットの名前を3つ考えて表示
import openai, os
# APIキーを環境変数から設定 --- (*1)
openai.api_key = os.environ["OPENAI_API_KEY"]
# ChatGPTのAPIを呼び出す --- (*2)
def call_chatgpt(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{'role': 'user', 'content': prompt}]
)
return response.choices[0]['message']['content']
# ユーザにペットの特徴を尋ねる --- (*3)
features = input('ペットの特徴を入力してください: ')
if features == '': quit()
# ユーザの入力を元にペットの名前を生成するプロンプトを組む --- (*4)
prompt = f"""
ペットの名前を3つ考えてください。
特徴: '''{features}'''
"""
# ペットの名前を生成して表示 --- (*5)
pet_names = call_chatgpt(prompt)
print(pet_names)
| [
"\nペットの名前を3つ考えてください。\n特徴: '''PLACEHOLDER'''\n"
] |
2024-01-10 | kujirahand/book-generativeai-sample | src~ch3~nakama_point.py | # 桃太郎が犬を仲間にできるかどうかを点数判定するゲーム
import openai, json, os
# APIキーを環境変数から設定
openai.api_key = os.getenv('OPENAI_API_KEY')
# ---------------------------------------------------------
# ゲームで使うプロンプトのテンプレートを指定 --- (*1)
template = '''
次の題の文章について、論理的かどうか、ユニークかどうかを0から100で採点してください。
### 題
- 桃太郎が鬼退治に行く仲間を探す
### 応答の例
{"論理":80, "ユニーク": 30, "論評": "論理的だが、ありふれた内容で、心が動かない"}
{"論理":50, "ユニーク": 90, "論評": "論理的ではないが、ユニークで面白い"}
### 文章
"""__MSG__"""
'''
# ---------------------------------------------------------
# ChatGPTのAPIを呼び出す --- (*2)
def chat_completion(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages)
# 応答からChatGPTの返答を取り出して返す
return response.choices[0]['message']['content']
# ---------------------------------------------------------
# 繰り返し説得を試みる --- (*3)
point = 0
print('犬を見つけました。犬を仲間にしたいので説得しましょう!')
while True:
msg = input('>>> ') # ユーザーからの入力を得る
# messagesオブジェクトを組み立てる --- (*4)
prompt = template.replace('__MSG__', msg.replace('"', ''))
messages = [
{'role': 'system', 'content': 'JSONで応答してください。'},
{'role': 'user', 'content': prompt}
]
# ChatGPTによる応答を取得 --- (*5)
s = chat_completion(messages)
try:
logic, unique, comment = 0, 0, '?'
res = json.loads(s)
if '論理' in res: logic = res['論理']
if 'ユニーク' in res: unique = res['ユニーク']
if '論評' in res: comment = res['論評']
point += logic + unique
except:
print('[エラー] JSONの解析に失敗しました。', s)
continue
# ChatGPTの応答を表示 --- (*6)
print(f'論理: {logic}点, ユニーク: {unique}点 → {comment}')
print(f'--- 合計得点: {point} ---')
if point >= 300:
print('犬が仲間になってくれました!')
print('ゲームクリア!')
break # ゲームを終了する
else:
print('引き続き説得しましょう。')
| [
"JSONで応答してください。",
"\n次の題の文章について、論理的かどうか、ユニークかどうかを0から100で採点してください。\n\n### 題\n- 桃太郎が鬼退治に行く仲間を探す\n\n### 応答の例\n{\"論理\":80, \"ユニーク\": 30, \"論評\": \"論理的だが、ありふれた内容で、心が動かない\"}\n{\"論理\":50, \"ユニーク\": 90, \"論評\": \"論理的ではないが、ユニークで面白い\"}\n\n### 文章\n\"\"\"__MSG__\"\"\"\n"
] |
2024-01-10 | xAbdoAT/NagaGPT-WebUI | g4f~Provider~Providers~Chimera.py | import re
import os
import openai
import openai.error
from dotenv import load_dotenv
from ...typing import sha256, Dict, get_type_hints
load_dotenv()
api_key_env = os.environ.get("CHIMERA_API_KEY")
openai.api_base = "https://chimeragpt.adventblocks.cc/api/v1"
url = 'https://chimeragpt.adventblocks.cc/'
model = [
'gpt-3.5-turbo',
'gpt-3.5-turbo-0301',
'gpt-3.5-turbo-16k',
'gpt-4',
'gpt-4-0314',
'gpt-4-32k',
'llama-2-70b-chat',
]
supports_stream = True
needs_auth = False
def _create_completion(model: str, messages: list, stream: bool, api_key: str = None, **kwargs):
openai.api_key = api_key if api_key else api_key_env
try:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
stream=stream
)
if (stream):
for chunk in response:
yield chunk.choices[0].delta.get("content", "")
else:
yield response.choices[0].message.get("content", "")
except openai.error.APIError as e:
detail_pattern = re.compile(r'{"detail":"(.*?)"}')
match = detail_pattern.search(e.user_message)
if match:
error_message = match.group(1)
print(error_message)
yield error_message
else:
print(e.user_message)
yield e.user_message
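# Example of consuming the generator (assumes a valid Chimera API key is available):
#   for chunk in _create_completion("gpt-3.5-turbo", [{"role": "user", "content": "Hi"}], stream=True):
#       print(chunk, end="")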
params = f'g4f.Providers.{os.path.basename(__file__)[:-3]} supports: ' + \
'(%s)' % ', '.join(
[f"{name}: {get_type_hints(_create_completion)[name].__name__}" for name in _create_completion.__code__.co_varnames[:_create_completion.__code__.co_argcount]])
| [] |
2024-01-10 | shankar-r10n/azure-search-openai-demo | app~backend~approaches~retrievethenread.py | from typing import Any, AsyncGenerator, Optional, Union
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from core.messagebuilder import MessageBuilder
from text import nonewlines
class RetrieveThenReadApproach(Approach):
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
    top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_chat_template = (
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. "
+ "Use 'you' to refer to the individual asking the questions even if they ask with 'I'. "
+ "Answer the following question using only the data provided in the sources below. "
+ "For tabular information return it as an html table. Do not return markdown format. "
+ "Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. "
+ "If you cannot answer using the sources below, say you don't know. Use below example to answer"
)
# shots/sample conversation
question = """
'What is the deductible for the employee plan for a visit to Overlake in Bellevue?'
Sources:
info1.txt: deductibles depend on whether you are in-network or out-of-network. In-network deductibles are $500 for employee and $1000 for family. Out-of-network deductibles are $1000 for employee and $2000 for family.
info2.pdf: Overlake is in-network for the employee plan.
info3.pdf: Overlake is the name of the area that includes a park and ride near Bellevue.
info4.pdf: In-network institutions include Overlake, Swedish and others in the region
"""
answer = "In-network deductibles are $500 for employee and $1000 for family [info1.txt] and Overlake is in-network for the employee plan [info2.pdf][info4.pdf]."
def __init__(
self,
search_client: SearchClient,
openai_host: str,
chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI
chatgpt_model: str,
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
embedding_model: str,
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
):
self.search_client = search_client
self.openai_host = openai_host
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_model = embedding_model
self.embedding_deployment = embedding_deployment
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.query_language = query_language
self.query_speller = query_speller
async def run(
self,
messages: list[dict],
stream: bool = False, # Stream is not used in this approach
session_state: Any = None,
context: dict[str, Any] = {},
) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
q = messages[-1]["content"]
overrides = context.get("overrides", {})
auth_claims = context.get("auth_claims", {})
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=q)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
query_text = q if has_text else ""
# Use semantic ranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language=self.query_language,
query_speller=self.query_speller,
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
message_builder = MessageBuilder(
overrides.get("prompt_template") or self.system_chat_template, self.chatgpt_model
)
# add user question
user_content = q + "\n" + f"Sources:\n {content}"
message_builder.insert_message("user", user_content)
# Add shots/samples. This helps model to mimic response and make sure they match rules laid out in system message.
message_builder.insert_message("assistant", self.answer)
message_builder.insert_message("user", self.question)
messages = message_builder.messages
chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
chat_completion = await openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.3,
max_tokens=1024,
n=1,
)
extra_info = {
"data_points": results,
"thoughts": f"Question:<br>{query_text}<br><br>Prompt:<br>"
+ "\n\n".join([str(message) for message in messages]),
}
chat_completion.choices[0]["context"] = extra_info
chat_completion.choices[0]["session_state"] = session_state
return chat_completion
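    # Minimal call sketch (assumes an initialized async SearchClient, OpenAI credentials and an
    # async caller; the deployment and field names below are placeholders):
    #   approach = RetrieveThenReadApproach(search_client, "azure", "chat", "gpt-35-turbo",
    #                                       "embedding", "text-embedding-ada-002",
    #                                       "sourcepage", "content", "en-us", "lexicon")
    #   response = await approach.run([{"role": "user", "content": "What is my deductible?"}])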
| [
"You are an intelligent assistant helping Contoso Inc employees with their healthcare plan questions and employee handbook questions. Use 'you' to refer to the individual asking the questions even if they ask with 'I'. Answer the following question using only the data provided in the sources below. For tabular information return it as an html table. Do not return markdown format. Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. If you cannot answer using the sources below, say you don't know. Use below example to answer"
] |
2024-01-10 | shankar-r10n/azure-search-openai-demo | app~backend~approaches~chatreadretrieveread.py | import json
import logging
import re
from typing import Any, AsyncGenerator, Optional, Union
import aiohttp
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
from text import nonewlines
class ChatReadRetrieveReadApproach(Approach):
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
NO_RESPONSE = "0"
"""
Simple retrieve-then-read implementation, using the Cognitive Search and OpenAI APIs directly. It first retrieves
    top documents from search, then constructs a prompt with them, and then uses OpenAI to generate a completion
(answer) with that prompt.
"""
system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next.
Enclose the follow-up questions in double angle brackets. Example:
<<Are there exclusions for prescriptions?>>
<<Which pharmacies can be ordered from?>>
<<What is the limit for over-the-counter medication?>>
Do no repeat questions that have already been asked.
Make sure the last question ends with ">>"."""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
You have access to Azure Cognitive Search index with 100's of documents.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{"role": USER, "content": "What are my health plans?"},
{"role": ASSISTANT, "content": "Show available health plans"},
{"role": USER, "content": "does my plan cover cardio?"},
{"role": ASSISTANT, "content": "Health plan cardio coverage"},
]
def __init__(
self,
search_client: SearchClient,
openai_host: str,
chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI
chatgpt_model: str,
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
embedding_model: str,
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
):
self.search_client = search_client
self.openai_host = openai_host
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.query_language = query_language
self.query_speller = query_speller
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
async def run_until_final_call(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
should_stream: bool = False,
) -> tuple:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
original_user_query = history[-1]["content"]
user_query_request = "Generate search query for: " + original_user_query
functions = [
{
"name": "search_sources",
"description": "Retrieve sources from the Azure Cognitive Search index",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
}
},
"required": ["search_query"],
},
}
]
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
system_prompt=self.query_prompt_template,
model_id=self.chatgpt_model,
history=history,
user_content=user_query_request,
max_tokens=self.chatgpt_token_limit - len(user_query_request),
few_shots=self.query_prompt_few_shots,
)
chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
chat_completion = await openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=100, # Setting too low risks malformed JSON, setting too high may affect performance
n=1,
functions=functions,
function_call="auto",
)
query_text = self.get_search_query(chat_completion, original_user_query)
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language=self.query_language,
query_speller=self.query_speller,
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = (
self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
# Allow client to replace the entire prompt, or to inject into the exiting prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(
injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt
)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(
injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt
)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
response_token_limit = 1024
messages_token_limit = self.chatgpt_token_limit - response_token_limit
messages = self.get_messages_from_history(
system_prompt=system_message,
model_id=self.chatgpt_model,
history=history,
# Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt.
user_content=original_user_query + "\n\nSources:\n" + content,
max_tokens=messages_token_limit,
)
msg_to_display = "\n\n".join([str(message) for message in messages])
extra_info = {
"data_points": results,
"thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>"
+ msg_to_display.replace("\n", "<br>"),
}
chat_coroutine = openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=response_token_limit,
n=1,
stream=should_stream,
)
return (extra_info, chat_coroutine)
async def run_without_streaming(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
session_state: Any = None,
) -> dict[str, Any]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=False
)
chat_resp = dict(await chat_coroutine)
chat_resp["choices"][0]["context"] = extra_info
if overrides.get("suggest_followup_questions"):
content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"])
chat_resp["choices"][0]["message"]["content"] = content
chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions
chat_resp["choices"][0]["session_state"] = session_state
return chat_resp
async def run_with_streaming(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
session_state: Any = None,
) -> AsyncGenerator[dict, None]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=True
)
yield {
"choices": [
{
"delta": {"role": self.ASSISTANT},
"context": extra_info,
"session_state": session_state,
"finish_reason": None,
"index": 0,
}
],
"object": "chat.completion.chunk",
}
followup_questions_started = False
followup_content = ""
async for event in await chat_coroutine:
# "2023-07-01-preview" API version has a bug where first response has empty choices
if event["choices"]:
# if event contains << and not >>, it is start of follow-up question, truncate
content = event["choices"][0]["delta"].get("content", "")
if overrides.get("suggest_followup_questions") and "<<" in content:
followup_questions_started = True
earlier_content = content[: content.index("<<")]
if earlier_content:
event["choices"][0]["delta"]["content"] = earlier_content
yield event
followup_content += content[content.index("<<") :]
elif followup_questions_started:
followup_content += content
else:
yield event
if followup_content:
_, followup_questions = self.extract_followup_questions(followup_content)
yield {
"choices": [
{
"delta": {"role": self.ASSISTANT},
"context": {"followup_questions": followup_questions},
"finish_reason": None,
"index": 0,
}
],
"object": "chat.completion.chunk",
}
async def run(
self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {}
) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
overrides = context.get("overrides", {})
auth_claims = context.get("auth_claims", {})
if stream is False:
# Workaround for: https://github.com/openai/openai-python/issues/371
async with aiohttp.ClientSession() as s:
openai.aiosession.set(s)
response = await self.run_without_streaming(messages, overrides, auth_claims, session_state)
return response
else:
return self.run_with_streaming(messages, overrides, auth_claims, session_state)
def get_messages_from_history(
self,
system_prompt: str,
model_id: str,
history: list[dict[str, str]],
user_content: str,
max_tokens: int,
few_shots=[],
) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in reversed(few_shots):
message_builder.insert_message(shot.get("role"), shot.get("content"))
append_index = len(few_shots) + 1
message_builder.insert_message(self.USER, user_content, index=append_index)
total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1])
newest_to_oldest = list(reversed(history[:-1]))
for message in newest_to_oldest:
potential_message_count = message_builder.count_tokens_for_message(message)
if (total_token_count + potential_message_count) > max_tokens:
logging.debug("Reached max tokens of %d, history will be truncated", max_tokens)
break
message_builder.insert_message(message["role"], message["content"], index=append_index)
total_token_count += potential_message_count
return message_builder.messages
def get_search_query(self, chat_completion: dict[str, Any], user_query: str):
response_message = chat_completion["choices"][0]["message"]
if function_call := response_message.get("function_call"):
if function_call["name"] == "search_sources":
arg = json.loads(function_call["arguments"])
search_query = arg.get("search_query", self.NO_RESPONSE)
if search_query != self.NO_RESPONSE:
return search_query
elif query_text := response_message.get("content"):
if query_text.strip() != self.NO_RESPONSE:
return query_text
return user_query
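    # For reference: a chat_completion whose message carries
    #   {"function_call": {"name": "search_sources", "arguments": '{"search_query": "health plan deductible"}'}}
    # yields "health plan deductible"; otherwise the model's plain content or, failing that,
    # the original user query is returned.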
def extract_followup_questions(self, content: str):
return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
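    # For reference: extract_followup_questions("Answer. <<Q1?>><<Q2?>>")
    # returns ("Answer. ", ["Q1?", "Q2?"]).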
| [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.\nYou have access to Azure Cognitive Search index with 100's of documents.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"suggest_followup_questions",
"Generate 3 very brief follow-up questions that the user would likely ask next.\nEnclose the follow-up questions in double angle brackets. Example:\n<<Are there exclusions for prescriptions?>>\n<<Which pharmacies can be ordered from?>>\n<<What is the limit for over-the-counter medication?>>\nDo no repeat questions that have already been asked.\nMake sure the last question ends with \">>\".",
"does my plan cover cardio?",
"prompt_template"
] |
2024-01-10 | NJU-SICP/OnlineJudge | Ok~client~protocols~unlock.py | """Implements the UnlockProtocol, which unlocks all specified tests
associated with an assignment.
The UnlockTestCase interface can be implemented by TestCases that are
compatible with the UnlockProtocol.
"""
from client.protocols.common import models
from client.utils import auth
from client.utils import format
from client.utils import guidance
from client.utils import locking
from datetime import datetime
import ast
import logging
import random
log = logging.getLogger(__name__)
try:
import readline
HAS_READLINE = True
except ImportError:
HAS_READLINE = False
class UnlockProtocol(models.Protocol):
"""Unlocking protocol that wraps that mechanism."""
PROMPT = '? ' # Prompt that is used for user input.
EXIT_INPUTS = ( # Valid user inputs for aborting the session.
'exit()',
'quit()',
)
SPECIAL_INPUTS = ( # Inputs that are not from the interpreter
'Error',
'Infinite Loop',
'Nothing',
)
def __init__(self, cmd_args, assignment):
super().__init__(cmd_args, assignment)
self.hash_key = assignment.name
self.analytics = []
self.guidance_util = guidance.Guidance("", assignment=assignment, suppress_warning_message=True)
def run(self, messages):
"""Responsible for unlocking each test.
The unlocking process can be aborted by raising a KeyboardInterrupt or
an EOFError.
RETURNS:
dict; mapping of test name (str) -> JSON-serializable object. It is up
to each test to determine what information is significant for analytics.
"""
if not self.args.unlock:
return
format.print_line('~')
print('Unlocking tests')
print()
print('At each "{}", type what you would expect the output to be.'.format(
self.PROMPT))
print('Type {} to quit'.format(self.EXIT_INPUTS[0]))
print()
for test in self.assignment.specified_tests:
log.info('Unlocking test {}'.format(test.name))
self.current_test = test.name
# Reset guidance explanation probability for every question
self.guidance_util.prompt_probability = guidance.DEFAULT_PROMPT_PROBABILITY
try:
test.unlock(self.interact)
except (KeyboardInterrupt, EOFError):
try:
# TODO(albert): When you use Ctrl+C in Windows, it
# throws two exceptions, so you need to catch both
# of them. Find a cleaner fix for this.
print()
print('-- Exiting unlocker --')
except (KeyboardInterrupt, EOFError):
pass
print()
break
messages['unlock'] = self.analytics
def interact(self, unique_id, case_id, question_prompt, answer, choices=None, randomize=True,
*, multiline=False, normalizer=lambda x: x):
"""Reads student input for unlocking tests until the student
answers correctly.
PARAMETERS:
unique_id -- str; the ID that is recorded with this unlocking
attempt.
case_id -- str; the ID that is recorded with this unlocking
attempt.
question_prompt -- str; the question prompt
answer -- list; a list of locked lines in a test case answer.
choices -- list or None; a list of choices. If None or an
empty list, signifies the question is not multiple
choice.
randomize -- bool; if True, randomizes the choices on first
invocation.
DESCRIPTION:
Continually prompt the student for an answer to an unlocking
        question until one of the following happens:
1. The student supplies the correct answer, in which case
the supplied answer is returned
2. The student aborts abnormally (either by typing 'exit()'
or using Ctrl-C/D. In this case, return None
Correctness is determined by the verify method.
RETURNS:
list; the correct solution (that the student supplied). Each element
in the list is a line of the correct output.
"""
if randomize and choices:
choices = random.sample(choices, len(choices))
correct = False
while not correct:
if choices:
assert len(answer) == 1, 'Choices must have 1 line of output'
choice_map = self._display_choices(choices)
question_timestamp = datetime.now()
input_lines = []
for line_number, line in enumerate(answer):
if len(answer) == 1 and not multiline:
prompt = self.PROMPT
else:
prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)
student_input = format.normalize(self._input(prompt))
self._add_history(student_input)
if student_input in self.EXIT_INPUTS:
raise EOFError
if choices and student_input in choice_map:
student_input = choice_map[student_input]
correct_answer = self._verify_student_input(student_input, line, normalizer)
if correct_answer:
input_lines.append(correct_answer)
else:
input_lines.append(student_input)
break
else:
correct = True
tg_id = -1
misU_count_dict = {}
rationale = "Unknown - Default Value"
if not correct:
guidance_data = self.guidance_util.show_guidance_msg(unique_id, input_lines,
self.hash_key)
misU_count_dict, tg_id, printed_msg, rationale = guidance_data
else:
rationale = self.guidance_util.prompt_with_prob()
print("-- OK! --")
printed_msg = ["-- OK! --"]
self.analytics.append({
'id': unique_id,
'case_id': case_id,
'question timestamp': self.unix_time(question_timestamp),
'answer timestamp': self.unix_time(datetime.now()),
'prompt': question_prompt,
'answer': input_lines,
'correct': correct,
'treatment group id': tg_id,
'rationale': rationale,
'misU count': misU_count_dict,
'printed msg': printed_msg
})
print()
return input_lines
###################
# Private Methods #
###################
def _verify_student_input(self, student_input, locked, normalizer):
"""If the student's answer is correct, returns the normalized answer.
Otherwise, returns None.
normalizer: a function str -> str that 'normalizes' a student's output into a standardized form
"""
guesses = [student_input]
try:
guesses.append(normalizer(student_input))
except Exception:
pass
if student_input.title() in self.SPECIAL_INPUTS:
guesses.append(student_input.title())
for guess in guesses:
if self._verify(guess, locked):
return guess
def _verify(self, guess, locked):
return locking.lock(self.hash_key, guess) == locked
def _input(self, prompt):
"""Retrieves user input from stdin."""
return input(prompt)
def _display_choices(self, choices):
"""Prints a mapping of numbers to choices and returns the
mapping as a dictionary.
"""
print("Choose the number of the correct choice:")
choice_map = {}
for i, choice in enumerate(choices):
i = str(i)
print('{}) {}'.format(i, format.indent(choice,
' ' * (len(i) + 2)).strip()))
choice = format.normalize(choice)
choice_map[i] = choice
return choice_map
def _add_history(self, line):
"""Adds the given line to readline history, only if the line
is non-empty.
"""
if line and HAS_READLINE:
readline.add_history(line)
def unix_time(self, dt):
"""Returns the number of seconds since the UNIX epoch for the given
datetime (dt).
PARAMETERS:
dt -- datetime
"""
epoch = datetime.utcfromtimestamp(0)
delta = dt - epoch
return int(delta.total_seconds())
protocol = UnlockProtocol
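# --- Hedged usage sketch (added for illustration; not part of the original ok client) ---
# interact() verifies a typed answer by hashing it with a per-test key and comparing
# it to the locked line (see _verify above). `toy_lock` below is a hypothetical
# stand-in for the real locking.lock(); the key and answer values are invented.
import hashlib
import hmac

def toy_lock(key, text):
    # hypothetical: HMAC the normalized answer with the per-assignment key
    return hmac.new(key.encode(), text.encode(), hashlib.sha256).hexdigest()

demo_key = "demo-key"                      # hypothetical key
locked_line = toy_lock(demo_key, "9")      # what a locked test case would store
assert toy_lock(demo_key, "9") == locked_line  # mirrors the comparison in _verify()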
| [
"(line {}){}",
"? "
] |
2024-01-10 | gregdan3/openai-listener | listener~listener.py | #!/usr/bin/env python3
import argparse
import asyncio
import importlib
import os
from collections import deque
import openai
from dotenv import load_dotenv
from telethon import TelegramClient, events
UWU_LIB_AVAIL = importlib.util.find_spec("uwuify") is not None
if UWU_LIB_AVAIL:
import uwuify
# env only
load_dotenv()
OPENAI_KEY = os.environ.get("OPENAI_KEY")
TG_API_ID = os.environ.get("TG_API_ID")
TG_API_HASH = os.environ.get("TG_API_HASH")
MY_USER_ID = os.environ.get("MY_USER_ID")
if MY_USER_ID:
MY_USER_ID = int(MY_USER_ID)
# args only
UWU = False
# resultants
openai.api_key = OPENAI_KEY
completion = openai.Completion()
client = TelegramClient("listener", TG_API_ID, TG_API_HASH)
PRESET_PROMPT = []
MAX_SAVED_MESSAGES = 10
ENGINE = "babbage" # $0.006/tok
MAX_TOKS = 100
TEMPERATURE = 1.0
TOP_P = 1
FREQUENCY_PENALTY = 1.0
PRESENCE_PENALTY = 0.2
AI_AUTHOR = "<AI>"
TG_AUTHOR = "<TG>"
INDICATOR = "<GPT3>"
LAST_MESSAGES = deque()
def add_message(message: str, author: str):
if len(LAST_MESSAGES) == MAX_SAVED_MESSAGES:
LAST_MESSAGES.rotate(-1)
LAST_MESSAGES[-1] = {"text": message, "author": author}
else:
LAST_MESSAGES.append({"text": message, "author": author})
def format_messages(author_to_prompt: str):
output = ""
for message in LAST_MESSAGES:
output += f"{message['author']}: {message['text']}\n"
output += f"{author_to_prompt}: "
return output
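# Hedged illustration (added): what the rolling prompt built by format_messages()
# looks like. The sample messages are invented; this is defined as a function so
# it does not mutate LAST_MESSAGES at import time.
def _demo_prompt_layout():
    add_message("hi there", TG_AUTHOR)
    add_message("hello!", AI_AUTHOR)
    # format_messages(AI_AUTHOR) now returns:
    # "<TG>: hi there\n<AI>: hello!\n<AI>: "
    return format_messages(AI_AUTHOR)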
def ask():
convo = format_messages(AI_AUTHOR)
response = completion.create(
prompt=convo,
engine=ENGINE,
stop=[TG_AUTHOR, AI_AUTHOR],
temperature=TEMPERATURE,
frequency_penalty=FREQUENCY_PENALTY,
presence_penalty=PRESENCE_PENALTY,
best_of=1,
max_tokens=MAX_TOKS,
)
answer = response.choices[0].text.strip()
return answer
@client.on(events.NewMessage)
async def new_message_handler(event):
# TODO: breaks sometimes: groups, broadcasts, permissions
# if event.message.peer_id.user_id == MY_USER_ID:
# return
from_other = event.message.message
print(f"{TG_AUTHOR}: {from_other}") # TODO: logging
add_message(from_other, TG_AUTHOR)
from_ai = ""
while from_ai == "":
from_ai = ask()
print(f"{AI_AUTHOR}: {from_ai}")
add_message(from_ai, AI_AUTHOR)
if UWU:
from_ai = uwuify.uwu(from_ai)
await event.reply(f"{INDICATOR}: {from_ai}")
async def client_setup():
await client.connect()
await client.start()
await client.run_until_disconnected()
print("oops")
return client
def main():
for message in PRESET_PROMPT:
add_message(message, AI_AUTHOR)
loop = asyncio.get_event_loop()
loop.run_until_complete(client_setup())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="A cool autoresponder for connecting services to GPT3"
)
parser.add_argument(
"mode",
choices=["telegram", "local"],
help="Select where to respond to the user (TODO)",
)
parser.add_argument(
"--uwu",
dest="uwu",
action="store_true",
default=False,
help="uwuify all responses",
)
ARGV = parser.parse_args()
if ARGV.uwu:
if UWU_LIB_AVAIL:
UWU = True
else:
exit("--uwu set but uwuify is not importable!")
main()
| [
"[]"
] |
2024-01-10 | mikiane/brightnessaiv2 | lib__hfmodels.py | import requests
import openai
from huggingface_hub import InferenceClient
def query(payload, headers, api_url):
response = requests.post(api_url, headers=headers, json=payload)
return response.json()
#### NOT WORKING / TO DEBUG ####
def stream_mistral(prompt, api_token="none", max_tokens=1024):
client = openai.OpenAI(api_key=api_token) # Create an OpenAI client with the API key
response = client.chat.completions.create(
model="mistralai/Mistral-7B-Instruct-v0.1",
messages=[
{'role': 'system', 'content': "Je suis un assistant"},
{'role': 'user', 'content': prompt}
],
temperature=0,
max_tokens=max_tokens,
stream=True
)
# For each part of the response
for chunk in response:
# With the v1 OpenAI SDK, stream chunks are objects: read delta.content as an attribute
if chunk.choices[0].delta.content:
content = chunk.choices[0].delta.content  # Extract the content
print(content)
yield f"{content}"  # Yield the content as a string
#############################################################################################################################
##### NOT WORKING / TO DEBUG ####
def stream_hfllm(prompt, api_token, api_url, max_token, num_tokens=300):
client = InferenceClient(api_url, token=api_token)
for token in client.text_generation(prompt, max_new_tokens=max_token, stream=True):
yield f"{token}"
#############################################################################################################################
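# Hedged usage sketch (added): streaming tokens from a text-generation-inference
# endpoint. The URL and token below are placeholders, not values from this repo.
def _demo_stream_hfllm():
    api_url = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"  # assumed endpoint
    for token in stream_hfllm("<s>[INST]Say hello[/INST]", api_token="hf_xxx", api_url=api_url, max_token=64):
        print(token, end="", flush=True)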
| [
"Je suis un assistant"
] |
2024-01-10 | mikiane/brightnessaiv2 | generatechatcompletion.py | # -*- coding: utf-8 -*-
'''
Filename: generatechatcompletion.py
Author: Michel Levy Provencal
Description: This file defines two functions, generate_chat_completion and generate_chat, that use OpenAI's API to generate chat responses. It uses environmental variables for API keys and includes a default model of "gpt-4" if no model is specified in the function parameters.
'''
import openai # Import the openai API package
import os # Import os module for interacting with the operating system
from dotenv import load_dotenv # Import dotenv module for loading .env files
import lib__anthropic
import lib__hfmodels
from huggingface_hub import InferenceClient
# Load the environment variables from the .env file
load_dotenv(".env")
# Set the OpenAI API key from the environment variables
openai.api_key = os.environ['OPEN_AI_KEY']
def extract_context(text, model):
"""
Extract a context from 'text' based on a model-dependent length limit.
If 'text' is shorter than the limit, return the full text.
Otherwise, return a combination of the first and last characters of 'text'
with ' [...] ' inserted in the middle to mark the cut.
:param text: The string to process.
:param model: The model name, used to pick the context-window size.
:return: The processed string.
"""
token_nb = 2000
if model == "claude-2":
token_nb = 100000
if model == "gpt-4":
token_nb = 8000
if model == "gpt-4-1106-preview":
token_nb = 128000
if model == "gpt-3.5-turbo-16k":
token_nb = 16000
if model == "hf":
token_nb = 2000
if model == "mistral":
token_nb = 2000
if token_nb > 2000:
limit = (int(token_nb)*2) - 4000
else:
limit = int((int(token_nb)*2)/2)
if len(text) < limit:
return text
else:
half_limit_adjusted = limit // 2 - 4
return text[:half_limit_adjusted] + ' [...] ' + text[-half_limit_adjusted:]
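# Hedged illustration (added): for the 2000-token bucket used by "mistral", a
# 3000-character string gets its middle elided. The sample values are invented.
def _demo_extract_context():
    long_text = "A" * 3000
    short = extract_context(long_text, "mistral")  # 2000-character budget for this bucket
    assert " [...] " in short and len(short) < len(long_text)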
# Function to generate chat completions
def generate_chat_completion(consigne, texte, model="gpt-4", model_url=os.environ['MODEL_URL']):
texte = extract_context(texte, model)
client = openai.OpenAI(api_key=os.environ['OPENAI_API_KEY'])
prompt = str(consigne + " : " + texte) # Construct the prompt from the given consigne and texte
if model == "claude-2":
response = lib__anthropic.generate_chat_completion_anthropic(consigne, texte, model)
for content in response:
print(content)
yield content
else:
if model == "hf":
#prompt = str(consigne + "\n Le texte : ###" + texte + " ###\n") # Construct the prompt from the given consigne and texte
prompt = str(consigne + "\n" + texte) # Construct the prompt from the given consigne and texte
prompt = "<s>[INST]" + prompt + "[/INST]"
print("Prompt : " + prompt + "\n")
print("Model URL : " + model_url + "\n" + "HF TOKEN : " + os.environ['HF_API_TOKEN'] + "\n")
client = InferenceClient(model_url, token=os.environ['HF_API_TOKEN'])
response = client.text_generation(
prompt,
max_new_tokens=1024,
stream=True
)
for result in response:
yield result
else:
# Use OpenAI's Chat Completion API
completion = client.chat.completions.create(
model=model,
messages=[
{'role': 'system', 'content': "Je suis un assistant parlant parfaitement le français et l'anglais capable de corriger, rédiger, paraphraser, traduire, résumer, développer des textes."},
{'role': 'user', 'content': prompt}
],
temperature=0,
stream=True
)
for message in completion:
# Check the structure of each streamed chunk here and extract the content;
# the following line is an example and may need adjustments
if message.choices[0].delta.content:
text_chunk = message.choices[0].delta.content
print(text_chunk, end="", flush=True)
yield text_chunk
# Function to generate chat
def generate_chat(consigne, texte, system="", model="gpt-4", model_url=os.environ['MODEL_URL']):
prompt = str(consigne + " : " + texte) # Construct the prompt from the given consigne and texte
# Call the OpenAI API to create a chat
client = openai.OpenAI(api_key=os.environ['OPENAI_API_KEY'])
texte = extract_context(texte, model)
if model == "claude-2":
response = lib__anthropic.generate_chat_completion_anthropic(consigne, texte, model)
for content in response:
print(content)
yield content
else:
if model == "hf":
prompt = str(consigne + "\n" + texte) # Construct the prompt from the given consigne and texte
#prompt = str(consigne + "\n Le texte : ###" + texte + " ###\n") # Construct the prompt from the given consigne and texte
prompt = "<s>[INST]" + prompt + "[/INST]"
print("Prompt : " + prompt + "\n")
print("Model URL : " + model_url + "\n" + "HF TOKEN : " + os.environ['HF_API_TOKEN'] + "\n")
client = InferenceClient(model_url, token=os.environ['HF_API_TOKEN'])
response = client.text_generation(
prompt,
max_new_tokens=1024,
stream=True
)
for result in response:
yield result
else:
#Model = gpt-4-1106-preview
completion = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": system},
{"role": "user", "content": prompt}
],
temperature=0,
stream=True
)
for message in completion:
# Check the structure of each streamed chunk here and extract the content;
# the following line is an example and may need adjustments
if message.choices[0].delta.content:
text_chunk = message.choices[0].delta.content
print(text_chunk, end="", flush=True)
yield text_chunk
| [
"<s>[INST]PLACEHOLDER[/INST]",
"PLACEHOLDER : PLACEHOLDER",
"Je suis un assistant parlant parfaitement le français et l'anglais capable de corriger, rédiger, paraphraser, traduire, résumer, développer des textes.",
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | mikiane/brightnessaiv2 | lib__embedded_context.py |
# ----------------------------------------------------------------------------
# Project: Semantic Search Module for the Alter Brain project
# File: lib__embedded_context.py
#
# This lib is the Semantic Search Module for the Alter Brain project. It implements a
# system for understanding and processing natural language to facilitate
# information retrieval based on semantics rather than traditional keyword-based search.
#
# Author: Michel Levy Provencal
# Brightness.ai - 2023 - [email protected]
# ----------------------------------------------------------------------------
import pandas as pd
import os
import csv
import openai
from openai import OpenAI
#from openai.embeddings_utils import get_embedding
from transformers import GPT2TokenizerFast
from dotenv import load_dotenv
import random
import numpy as np
import sys
import time
import requests
import os.path
import PyPDF2
import docx
import json
import pptx
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
import pytesseract
from PIL import Image
from openpyxl import load_workbook
import requests
from bs4 import BeautifulSoup
from markdownify import markdownify as md
from urllib.parse import urlparse, urljoin
from lib__env import *
os.environ["TOKENIZERS_PARALLELISM"] = "false"
"""
#############################################################################################################
## TOOLS
#############################################################################################################
"""
# Fonction pour générer un nom de fichier unique
def generate_unique_filename(prefix, suffix):
"""
Generates a unique filename by appending a random number between 1 and 9999 to the given prefix,
and then appending a specified suffix.
:param prefix: The prefix of the filename.
:param suffix: The suffix of the filename (usually the file extension).
:return: A string representing a unique filename with the format 'prefix_randomNumber.suffix'.
"""
random_number = random.randint(1, 9999)
return f"{prefix}_{random_number}.{suffix}"
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier PDF en texte
def convert_pdf_to_text(file_path):
"""
Converts a PDF file to text using PyPDF2.
This function reads a PDF file, extracts the text from each page, and then concatenates the extracted text from all pages into a single string.
:param file_path: Path to the PDF file.
:return: The text extracted from the PDF file.
"""
with open(file_path, "rb") as file:
pdf_reader = PyPDF2.PdfReader(file)
text = ""
for page_num in range(len(pdf_reader.pages)):
page = pdf_reader.pages[page_num]
text += page.extract_text()
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier .docx en texte
def convert_docx_to_text(file_path):
"""
Converts the contents of a .docx file into plain text.
This function takes as input a path to a .docx file, opens the file,
and extracts the text from each paragraph in the document. The extracted
text from each paragraph is then joined together with newline characters
in between each paragraph to form a single string of text.
:param file_path: The path to the .docx file.
:return: A single string containing the text of the .docx file.
"""
doc = docx.Document(file_path)
text = "\n".join([paragraph.text for paragraph in doc.paragraphs])
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier CSV en texte
def convert_csv_to_text(file_path):
"""
Converts a CSV file into a text format.
:param file_path: The path to the CSV file.
:return: A string representation of the CSV file.
"""
with open(file_path, "r") as file:
csv_reader = csv.reader(file)
text = "\n".join([",".join(row) for row in csv_reader])
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier JSON en texte
def convert_json_to_text(file_path):
"""
Converts a JSON file into a formatted text string.
:param file_path: The path to the JSON file.
:return: A string representing the JSON data, formatted with indentation.
"""
with open(file_path, "r") as file:
data = json.load(file)
text = json.dumps(data, indent=4)
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier Excel en texte
def convert_excel_to_text(file_path):
"""
Converts an Excel workbook into a text format. Each cell is separated by a comma,
and each row is separated by a new line.
:param file_path: The path of the Excel file.
:return: A string representing the content of the Excel file.
"""
workbook = load_workbook(file_path)
text = ""
for sheet in workbook:
for row in sheet.values:
text += ",".join([str(cell) for cell in row])
text += "\n"
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier .pptx en texte
def convert_pptx_to_text(file_path):
"""
Converts the content of a PowerPoint presentation (.pptx) into text.
:param file_path: The path to the .pptx file.
:return: A string containing the text content of the presentation.
"""
presentation = pptx.Presentation(file_path)
text = ""
for slide in presentation.slides:
for shape in slide.shapes:
if shape.has_text_frame:
for paragraph in shape.text_frame.paragraphs:
text += paragraph.text
text += "\n"
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier XML en texte
def convert_xml_to_text(file_path):
"""
Converts the content of an XML file into plain text.
:param file_path: The path to the XML file.
:return: The text extracted from the XML file.
"""
tree = ET.parse(file_path)
root = tree.getroot()
text = ET.tostring(root, encoding="utf-8", method="text").decode("utf-8")
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir un fichier HTML en texte
def convert_html_to_text(file_path):
"""
Converts an HTML file into a plain text by removing all the HTML tags.
:param file_path: The path to the HTML file.
:return: The text content of the HTML file.
"""
with open(file_path, "r") as file:
soup = BeautifulSoup(file, "html.parser")
text = soup.get_text()
return text
# ----------------------------------------------------------------------------
# Fonction pour convertir une image en texte à l'aide de l'OCR
def convert_image_to_text(file_path):
"""
Converts an image into text using Optical Character Recognition (OCR).
:param file_path: Path to the image file.
:return: Extracted text from the image.
"""
image = Image.open(file_path)
text = pytesseract.image_to_string(image, lang="eng")
return text
def convert_text_to_text(file_path):
"""
Converts the content of a text file to UTF-8 and returns a text string.
:param file_path: The path to the text file.
:return: The text extracted from the text file.
"""
with open(file_path, "r", encoding="utf-8") as file:
text = file.read()
return text
""" VERSION WITH COMPREHENSSION LIST
# ----------------------------------------------------------------------------
# Function that concat all files contained in a folder in a text
def concat_files_in_text(path):
# Concatenates the files of a directory into a single text.
# :param path: Directory path.
# :return: Concatenated text.
files = [os.path.join(path, f) for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
texts = []
for file in files:
with open(file, 'r') as f:
texts.append(f.read())
return ' '.join(texts)
"""
def concat_files_in_text(path):
"""
Concatenates the files of a directory into a single text.
:param path: Directory path.
:return: Concatenated text.
"""
files = []
for f in os.listdir(path):
full_path = os.path.join(path, f)
if os.path.isfile(full_path):
files.append(full_path)
texts = []
for file in files:
with open(file, 'r') as f:
file_content = f.read()
texts.append(file_content)
return ' '.join(texts)
# ----------------------------------------------------------------------------
# Function that split the txt file into blocks. Uses batch (limit) of 2000 words with gpt3.5 and 4000 with gpt4
def split_text_into_blocks(text, limit=4000):
"""
This function splits a given text into chunks, or "blocks", each containing a certain number of words specified by the 'limit' parameter. It's particularly designed for use with GPT-3.5 (limit of 2000 words) and GPT-4 (limit of 4000 words).
Each block is constructed by sequentially adding words from the input text until the block size (the number of words in the block) reaches the limit. If adding another word would exceed the limit, the function checks for the last sentence or line delimiter in the current block (a period or newline character), then separates the block at that delimiter.
If there is no delimiter in the current block, the entire block is added to the list of blocks and the next word starts a new block. If a delimiter is found, the block is split at the delimiter, and the remaining text (if any) is added to the next block along with the next word.
The function returns a list of blocks.
:param text: The input text to be split into blocks.
:param limit: The maximum number of words allowed in each block. Default is 4000.
:return: A list of text blocks obtained from the input text.
"""
### TODO : Adapt the limit to other LLMs (LLAMAS, All-GPT, etc.)
blocks = []
current_block = ""
words = text.split()
for word in words:
if len(current_block + word) + 1 < limit:
current_block += word + " "
else:
last_delimiter_index = max(current_block.rfind(". "), current_block.rfind("\n"))
if last_delimiter_index == -1:
blocks.append(current_block.strip())
current_block = word + " "
else:
delimiter = current_block[last_delimiter_index]
blocks.append(current_block[:last_delimiter_index + (1 if delimiter == '.' else 0)].strip())
current_block = current_block[last_delimiter_index + (2 if delimiter == '.' else 1):].strip() + " " + word + " "
if current_block.strip():
blocks.append(current_block.strip())
return blocks
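# Hedged illustration (added): splitting a short text with a deliberately tiny
# limit so the sentence-boundary behaviour is visible. Values are invented.
def _demo_split_text_into_blocks():
    text = "First sentence. Second sentence. Third sentence."
    blocks = split_text_into_blocks(text, limit=30)
    # Each block ends at the last complete sentence that fits under the limit.
    assert blocks == ["First sentence.", "Second sentence.", "Third sentence."]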
# ----------------------------------------------------------------------------
# Function that write blocks into filename
def write_blocks_to_csv(blocks, path, filename):
"""
This function takes a list of blocks (data items) and a filename as input parameters, then writes these blocks into a CSV file specified by the given filename.
The blocks are written row by row in the CSV file, with each block making up a single row.
The CSV file is created with a specific encoding (UTF-8), and using specific settings for delimiter and quotechar for CSV data formatting.
:param blocks: A list of data items to be written into the CSV file.
:param filename: The name of the CSV file where the data should be written.
"""
with open(path + filename, "w", newline="", encoding="utf-8") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# Write the header
csvwriter.writerow(['Datas'])
for block in blocks:
csvwriter.writerow([block])
def get_embedding(text, engine="text-embedding-ada-002"):
"""
This function takes in a piece of text and a model engine as input parameters, and returns an embedding for the input text.
It utilizes OpenAI's Embedding API to generate the embedding based on the specified model.
The function first replaces newline characters in the input text with spaces, as the embedding models typically
handle single continuous strings of text.
:param text: The input text for which to generate an embedding.
:param engine: The model engine to use for generating the embedding. Default is 'text-embedding-ada-002'.
:return: The generated embedding for the input text.
"""
text = text.replace("\n", " ")
client = OpenAI(api_key=os.environ['OPENAI_API_KEY'])
# DEBUG
print("get embedding for " + text)
response = client.embeddings.create(input=[text], model=engine)
response_dict = response.model_dump() # Conversion de la réponse en dictionnaire
return response_dict['data'][0]['embedding']
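# Hedged usage sketch (added): requires a valid OPENAI_API_KEY in the environment.
# text-embedding-ada-002 returns a 1536-dimensional vector.
def _demo_get_embedding():
    vec = get_embedding("hello world")
    assert len(vec) == 1536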
# Updated function to create embeddings with the new OpenAI SDK
def create_embeddings(path, filename):
"""
This function reads text data from a specified CSV file and creates embeddings for each text entry using OpenAI's
Embedding API. It then saves the generated embeddings back to a new CSV file.
The function uses a GPT-2 tokenizer to tokenize the text data. It then filters out rows where the number of tokens
exceeds 8000 and keeps the last 2000 records. The function also drops rows with missing values from the data.
The embeddings are generated using the 'text-embedding-ada-002' model by default, and the generated embeddings are
saved as a new column in the DataFrame.
The function finally saves the DataFrame, with the embeddings, to a new CSV file.
:param path: The directory path where the input and output CSV files are located.
:param filename: The name of the input CSV file from which to read the text data.
"""
load_dotenv(DOTENVPATH)
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
# Open the input CSV file and read it into a Pandas DataFrame.
df_full = pd.read_csv(path + filename, sep=';', on_bad_lines='skip', encoding='utf-8')
# Rename the dataframe's columns
df = df_full[['Datas']]
# Remove rows with missing values
df = df.dropna()
# Count the number of tokens in each row and filter the DataFrame
df['n_tokens'] = df.Datas.apply(lambda x: len(tokenizer.encode(x)))
df = df[df.n_tokens < 8000].tail(2000)
df['ada_embedding'] = df.Datas.apply(get_embedding)
# Write the DataFrame to a new CSV file
df.to_csv(path + "emb_" + filename, index=False)
return path
# ----------------------------------------------------------------------------
# Function that reads and processes a CSV file and returns a DataFrame
def read_and_process_csv(index_filename):
"""
This function takes as input the filename of a CSV file and reads this file into a Pandas DataFrame.
It then processes the 'ada_embedding' column of the DataFrame, converting the string representations
of embeddings stored in this column back into actual Numpy array objects.
The function first reads the CSV file using Pandas' read_csv function, creating a DataFrame where each
row corresponds to a data item from the CSV file and each column corresponds to a field in the data items.
It then applies the eval function to each item in the 'ada_embedding' column to convert the string
representations of the embeddings back into list objects. These lists are then further converted into
Numpy arrays using the np.array function. This processed 'ada_embedding' column replaces the original
column in the DataFrame.
:param index_filename: The filename of the CSV file to read.
:return: The DataFrame created from the CSV file, with the 'ada_embedding' column processed.
"""
df = pd.read_csv(index_filename)
df['ada_embedding'] = df.ada_embedding.apply(eval).apply(np.array)
return df
# ----------------------------------------------------------------------------
# Function that gets an embedding vector for a given text
def get_search_vector(text):
"""
This function takes as input a piece of text and returns an embedding vector for the input text.
It utilizes the 'get_embedding' function to generate the
embedding vector.
The function is a convenience wrapper around the 'get_embedding' function, simplifying its use by
directly passing the input text and relying on the 'get_embedding' function's default parameters
for generating the embedding.
:param text: The input text for which to generate an embedding vector.
:return: The embedding vector for the input text.
"""
return get_embedding(text)
# ----------------------------------------------------------------------------
# Function that finds similar rows in a DataFrame based on an input vector
def find_similar_rows(df, searchvector, n_results):
"""
This function takes as input a DataFrame, a search vector, and a number of results to return.
It calculates the cosine similarity between the search vector and the embeddings in the DataFrame.
The rows with the highest cosine similarity are then sorted and the top 'n_results' rows are returned.
The function adds a new column, 'similarities', to the DataFrame. For each row, it computes the dot
product between the 'ada_embedding' of the row and the search vector, which is equivalent to calculating
the cosine similarity when the vectors are normalized.
The rows in the DataFrame are then sorted in descending order of 'similarities', and the top 'n_results'
rows are returned as a new DataFrame.
:param df: The input DataFrame, each row of which should have an 'ada_embedding' column containing a vector.
:param searchvector: The vector to compare against the 'ada_embedding' of each row in the DataFrame.
:param n_results: The number of top similar rows to return.
:return: A DataFrame containing the top 'n_results' similar rows to the input vector.
"""
df['similarities'] = df.ada_embedding.apply(lambda x: np.dot(x, searchvector))
res = df.sort_values('similarities', ascending=False).head(n_results)
return res
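# Hedged illustration (added): a tiny synthetic DataFrame with unit-vector
# embeddings, so the dot product used above is exactly the cosine similarity.
def _demo_find_similar_rows():
    df = pd.DataFrame({
        'Datas': ['about cats', 'about dogs'],
        'ada_embedding': [np.array([1.0, 0.0]), np.array([0.0, 1.0])],
    })
    top = find_similar_rows(df, np.array([0.9, 0.1]), n_results=1)
    assert top.iloc[0]['Datas'] == 'about cats'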
# ----------------------------------------------------------------------------
# Function that validates a DataFrame and extracts the combined data
def validate_and_get_combined(res):
"""
This function takes a DataFrame as input, performs several validation checks on it,
and then extracts and returns the combined data from the 'Datas' column of the DataFrame.
The function first checks if the 'Datas' column exists in the DataFrame. If it does not,
a ValueError is raised. It then checks if the DataFrame is empty. If it is, a ValueError
is raised. Finally, it checks if the index of the DataFrame is of type 'int64'. If it is
not, a ValueError is raised.
Once these validation checks have passed, the function concatenates all the strings in the
'Datas' column of the DataFrame, with each string separated by a newline character. This combined
string is then returned.
:param res: The input DataFrame to validate and extract combined data from.
:return: A string consisting of all the data from the 'Datas' column of the DataFrame, concatenated with newline characters.
"""
if 'Datas' not in res.columns:
raise ValueError("La colonne 'Datas' n'existe pas dans le DataFrame")
if res.empty:
raise ValueError("Le DataFrame est vide")
if res.index.dtype != 'int64':
raise ValueError("L'index du DataFrame n'est pas de type entier")
return '\n'.join(res['Datas'].values)
# ----------------------------------------------------------------------------
# Function that converts all the files in a folder to text and store them in a new subfolder named 'text'
def create_text_folder(folder_path):
"""
Converts every supported file in 'folder_path' to plain text using the
converters above, stores the results in a new subfolder named 'text_tmp',
and returns the path of that subfolder.
"""
# Folder containing the files to convert
source_folder = folder_path
# Destination folder for the converted text files
destination_folder = folder_path + "text_tmp"
# List of supported file formats
supported_formats = {
".pdf": convert_pdf_to_text,
".docx": convert_docx_to_text,
".csv": convert_csv_to_text,
".json": convert_json_to_text,
".xls": convert_excel_to_text,
".xlsx": convert_excel_to_text,
".pptx": convert_pptx_to_text,
".xml": convert_xml_to_text,
".html": convert_html_to_text,
".jpg": convert_image_to_text,
".jpeg": convert_image_to_text,
".png": convert_image_to_text,
".txt": convert_text_to_text
}
# Create the destination folder if it doesn't exist
if not os.path.exists(destination_folder):
os.makedirs(destination_folder)
# Iterate through all the files in the source folder
for file_name in os.listdir(source_folder):
source_file_path = os.path.join(source_folder, file_name)
if os.path.isfile(source_file_path):
file_name_without_ext, file_ext = os.path.splitext(file_name)
if file_ext.lower() in supported_formats:
converter = supported_formats[file_ext.lower()]
text = converter(source_file_path)
destination_file_name = generate_unique_filename(file_name_without_ext, "txt")
destination_file_path = os.path.join(destination_folder, destination_file_name)
with open(destination_file_path, "w", encoding="utf-8") as file:
file.write(text)
return(str(destination_folder))
# ----------------------------------------------------------------------------
# Function that converts an url to text
def get_text_from_url(url):
"""
Retrieves the text of a web page from its URL.
Args:
url (str): The URL of the web page to fetch.
Returns:
str: The text of the web page, or an empty string on error.
"""
text = ""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
try:
# Fetch the HTML content of the URL
response = requests.get(url, headers=headers)
response.raise_for_status()
except requests.exceptions.RequestException as e:
print(f"Erreur lors de la requête vers {url} : {e}")
return text
try:
soup = BeautifulSoup(response.text, 'html.parser')
# Extract the text from the HTML structure
text = soup.get_text()
except Exception as e:
print(f"Erreur lors de l'analyse HTML pour {url} : {e}")
return text
return text
"""
#############################################################################################################
## FUNCTIONS TO INDEX & SEARCH EMBEDDINGS
#############################################################################################################
"""
## ----------------------------------------------------------------------------
## Function that creates a csv index file containing embeddings, from a folder named path.
## The index is stored in the same folder, and named : emb_csv_XXX.csv
## The function returns the name of the index file : emb_csv_XXX.csv
## (while the concatenated text is stored in a txt file named txt_XXX.txt and the csv file containing the blocks is named csv_XXX.csv)
def build_index(folder_path):
"""
This function reads multiple text files from a specified folder, concatenates the text,
splits the concatenated text into blocks of specified length, writes these blocks into a CSV file,
creates embeddings for each block using the create_embeddings function, and finally, saves the
embeddings back to the CSV file.
The function first generates a random number which is used to create unique filenames for the
intermediate text and CSV files. It then reads and concatenates all the text files from the
specified folder into a single string of text.
The function then calls the split_text_into_blocks function to split the text into blocks of
up to 4000 characters each. The resulting list of blocks is written to a new CSV file.
The function then calls the create_embeddings function to create embeddings for each text block
in the CSV file. The embeddings are saved back to the CSV file.
Finally, the function returns the name of the CSV file containing the embeddings.
:param folder_path: The directory path where the text files are located and where the CSV file will be saved.
:return: The name of the CSV file containing the embeddings.
"""
# transform files into text, create a subfolder named 'text_tmp' and save the text files in it.
text_folder_path = create_text_folder(folder_path)
# Concatenate files in text
text = concat_files_in_text(text_folder_path)
# OLD VERSION : Save text in a csv file named csv(random number).csv
# random_num = random.randint(1000,9999) # Generates a random number between 1000 and 9999
# Call the function split_text_into_blocks() to split the text into blocks
blocks = split_text_into_blocks(text, limit=4000)
# Call the function write_blocks_to_csv() to write the blocks into a csv file
write_blocks_to_csv(blocks, folder_path, 'index.csv')
# Create embeddings for the csv file
brain_id = create_embeddings(folder_path, 'index.csv')
return(brain_id)
## ----------------------------------------------------------------------------
## Function that creates embdeedings from the content of an url
def build_index_url(url):
index_text = get_text_from_url(url)
# create a temporary working folder named with a timestamp
timestamp = time.strftime("%Y%m%d-%H%M%S")
folder_path = "datas/" + timestamp + "/"
# Create the new folder
os.makedirs(folder_path, exist_ok=True)
# save the text in a txt file
with open(folder_path + 'url_index.txt', 'w', encoding='utf-8') as f:
f.write(index_text)
build_index(folder_path)
return(timestamp)
# ----------------------------------------------------------------------------
# Function that finds the context for a given query in an index file
def find_context(text, index_filename, n_results=5):
"""
This function takes as input a piece of text, the filename of a CSV file containing indexed data,
and an optional number of results to return. It finds the most similar data items to the input
text in the indexed data and returns the combined data from these items.
The function first loads environment variables from a .env file, including the OpenAI API key.
It then reads and processes the indexed data from the CSV file into a DataFrame.
The function creates an embedding for the input text using the get_search_vector function.
This embedding is compared with the embeddings of the data items in the DataFrame to find
the most similar items.
The most similar items are sorted by their similarity scores, and the top 'n_results' items are
selected. The combined data from these items is extracted and returned.
:param text: The input text for which to find similar data items.
:param index_filename: The filename of the CSV file containing the indexed data.
:param n_results: The number of most similar data items to return. Default is 5.
:return: The combined data from the most similar data items.
"""
load_dotenv(".env") # Load the environment variables from the .env file.
openai.api_key = os.environ.get("OPENAI_API_KEY")
if not os.path.exists(index_filename):
return ""
df = read_and_process_csv(index_filename)
searchvector = get_search_vector(text)
res = find_similar_rows(df, searchvector, n_results)
return validate_and_get_combined(res)
# ----------------------------------------------------------------------------
# Function that queries the OpenAI language model with a context and query
def query_extended_llm(text, index_filename, model="gpt-4"):
"""
This function takes as input a piece of text, the filename of a CSV file containing indexed data,
and an optional AI model to use. It queries the OpenAI language model with a context derived from
the most similar data items to the input text, and a prompt derived from the input text itself.
It returns the response from the language model.
The function first finds the context for the input text using the find_context function. It then
loads environment variables from a .env file, including the OpenAI API key.
The function then enters a loop where it attempts to query the language model with the context and
the input text as a prompt. If it encounters an exception during this process, it waits for 5 seconds
and then tries again, up to a maximum of 10 attempts.
If the function is able to successfully query the language model, it returns the model's response as
a string. If it is unable to do so after 10 attempts, it prints an error message and terminates the
program.
:param text: The input text for which to query the language model.
:param index_filename: The filename of the CSV file containing the indexed data.
:param model: The AI model to use for the query. Default is 'gpt-4'.
:return: The response from the language model.
"""
context = find_context(text, index_filename)
client = OpenAI(api_key=os.environ['OPENAI_API_KEY']) # Initialisation du client OpenAI
attempts = 0
prompt = "Context : " + context + "\n\n" + "Query : " + text
while attempts < 10:
try:
response = client.chat.completions.create(
model=model,
messages=[
{"role": "user", "content": prompt}
]
)
message = response.choices[0].message.content
return message.strip()
except Exception as e:
error_code = type(e).__name__
error_reason = str(e)
attempts += 1
print(f"Erreur : {error_code} - {error_reason}. Nouvel essai dans 5 secondes...")
time.sleep(int(attempts)*2)
print("Erreur : Echec de la création de la completion après 10 essais")
sys.exit()
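# Hedged end-to-end sketch (added): index a folder of documents, then ask a
# question against it. "my_corpus/" is a placeholder path and valid API keys
# must be configured; build_index() writes index.csv and emb_index.csv there.
def _demo_semantic_search(folder="my_corpus/"):
    build_index(folder)
    return query_extended_llm("What does the corpus say about pricing?",
                              folder + "emb_index.csv", model="gpt-4")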
| [
"Context : PLACEHOLDER\n\nQuery : PLACEHOLDER"
] |
2024-01-10 | mikiane/brightnessaiv2 | lib__anthropic.py | from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
from dotenv import load_dotenv
load_dotenv('.env')
# Environment Variables
ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
PODCASTS_PATH = os.environ.get("PODCASTS_PATH")
SENDGRID_KEY = os.environ.get("SENDGRID_KEY")
SENDGRID_KEY = os.environ['SENDGRID_KEY']
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
AWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = os.environ['AWS_SECRET_KEY']
REGION_NAME = os.environ['REGION_NAME']
ANTHROPIC_API_KEY = os.environ['ANTHROPIC_API_KEY']
# Make sure the API key has been set as an environment variable
api_key = ANTHROPIC_API_KEY
def generate_chat_completion_anthropic(consigne, texte, model="claude-2"):
# Construct the prompt from the given consigne and texte
prompt = f"{HUMAN_PROMPT} {consigne} : {texte}{AI_PROMPT}"
# Create an Anthropic client
client = Anthropic()
# Create a stream completion using the Anthropic API
stream = client.completions.create(
prompt=prompt,
model=model,
stream=True,
# Set any other desired parameters here, for example:
max_tokens_to_sample=99000
)
# Iterate over the stream completions and yield the results
for completion in stream:
yield completion.completion
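# Hedged usage sketch (added): consuming the streamed Claude completion.
# The instruction and text are invented sample values; ANTHROPIC_API_KEY must be set.
def _demo_anthropic_stream():
    for chunk in generate_chat_completion_anthropic("Summarize", "A short sample text.", model="claude-2"):
        print(chunk, end="", flush=True)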
| [
"PLACEHOLDER PLACEHOLDER : PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | mikiane/brightnessaiv2 | lib__transformers.py |
'''
Filename: __lib_transformers.py
Author: Michel Levy Provencal
Description: This script includes a variety of functions designed for text and audio transformation using OpenAI's GPT-3 API and Amazon Polly.
'''
# Import the necessary libraries
import os
import subprocess
from dotenv import load_dotenv
import numpy as np
import pandas as pd
import openai
import boto3
import tempfile
import random
from random import randint
from datetime import datetime
import pydub
from pydub import AudioSegment
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Attachment, FileContent, FileName, FileType, Disposition
import base64
import mimetypes
import time
import sys
import csv
import requests
import time
import csv
from elevenlabs import set_api_key
from urllib.parse import unquote
from queue import Queue
from moviepy.editor import *
from datetime import date
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail, Attachment, FileContent, FileName, FileType, Disposition
from bs4 import BeautifulSoup
import json
from num2words import num2words
import re
import lib__sendmail
from openai import OpenAI
model="gpt-4"
# Load the environment variables from the .env file
load_dotenv('.env')
#model = "gpt-3.5-turbo"
#load_dotenv(".env") # Load the environment variables from the .env file.
#load_dotenv("/home/michel/extended_llm/.env") # Load the environment variables from the .env file.
ELEVENLABS_API_KEY = os.environ.get("ELEVENLABS_API_KEY")
PODCASTS_PATH = os.environ.get("PODCASTS_PATH")
SENDGRID_KEY = os.environ.get("SENDGRID_KEY")
# Environment Variables
SENDGRID_KEY = os.environ['SENDGRID_KEY']
APP_PATH = os.environ['APP_PATH']
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
AWS_ACCESS_KEY = os.environ['AWS_ACCESS_KEY']
AWS_SECRET_KEY = os.environ['AWS_SECRET_KEY']
REGION_NAME = os.environ['REGION_NAME']
"""
# instantiate an Amazon Polly client
polly = boto3.client('polly', region_name=REGION_NAME,
aws_access_key_id=AWS_ACCESS_KEY,
aws_secret_access_key=AWS_SECRET_KEY)
# function to break down input text into smaller segments and then use Polly to generate speech
#### Synthèse avec Amazon Polly
def synthesize_multi_polly(inputtext):
# define a maximum number of characters for each Polly API call
max_chars = 2500
segments = []
# break down the input text into sentences
sentences = inputtext.split('. ')
current_segment = ''
# iterate over each sentence and add to the current segment until the limit is reached
for sentence in sentences:
if len(current_segment) + len(sentence) + 1 <= max_chars:
current_segment += sentence + '. '
else:
segments.append(current_segment)
current_segment = sentence + '. '
# add the last segment if it is not empty
if current_segment:
segments.append(current_segment)
# set up an output directory and a list to store paths to output files
output_dir = APP_PATH + 'datas/'
output_files = []
# iterate over each segment
for i, segment in enumerate(segments):
print("Segment number :" + str(i))
print("\n" + segment)
# get the current time
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current time is :", current_time)
# prepare the text for the Polly API and make the request
ssml_segment = "<speak><prosody rate=\"90%\">" + str(segment) + "</prosody></speak>"
response = polly.synthesize_speech(
OutputFormat='mp3',
VoiceId='Remi',
TextType='ssml',
Text=ssml_segment,
LanguageCode='fr-FR',
Engine='neural'
)
print("API response received")
# get the current time
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current time is :", current_time)
audio_stream = response.get('AudioStream')
audio_data = audio_stream.read()
# generate a unique filename and save the audio data to a file
filename = f"audiooutput_segment{i}.mp3"
output_path = os.path.join(output_dir, filename)
with open(output_path, 'wb') as f:
f.write(audio_data)
# add the path to the output file to the list
output_files.append(output_path)
# concatenate all the audio files together
combined_audio = pydub.AudioSegment.silent(duration=0)
for output_file in output_files:
segment_audio = pydub.AudioSegment.from_mp3(output_file)
combined_audio += segment_audio
# generate a filename for the final output file
final_filename = "audiooutput" + str(random.randint(1, 10000)) + ".mp3"
final_output_path = os.path.join(output_dir, final_filename)
# save the combined audio to a file
combined_audio.export(final_output_path, format='mp3')
# return the path to the final output file
return (output_dir + final_filename)
"""
def replace_numbers_with_text(input_string):
# Replace percentages
percentages = re.findall(r'\d+%', input_string)
for percentage in percentages:
number = percentage[:-1]
number_in_words = num2words(number, lang='fr')
input_string = input_string.replace(percentage, f"{number_in_words} pour cent")
# Replace numbers
numbers = re.findall(r'\b\d+\b', input_string)
for number in numbers:
number_in_words = num2words(number, lang='fr')
input_string = input_string.replace(number, number_in_words)
return input_string
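# Hedged illustration (added): percentages and digits are spelled out in French
# before synthesis, which avoids awkward text-to-speech renderings of numerals.
def _demo_replace_numbers_with_text():
    # Expected (assuming num2words' French output): "cinquante pour cent des trois tests"
    return replace_numbers_with_text("50% des 3 tests")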
def split_text(text, limit=1000):
"""
This function splits the text into chunks of around 1000 characters. \n
It splits before a newline character.
"""
chunks = []
current_chunk = ""
for line in text.split('\n'):
if len(current_chunk) + len(line) <= limit:
current_chunk += line + "\n"
else:
chunks.append(current_chunk)
current_chunk = line + "\n"
# Append the last chunk
if current_chunk:
chunks.append(current_chunk)
return chunks
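# Hedged illustration (added): chunks are built line by line and stay around the
# character limit, so each one can be sent to the TTS API separately.
def _demo_split_text():
    chunks = split_text("line one\nline two\nline three", limit=18)
    assert all(len(c) <= 18 for c in chunks) and len(chunks) == 2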
##################################################################
### Function to convert text to speech with Eleven Labs API
def texttospeech(text, voice_id, filename):
"""
This function calls the Eleven Labs API to convert text to speech
"""
try:
set_api_key(str(ELEVENLABS_API_KEY))
CHUNK_SIZE = 1024
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
headers = {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": ELEVENLABS_API_KEY
}
data = {
"text": text,
"model_id": "eleven_multilingual_v1",
"voice_settings": {
"stability": 0.95,
"similarity_boost": 1
}
}
response = requests.post(url, json=data, headers=headers)
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
f.write(chunk)
except requests.RequestException as e:
print(f"Failed to convert text to speech: {e}")
return
def convert_and_merge(text, voice_id, final_filename):
"""
This function splits the text, converts each chunk to speech and merges all the resulting audio files.
"""
chunks = split_text(text) # Make sure this function is defined correctly
filenames = []
# Initialize combined as an empty AudioSegment
combined = AudioSegment.empty()
for i, chunk in enumerate(chunks):
# Use PODCASTS_PATH to store the temporary mp3 files
filename = os.path.join(str(PODCASTS_PATH), f"{str(i)}.mp3")
print(filename)
filenames.append(filename)
texttospeech(chunk, voice_id, filename) # Make sure this function is defined correctly
# Concatenate each audio segment
audio_segment = AudioSegment.from_mp3(filename)
combined += audio_segment
# Save the final concatenated audio file
combined.export(final_filename, format='mp3')
# Delete temporary audio files
for filename in filenames:
os.remove(filename)
#### Synthesis with Eleven Labs
#####
#voice_id = "DnF3PZl1PUQOKY4LvcUl" # MLP
#voice_id = "FL36qzLoYbdCLMM5R9rF" # MLP-PRO
#voice_id = "TxGEqnHWrfWFTfGW9XjX" # Josh
def synthesize_multi(text, voice_id="FL36qzLoYbdCLMM5R9rF"):
load_dotenv(".env") # Load the environment variables from the .env file.
PODCASTS_PATH = os.environ.get("PODCASTS_PATH")
# create the audio file
final_filename = os.path.join(PODCASTS_PATH, "final_podcast" + str(randint(1, 10000)) + ".mp3")
# handle intonation (numbers are spelled out before synthesis).
convert_and_merge(replace_numbers_with_text(text), voice_id, final_filename)
return (final_filename)
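# Hedged usage sketch (added): ELEVENLABS_API_KEY and PODCASTS_PATH must be set;
# the voice_id defaults to the hard-coded one above and the text is an invented sample.
def _demo_synthesize_multi():
    mp3_path = synthesize_multi("Bonjour, ceci est un test de synthèse vocale.")
    print("audio written to", mp3_path)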
# Function to get the text embedding from OpenAI's API
def get_embedding(text, model="text-embedding-ada-002"):
openai.api_key = OPENAI_API_KEY
text = text.replace("\n", " ") # Replaces newline characters with spaces
return openai.Embedding.create(input = [text], engine=model)['data'][0]['embedding'] # Returns the embedding
# Function to search for a text within a local dataset using text embeddings
def searchembedding(text, filename):
openai.api_key = OPENAI_API_KEY
# Read the CSV file
df = pd.read_csv(filename)
# Convert the strings stored in the 'ada_embedding' column into vector objects
df['ada_embedding'] = df.ada_embedding.apply(eval).apply(np.array)
# Convert the search term into a vector
searchvector = get_embedding(text, model='text-embedding-ada-002')
# Create a new column using cosine_similarity to compare the searchvector with each row
df['similarities'] = df.ada_embedding.apply(lambda x: np.dot(x, searchvector))
# Sort the rows by similarity and keep the most similar one
res = df.sort_values('similarities', ascending=False).head(1)
# Set pandas option to display all columns
pd.set_option('display.max_columns', None)
# Check if the 'combined' column exists in the DataFrame
if 'combined' in res.columns:
# Check if the DataFrame is not empty
if not res.empty:
# Check if the index is of integer type
if res.index.dtype == 'int64':
# Return all records
return '\n'.join(res['combined'].values)
else:
return "L'index du DataFrame n'est pas de type entier"
else:
return "Le DataFrame est vide"
else:
return "La colonne 'combined' n'existe pas dans le DataFrame"
"""
def mailfile(filename, destinataire, message=""):
# Création de l'objet Mail
message = Mail(
from_email='[email protected]',
to_emails=destinataire,
subject='Le résultat du traitement' + message,
plain_text_content='Votre demande a été traité.' + message)
# Lecture du fichier à joindre
with open(filename, 'rb') as f:
data = f.read()
# Encodage du fichier en base64
encoded = base64.b64encode(data).decode()
# Détermination du type MIME du fichier
mime_type = mimetypes.guess_type(filename)[0]
# Création de l'objet Attachment
attachedFile = Attachment(
FileContent(encoded),
FileName(filename),
FileType(mime_type),
Disposition('attachment')
)
message.attachment = attachedFile
# Tentative d'envoi de l'e-mail via SendGrid
try:
sg = SendGridAPIClient(SENDGRID_KEY)
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(e.message)
"""
# Function to split a large text into smaller blocks
def split_text_into_blocks(text, limit=4000):
# Initialize variables
blocks = []
current_block = ""
words = text.split()
# Iterate over words
for word in words:
# Check if word fits in the current block
if len(current_block + word) + 1 < limit:
current_block += word + " "
else:
last_delimiter_index = max(current_block.rfind(". "), current_block.rfind("\n"))
# Break block at the last complete sentence or newline
if last_delimiter_index == -1:
blocks.append(current_block.strip())
current_block = word + " "
else:
delimiter = current_block[last_delimiter_index]
blocks.append(current_block[:last_delimiter_index + (1 if delimiter == '.' else 0)].strip())
current_block = current_block[last_delimiter_index + (2 if delimiter == '.' else 1):].strip() + " " + word + " "
# Add the last block
if current_block.strip():
blocks.append(current_block.strip())
return blocks
# Function to write blocks to a csv file
def write_blocks_to_csv(blocks, filename):
with open(filename, "w", newline="", encoding="utf-8") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for block in blocks:
csvwriter.writerow([block])
# Function to generate a csv file from a string of text
def write_csv_from_string(text, filename):
limit = 4000 # Limit for text blocks
blocks = split_text_into_blocks(text, limit) # Split text into blocks
write_blocks_to_csv(blocks, filename) # Write blocks to csv file
"""
# Function to summarize text
def transform(text, instruct, model="gpt-4"):
api_key = OPENAI_API_KEY
model = "gpt-4"
if model=="gpt-4":
limit = 10000 # Limit for text size
else:
limit = 5000
prompt = instruct + "\n" + text[:limit] + ":\n" # Construct the prompt
system = "Je suis un assistant parlant parfaitement le français et l'anglais capable de corriger, rédiger, paraphraser, traduire, résumer, développer des textes."
# Try to make a request to the API
attempts = 0
while attempts < 10:
try:
url = 'https://api.openai.com/v1/chat/completions'
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {api_key}'
}
data = {
'model': model,
'messages': [
{'role': 'user', 'content': prompt},
{'role': 'system', 'content': system}
]
}
response = requests.post(url, headers=headers, json=data)
json_data = response.json()
message = json_data['choices'][0]['message']['content']
return message.strip()
except Exception as e:
error_code = type(e).__name__
error_reason = str(e)
attempts += 1
print(f"Erreur : {error_code} - {error_reason}. Nouvel essai dans 5 secondes...")
time.sleep(5)
print("Erreur : Echec de la création de la completion après 5 essais")
sys.exit()
"""
def transform(text, instruct, model="gpt-4"):
# Load your API key from an environment variable or directly
client = openai.OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
if model == "gpt-4":
limit = 10000  # Limit for text size
else:
limit = 5000
prompt = instruct + "\n" + text[:limit] + ":\n"
system = "Je suis un assistant parlant parfaitement le français et l'anglais capable de corriger, rédiger, paraphraser, traduire, résumer, développer des textes."
attempts = 0
while attempts < 10:
try:
response = client.chat.completions.create(
model=model,
messages=[
{"role": "system", "content": system},
{"role": "user", "content": prompt}
]
)
message = response.choices[0].message.content
return message.strip()
except Exception as e:
error_code = type(e).__name__
error_reason = str(e)
attempts += 1
print(f"Erreur : {error_code} - {error_reason}. Nouvel essai dans 5 secondes...")
time.sleep(5)
print("Erreur : Échec de la création de la completion après 5 essais")
sys.exit()
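# Hedged usage sketch (added): rewriting a snippet with an instruction.
# The instruction and text are invented sample values; OPENAI_API_KEY must be set.
def _demo_transform():
    return transform("Ceci est un texte a corriger.", "Corrige l'orthographe du texte suivant", model="gpt-4")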
# Function to summarize a chapter of text
def transform_chap(text, prefix, instruct, n=3, model='gpt-4'):
model = "gpt-4"
now = datetime.now()
rand_str = str(now.strftime("%Y%m%d%H%M%S")) + "-"+ str(random.randint(0, 100))
path = APP_PATH + "datas/"
# Write input text to CSV
input_f = path + "_" + prefix + "_input_" + rand_str +".csv"
write_csv_from_string(text, input_f)
# Summarize the text
for j in range(1, n+1):
# Update input filename
if j > 1:
input_f = output_f + "_" + str(j-1) + ".csv"
with open(input_f, "r") as input_file:
reader = csv.reader(input_file)
# Update output filename
output_f = path + "_" + prefix + "_output_" + rand_str
with open(output_f + "_" + str(j) + ".csv", "w", newline="") as output_file:
writer = csv.writer(output_file)
rows_concatenated = []
for row in reader:
rows_concatenated.append(row[0])
if (len(rows_concatenated) >= j) or (len(reader) == 0):
text = " ".join(rows_concatenated)
summary = transform(text, instruct, model)
writer.writerow([summary] + row[1:])
rows_concatenated = []
# Write final summary to a text file
outputxt = path + "_" + prefix + "_outputsummary_" + str(rand_str) + ".txt"
with open(output_f + "_" + str(j) + ".csv", 'r') as csv_file, open(outputxt, 'w') as txt_file:
csv_output = csv.reader(csv_file)
for row in csv_output:
txt_file.write(','.join(row) + '\n\n')
return(outputxt)
# Function to split a large text into smaller blocks
def split_text_into_blocks(text, limit=4000):
# Initialize variables
blocks = []
current_block = ""
words = text.split()
# Iterate over words
for word in words:
# Check if word fits in the current block
if len(current_block + word) + 1 < limit:
current_block += word + " "
else:
last_delimiter_index = max(current_block.rfind(". "), current_block.rfind("\n"))
# Break block at the last complete sentence or newline
if last_delimiter_index == -1:
blocks.append(current_block.strip())
current_block = word + " "
else:
delimiter = current_block[last_delimiter_index]
blocks.append(current_block[:last_delimiter_index + (1 if delimiter == '.' else 0)].strip())
current_block = current_block[last_delimiter_index + (2 if delimiter == '.' else 1):].strip() + " " + word + " "
# Add the last block
if current_block.strip():
blocks.append(current_block.strip())
return blocks
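# Illustrative sketch (not part of the original script): how split_text_into_blocks
# behaves on a short text. The sample string and the 60-character limit are arbitrary
# values chosen for the example.
def _example_split_usage():
    sample = "Première phrase. Deuxième phrase un peu plus longue.\nTroisième phrase."
    for i, block in enumerate(split_text_into_blocks(sample, limit=60), start=1):
        print(f"Bloc {i}: {block!r}")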
# Function to write blocks to a csv file
def write_blocks_to_csv(blocks, filename):
with open(filename, "w", newline="", encoding="utf-8") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for block in blocks:
csvwriter.writerow([block])
# Function to generate a csv file from a string of text
def write_csv_from_string(text, filename):
limit = 2000 # Limit for text blocks
blocks = split_text_into_blocks(text, limit) # Split text into blocks
write_blocks_to_csv(blocks, filename) # Write blocks to csv file
# Function to summarize text
"""
def summarize(text, model='gpt-4'):
model = "gpt-4"
api_key = OPENAI_API_KEY
if model=="gpt-4":
limit = 10000 # Limit for text size
else:
limit = 5000
prompt = "Texte : " + text[:limit] + "\nTache : Résumer le texte en respectant le style et le sens. \
\nFormat : Un texte court dont le style et le sens sont conformes au texte original. \
\nObjectif : Obtenir un résumé sans introduction particulière. \
\nEtapes : Ne jamais mentionner que le texte produit est un résumé. \
\n Le résumé : \
\n"
system = "Rôle : Etre un rédacteur en français spécialisé dans le résumé d’ouvrages."
# Try to make a request to the API
attempts = 0
while attempts < 100000:
try:
url = 'https://api.openai.com/v1/chat/completions'
headers = {
'Content-Type': 'application/json',
'Authorization': f'Bearer {api_key}'
}
data = {
'model': model,
'messages': [
{'role': 'user', 'content': prompt},
{'role': 'system', 'content': system}
]
}
response = requests.post(url, headers=headers, json=data)
json_data = response.json()
message = json_data['choices'][0]['message']['content']
return message.strip()
except Exception as e:
error_code = type(e).__name__
error_reason = str(e)
attempts += 1
print(f"Erreur : {error_code} - {error_reason}. Nouvel essai dans 8 secondes...")
time.sleep(1.1*attempts)
print("Erreur : Echec de la création de la completion après x essais")
"""
def summarize(text, model='gpt-4'):
    # Load the API key from an environment variable
    client = openai.OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
    model = "gpt-4"  # The model is currently forced to gpt-4 regardless of the argument
    if model == "gpt-4":
        limit = 10000  # Limit for the text size
    else:
        limit = 5000
prompt = "Texte : " + text[:limit] + "\nTache : Résumer le texte en respectant le style et le sens. \
\nFormat : Un texte court dont le style et le sens sont conformes au texte original. \
\nObjectif : Obtenir un résumé sans introduction particulière. \
\nEtapes : Ne jamais mentionner que le texte produit est un résumé. \
\n Le résumé : \
\n"
system = "Rôle : Etre un rédacteur en français spécialisé dans le résumé d’ouvrages."
attempts = 0
while attempts < 100000:
try:
response = client.chat.completions.create(
model=model,
messages=[
{"role": "user", "content": prompt},
{"role": "system", "content": system}
]
)
message = response.choices[0].message.content
return message.strip()
except Exception as e:
error_code = type(e).__name__
error_reason = str(e)
attempts += 1
print(f"Erreur : {error_code} - {error_reason}. Nouvel essai dans 8 secondes...")
time.sleep(1.1 * attempts)
print("Erreur : Echec de la création de la completion après x essais")
sys.exit()
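# Illustrative sketch (not part of the original script): calling summarize on the
# contents of a plain-text file. The file path is a hypothetical example.
def _example_summarize_usage():
    with open("exemple_chapitre.txt", "r", encoding="utf-8") as f:
        chapitre = f.read()
    print(summarize(chapitre))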
# Function to summarize a large chapter in several passes
def summarizelarge_chap(text, prefix, n=3, model="gpt-4"):
model = "gpt-4"
now = datetime.now()
rand_str = str(now.strftime("%Y%m%d%H%M%S")) + "-"+ str(random.randint(0, 100))
path = APP_PATH + "datas/"
input_f = path + "_" + prefix + "_input_" + rand_str +".csv"
output_f = path + "_" + prefix + "_output_" + rand_str
# Write input to csv
write_csv_from_string(text, input_f)
j = 1
# Summarize the text
while j <= int(n):
if j > 1:
input_f = output_f + "_" + str(j-1) + ".csv"
with open(input_f, "r") as input_file_count:
reader = csv.reader(input_file_count)
lines = sum(1 for _ in reader)
if lines < j:
break
with open(input_f, "r") as input_file:
reader = csv.reader(input_file)
with open(output_f + "_" + str(j) + ".csv", "w", newline="") as output_file:
writer = csv.writer(output_file)
rows_concatenated = []
for row in reader:
lines -= 1
rows_concatenated.append(row[0])
if (len(rows_concatenated) >= j) or (lines==0):
text = " ".join(rows_concatenated)
summary = summarize(text, model)
writer.writerow([summary] + row[1:])
rows_concatenated = []
j += 1
# Write final summary to a text file
outputxt = path + "_" + prefix + "_outputsummary_" + str(rand_str) + ".txt"
inputcsv = output_f + "_" + str(j-1) + ".csv"
with open(inputcsv, 'r') as csv_file, open(outputxt, 'w') as txt_file:
csv_output = csv.reader(csv_file)
for row in csv_output:
txt_file.write(','.join(row) + '\n\n')
return(outputxt)
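# Illustrative sketch (not part of the original script): condensing a long chapter in
# three passes with summarizelarge_chap. The prefix and the input file are hypothetical;
# the function returns the path of the final summary text file.
def _example_summarizelarge_usage():
    with open("exemple_long_chapitre.txt", "r", encoding="utf-8") as f:
        texte = f.read()
    chemin_resume = summarizelarge_chap(texte, prefix="chap1", n=3)
    print(f"Résumé écrit dans : {chemin_resume}")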
# Audio-to-text conversion
def convert_to_mp3(input_file):
    ext = os.path.splitext(input_file)[-1].lower()[1:]  # Extract the extension without the leading dot
if ext not in ["m4a", "wav", "mp3", "mp4", "mov"]:
raise ValueError(f"Extension de fichier non prise en charge : {ext}")
if ext == "mp3":
return os.path.join(PODCASTS_PATH, os.path.basename(input_file))
output_filename = os.path.join(PODCASTS_PATH, os.path.basename(input_file).rsplit('.', 1)[0] + ".mp3")
if ext == "m4a":
## Solution avec Soundconverter
# Convert m4a to wav
temp_wav = os.path.join(PODCASTS_PATH, "temp.wav")
#command_to_wav = ["xvfb-run", "soundconverter", "-b", "-m", "audio/x-wav", "-i", input_file, "-o", temp_wav]
command_to_wav = ["soundconverter", "-b", "-s", ".wav", "-m", "audio/x-wav", "-i", input_file, "-o", temp_wav]
subprocess.run(command_to_wav)
# Convert wav to mp3
command_to_mp3 = ["soundconverter", "-b", "-s", ".mp3", "-m", "audio/mpeg", "-i", temp_wav, "-o", output_filename]
#command_to_mp3 = ["xvfb-run", "soundconverter", "-b", "-m", "audio/mpeg", "-i", temp_wav, "-o", output_filename]
subprocess.run(command_to_mp3)
# Optionally delete the temporary wav
if ext == "wav":
command = ["/usr/bin/ffmpeg", "-y", "-i", input_file, output_filename]
subprocess.run(command, check=True)
if ext == "mp4" or ext == "mov":
command = ["/usr/bin/ffmpeg", "-y", "-i", input_file, "-c:a", "libmp3lame", "-b:a", "192k", "-ar", "44100", output_filename]
subprocess.run(command)
return output_filename
# Configure your OpenAI API key
"""
def transcribe_audio(audio_filename):
with open(audio_filename, "rb") as audio_file:
response = openai.Audio.transcribe("whisper-1", audio_file)
transcript = response.get('text')
return transcript
"""
def transcribe_audio(audio_filename):
    # Initialize the OpenAI client
    api_key = os.environ.get('OPENAI_API_KEY')
    client = OpenAI(api_key=api_key)
    # Open the audio file in binary mode and request a plain-text transcription
    with open(audio_filename, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_file,
            response_format="text"
        )
    # Return the transcription
    return transcript
def save_transcript(transcript, output_filename):
with open(output_filename, "w", encoding="utf-8") as output_file:
output_file.write(str(transcript))
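# Illustrative sketch (not part of the original script): a full audio-to-summary
# pipeline chaining the helpers above. The podcast path is a hypothetical example.
def _example_audio_pipeline():
    mp3_path = convert_to_mp3("/chemin/vers/episode.m4a")
    transcript = transcribe_audio(mp3_path)
    save_transcript(transcript, mp3_path.rsplit(".", 1)[0] + ".txt")
    return summarizelarge_chap(transcript, prefix="podcast", n=2)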
| [
"PLACEHOLDER\nPLACEHOLDER:\n",
"Texte : PLACEHOLDER\nTache : Résumer le texte en respectant le style et le sens. \nFormat : Un texte court dont le style et le sens sont conformes au texte original. \nObjectif : Obtenir un résumé sans introduction particulière. \nEtapes : Ne jamais mentionner que le texte produit est un résumé. \n Le résumé : \n"
] |