date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | developers221/dddddd | ix~chains~llm_chain.py | import json
import logging
from typing import Any, List, Dict, Optional
from langchain.callbacks.manager import AsyncCallbackManagerForChainRun
from ix.chains.callbacks import IxHandler
from langchain import LLMChain as LangchainLLMChain
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
)
from langchain.tools import Tool, format_tool_to_openai_function
from ix.chains.functions import FunctionSchema
from ix.task_log.models import TaskLogMessage
logger = logging.getLogger(__name__)
TEMPLATE_CLASSES = {
"system": SystemMessagePromptTemplate,
"user": HumanMessagePromptTemplate,
"assistant": AIMessagePromptTemplate,
}
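# Illustrative usage (added note, not in the original file): each role maps to
# a prompt-template class, e.g.
#   TEMPLATE_CLASSES["user"].from_template("{user_input}")
# builds a HumanMessagePromptTemplate for a user message.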
class LLMChain(LangchainLLMChain):
"""
Extension of LLMChain to provide additional functionality:
- OpenAI functions may be connected as functions.
- input_keys excludes memory variables so that memory may be directly attached.
"""
# List of OpenAI functions to include in requests.
    functions: Optional[List[FunctionSchema | Tool | BaseToolkit]] = None
    function_call: Optional[str] = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.load_functions()
def load_functions(self) -> None:
"""Load functions for OpenAI if llm is OpenAI"""
if not isinstance(self.llm, ChatOpenAI):
logger.error(f"llm is not ChatOpenAI, it is {type(self.llm)}")
return
if not self.functions:
return
if not isinstance(self.llm_kwargs, dict):
self.llm_kwargs = {}
if self.function_call:
self.llm_kwargs["function_call"] = {"name": self.function_call}
        # Convert Langchain Tool and BaseToolkit instances to OpenAI functions.
        # FunctionSchema entries are already OpenAI functions; they only need
        # their parameters deserialized.
converted_functions = []
for function in self.functions:
if isinstance(function, Tool):
converted_functions.append(format_tool_to_openai_function(function))
elif isinstance(function, BaseToolkit):
converted_functions.extend(
format_tool_to_openai_function(tool_func)
for tool_func in function.get_tools()
)
else:
converted = function.copy()
converted["parameters"] = json.loads(function["parameters"])
converted_functions.append(converted)
self.llm_kwargs["functions"] = converted_functions
@property
def input_keys(self) -> List[str]:
"""
Overridden to filter out memory variables from input_variables.
        This keeps the chain compatible with Sequence, which would otherwise
        raise a validation error because it cannot detect that the variable
        comes from memory.
"""
as_set = set(self.prompt.input_variables)
if self.memory:
as_set -= set(self.memory.memory_variables)
return list(as_set)
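# Illustrative sketch (added; not part of the original file): constructing this
# LLMChain with an OpenAI function attached. The schema and prompt below are
# assumptions for demonstration only.
#
#   schema: FunctionSchema = {
#       "name": "get_weather",  # hypothetical function
#       "description": "Look up the weather for a city",
#       "parameters": json.dumps({
#           "type": "object",
#           "properties": {"city": {"type": "string"}},
#           "required": ["city"],
#       }),
#   }
#   chain = LLMChain(
#       llm=ChatOpenAI(),
#       prompt=prompt,  # a ChatPromptTemplate built elsewhere
#       functions=[schema],
#       function_call="get_weather",
#   )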
class LLMReply(LLMChain):
"""
Wrapper around LLMChain that records output as an ASSISTANT message.
This simplifies making simple agents that just reply to messages.
"""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
response = await super()._acall(inputs=inputs, run_manager=run_manager)
ix_handler = IxHandler.from_manager(run_manager)
await ix_handler.send_agent_msg(response["text"])
return response
def run(self, *args, **kwargs) -> Any:
response = super().run(*args, **kwargs)
TaskLogMessage.objects.create(
task_id=self.callbacks.task.id,
role="ASSISTANT",
parent=self.callbacks.think_msg,
content={
"type": "ASSISTANT",
"text": response,
# "agent": str(self.callback_manager.task.agent.id),
"agent": self.callbacks.agent.alias,
},
)
return response
| [
"{'system': PLACEHOLDER, 'user': PLACEHOLDER, 'assistant': PLACEHOLDER}"
] |
2024-01-10 | developers221/dddddd | ix~chains~fixture_src~parsers.py | from langchain.document_loaders.parsers import LanguageParser
from langchain.text_splitter import Language
from ix.api.components.types import NodeTypeField, parse_enum_choices
LANGUAGE_CHOICES = parse_enum_choices(Language)
LANGUAGE = {
"name": "language",
"type": "string",
"input_type": "select",
"choices": LANGUAGE_CHOICES,
"required": True,
"default": "python",
}
LANGUAGE_PARSER_CLASS_PATH = (
"langchain.document_loaders.parsers.language.language_parser.LanguageParser"
)
LANGUAGE_PARSER = {
"class_path": LANGUAGE_PARSER_CLASS_PATH,
"type": "parser",
"name": "Language Parser",
"description": "Parse code for various programming languages.",
"fields": []
+ NodeTypeField.get_fields(
LanguageParser.__init__,
include=["parser_threshold", "language"],
),
}
PARSERS = [LANGUAGE_PARSER]
__all__ = ["PARSERS", "LANGUAGE", "LANGUAGE_CHOICES", "LANGUAGE_PARSER_CLASS_PATH"]
| [] |
2024-01-10 | developers221/dddddd | ix~agents~process.py | import logging
from typing import TypedDict, Optional, Any, Dict
from asgiref.sync import sync_to_async
from langchain.schema.runnable import RunnableConfig
from ix.agents.models import Agent
from ix.chains.callbacks import IxHandler
from ix.chains.models import Chain as ChainModel
from ix.task_log.models import Task
# logging
logger = logging.getLogger(__name__)
class UserInput(TypedDict):
authorized_ticks: int
feedback: Optional[str]
class ChatMessage(TypedDict):
role: str
content: str
class AgentProcess:
def __init__(
self,
task: Task,
agent: Agent,
chain: ChainModel,
):
self.chain = chain
self.task = task
self.agent = agent
async def start(self, inputs: Optional[Dict[str, Any]] = None) -> bool:
"""
start agent loop
"""
logger.info(f"starting process loop task_id={self.task.id} input_id={inputs}")
response = await self.chat_with_ai(inputs)
logger.debug(f"Response from model, task_id={self.task.id} response={response}")
return True
async def chat_with_ai(self, user_input: Dict[str, Any]) -> Any:
handler = IxHandler(agent=self.agent, chain=self.chain, task=self.task)
try:
# TODO: chain loading needs to be made async
chain = await sync_to_async(self.chain.load_chain)(handler)
logger.info(
f"Sending request to chain={self.chain.name} prompt={user_input}"
)
            # Auto-map user_input to other input keys if not provided.
            # Workaround until the chat input key can be configured per chain.
inputs = user_input.copy()
if "input" not in inputs:
inputs["input"] = user_input["user_input"]
if "question" not in inputs:
inputs["question"] = user_input["user_input"]
return await chain.ainvoke(inputs, RunnableConfig(callbacks=[handler]))
except Exception as e:
# validation errors aren't caught by callbacks.
await handler.send_error_msg(e)
return None
| [] |
2024-01-10 | developers221/dddddd | ix~memory~artifacts.py | import logging
import concurrent.futures
from typing import Dict, Any, List
from uuid import UUID
from django.db.models import Q
from langchain.schema import BaseMemory
from ix.task_log.models import Artifact
from ix.utils.asyncio import run_coroutine_in_new_loop
logger = logging.getLogger(__name__)
class ArtifactMemory(BaseMemory):
"""
A memory implementation that loads artifacts into the context
"""
save_artifact: bool = False
load_artifact: bool = False
# read
input_key: str = "artifact_keys"
memory_key: str = "related_artifacts"
session_id: str
supports_session: bool = True
supported_scopes: set = {"chat"}
@property
def memory_variables(self) -> List[str]:
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Load related artifacts into memory"""
logger.debug(
f"ArtifactMemory.load_memory_variables input_key={self.input_key} inputs={inputs}"
)
# split session id back into chat_id
chat_id = self.session_id.split("_")[-1]
# search for artifacts
text = ""
artifact_keys = inputs.get(self.input_key, None)
if artifact_keys:
id_clauses = Q(key__in=artifact_keys) | Q(name__in=artifact_keys)
try:
id_clauses |= Q(
pk__in=[UUID(artifact_key) for artifact_key in artifact_keys]
)
except ValueError:
# ignore if not UUIDs
pass
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(
run_coroutine_in_new_loop, self.get_artifacts(chat_id, id_clauses)
)
text = future.result()
# return formatted artifacts
return {self.memory_key: text}
    async def get_artifacts(self, chat_id, id_clauses) -> str:
text = ""
artifacts = Artifact.objects.filter(
id_clauses,
(
Q(task__leading_chats__id=chat_id)
| Q(task__parent__leading_chats__id=chat_id)
),
).order_by("-created_at")
artifacts = [artifact async for artifact in artifacts]
logger.debug(f"Found n={len(artifacts)} artifacts")
if artifacts:
# format each artifact
# HAX: group by key to avoid duplicates, this is done here since it's
# a lot simpler than doing it in the query. This method will still
# query all the duplicates but only becomes an issue if there is a
# large number of duplicates
artifact_strs = {}
for artifact in artifacts:
if artifact.key not in artifact_strs:
artifact_strs[artifact.key] = artifact.as_memory_text()
artifact_prompt = "".join(artifact_strs.values())
text = f"REFERENCED ARTIFACTS:\n{artifact_prompt}"
return text
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""
No-op for now. May move artifact saving here in the future. Artifacts
are currently saved by SaveArtifact chain.
"""
pass
def clear(self) -> None:
pass
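# Illustrative sketch (added; not part of the original file): how this memory
# is typically exercised. The session id and artifact key are assumptions.
#
#   memory = ArtifactMemory(session_id="tests_chat_123")
#   context = memory.load_memory_variables({"artifact_keys": ["report"]})
#   # context == {"related_artifacts": "REFERENCED ARTIFACTS:\n..."}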
| [] |
2024-01-10 | developers221/dddddd | ix~chains~tests~test_config_loader.py | from copy import deepcopy
from pathlib import Path
import pytest
from unittest.mock import MagicMock
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import LanguageParser
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import TextSplitter
from langchain.vectorstores import Redis
from ix.chains.fixture_src.chains import CONVERSATIONAL_RETRIEVAL_CHAIN_CLASS_PATH
from ix.chains.fixture_src.document_loaders import GENERIC_LOADER_CLASS_PATH
from ix.chains.fixture_src.embeddings import OPENAI_EMBEDDINGS_CLASS_PATH
from ix.chains.fixture_src.parsers import LANGUAGE_PARSER_CLASS_PATH
from ix.chains.fixture_src.text_splitter import RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH
from ix.chains.fixture_src.vectorstores import (
REDIS_VECTORSTORE_CLASS_PATH,
)
from ix.chains.loaders.context import IxContext
from langchain.agents import AgentExecutor
from langchain.base_language import BaseLanguageModel
from langchain.memory import (
ConversationBufferMemory,
ConversationSummaryBufferMemory,
CombinedMemory,
)
from langchain.schema import BaseChatMessageHistory, BaseMemory
from langchain.tools import BaseTool
from ix.chains.fixture_src.tools import GOOGLE_SEARCH
from ix.chains.loaders.memory import get_memory_session
from ix.chains.loaders.text_splitter import TextSplitterShim
from ix.chains.loaders.tools import extract_tool_kwargs
from ix.chains.tests.mock_memory import MockMemory
from ix.memory.artifacts import ArtifactMemory
class TestLoadLLM:
pass
OPENAI_LLM = {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
"config": {"verbose": True},
}
MOCK_MEMORY = {
"class_path": "ix.chains.tests.mock_memory.MockMemory",
"config": {"value_map": {"mock_memory_input": "mock memory"}},
}
MEMORY = {
"class_path": "langchain.memory.ConversationBufferMemory",
"config": {
"input_key": "user_input",
"memory_key": "chat_history",
},
}
MEMORY_WITH_BACKEND = {
"class_path": "langchain.memory.ConversationBufferMemory",
"config": {
"input_key": "user_input",
"memory_key": "chat_history",
"chat_memory": {
"class_path": "langchain.memory.RedisChatMessageHistory",
"config": {"url": "redis://redis:6379/0", "session_scope": "task"},
},
},
}
MEMORY_WITH_LLM = {
"class_path": "langchain.memory.summary_buffer.ConversationSummaryBufferMemory",
"config": {
"input_key": "user_input",
"memory_key": "chat_summary",
"llm": {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
},
},
}
MEMORY_WITH_SCOPE = {
"class_path": "ix.memory.artifacts.ArtifactMemory",
"config": {
"memory_key": "chat_history",
"session_scope": "chat",
"session_prefix": "tests",
},
}
CHAT_MESSAGES = [
{
"role": "system",
"template": "You are a test bot.",
},
{
"role": "user",
"template": "{user_input}",
"input_variables": ["user_input"],
},
]
CHAT_MESSAGES_WITH_CHAT_HISTORY = [
{
"role": "system",
"template": "You are a test bot! HISTORY: {chat_history}",
"input_variables": ["chat_history"],
},
{
"role": "user",
"template": "{user_input}",
"input_variables": ["user_input"],
},
]
PROMPT_CHAT = {
"class_path": "langchain.prompts.chat.ChatPromptTemplate",
"config": {
"messages": CHAT_MESSAGES,
},
}
PROMPT_WITH_CHAT_HISTORY = {
"class_path": "langchain.prompts.chat.ChatPromptTemplate",
"config": {
"messages": CHAT_MESSAGES_WITH_CHAT_HISTORY,
},
}
LLM_CHAIN = {
"class_path": "ix.chains.llm_chain.LLMChain",
"config": {
"prompt": PROMPT_CHAT,
"llm": {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
},
},
}
LLM_REPLY = {
"class_path": "ix.chains.llm_chain.LLMReply",
"config": {
"prompt": PROMPT_CHAT,
"llm": {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
},
},
}
LLM_REPLY_WITH_HISTORY = {
"class_path": "ix.chains.llm_chain.LLMReply",
"config": {
"prompt": PROMPT_WITH_CHAT_HISTORY,
"llm": {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
},
},
}
LLM_REPLY_WITH_HISTORY_AND_MEMORY = {
"class_path": "ix.chains.llm_chain.LLMReply",
"config": {
"prompt": PROMPT_WITH_CHAT_HISTORY,
"memory": MEMORY,
"llm": {
"class_path": "langchain.chat_models.openai.ChatOpenAI",
},
},
}
@pytest.mark.django_db
class TestLoadMemory:
def test_load_memory(self, load_chain):
instance = load_chain(MEMORY)
assert isinstance(instance, ConversationBufferMemory)
def test_load_multiple(self, load_chain, mock_openai_key):
"""Test loading multiple memories into a CombinedMemory"""
MEMORY2 = deepcopy(MEMORY)
MEMORY2["config"]["memory_key"] = "chat_history2"
LLM_CONFIG = deepcopy(LLM_REPLY_WITH_HISTORY)
LLM_CONFIG["config"]["memory"] = [MEMORY, MEMORY2]
chain = load_chain(LLM_CONFIG)
instance = chain.memory
assert isinstance(instance, CombinedMemory)
assert len(instance.memories) == 2
assert instance.memories[0].memory_key == "chat_history"
assert instance.memories[1].memory_key == "chat_history2"
def test_load_backend(self, load_chain):
"""
A memory class can have a backend that separates memory logic from
the storage system. ChatMemory works this way.
"""
instance = load_chain(MEMORY_WITH_BACKEND)
assert isinstance(instance, ConversationBufferMemory)
assert isinstance(instance.chat_memory, BaseChatMessageHistory)
def test_load_memory_with_scope(self, chat, load_chain):
"""
Test loading with a scope.
Not all memories support sessions, for example ChatMemory
adds scoping to the backend.
"""
chat = chat["chat"]
chat_id = chat.task.leading_chats.first().id
instance = load_chain(MEMORY_WITH_SCOPE)
assert isinstance(instance, ArtifactMemory)
assert instance.session_id == f"tests_chat_{chat_id}"
def test_load_llm(self, load_chain, mock_openai):
"""
Memory classes may optionally load an llm. (e.g. SummaryMemory)
"""
instance = load_chain(MEMORY_WITH_LLM)
assert isinstance(instance, ConversationSummaryBufferMemory)
assert isinstance(instance.llm, BaseLanguageModel)
def test_load_class_with_config(self, chat, mocker, load_chain):
"""
Test loading a class whose config is defined in MEMORY_CLASSES.
This tests configuring an external class with the required config
to integrate into Ix
"""
chat = chat["chat"]
chat_id = chat.task.leading_chats.first().id
# patch MEMORY_CLASSES to setup the test
from ix.chains.loaders import memory
mock_memory_classes = {
MockMemory: {
"supports_session": True,
}
}
mocker.patch.object(memory, "MEMORY_CLASSES", mock_memory_classes)
# load a memory that will use the mock class config
instance = load_chain(
{
"class_path": "ix.chains.tests.mock_memory.MockMemory",
"config": {
"session_scope": "chat",
"session_prefix": "tests",
},
},
)
assert isinstance(instance, MockMemory)
assert instance.session_id == f"tests_chat_{chat_id}"
@pytest.mark.django_db
class TestLoadChatMemoryBackend:
def test_load_chat_memory_backend(self, chat, load_chain):
chat = chat["chat"]
chat_id = chat.task.leading_chats.first().id
# Config
config = {
"class_path": "langchain.memory.RedisChatMessageHistory",
"config": {
"url": "redis://redis:6379/0",
"session_scope": "chat",
"session_prefix": "tests",
},
}
# Run
backend = load_chain(config)
assert backend.session_id == f"tests_chat_{chat_id}"
def test_load_defaults(self, chat, load_chain):
"""
        ChatMemoryBackend should always load a session_id. If `session_scope`
        isn't present, the `chat` scope is loaded by default.
"""
chat = chat["chat"]
chat_id = chat.task.leading_chats.first().id
# Config
config = {
"class_path": "langchain.memory.RedisChatMessageHistory",
"config": {
"url": "redis://redis:6379/0",
},
}
# Run
backend = load_chain(config)
assert backend.session_id == f"chat_{chat_id}"
@pytest.mark.django_db
class TestGetMemorySession:
"""Test parsing the session scope from the chain config and runtime context."""
@pytest.mark.parametrize(
"config, cls, expected",
[
# No scope - defaults to chat
(
{
"session_scope": "",
"session_prefix": "123",
"session_key": "session_id",
},
BaseChatMessageHistory,
("123_chat_1000", "session_id"),
),
(
{
"session_scope": None,
"session_prefix": "123",
"session_key": "session_id",
},
BaseChatMessageHistory,
("123_chat_1000", "session_id"),
),
(
{"session_prefix": "123", "session_key": "session_id"},
BaseChatMessageHistory,
("123_chat_1000", "session_id"),
),
# agent, task, user scopes
(
{
"session_scope": "agent",
"session_prefix": "456",
"session_key": "session_id",
},
BaseMemory,
("456_agent_1001", "session_id"),
),
(
{
"session_scope": "task",
"session_prefix": "789",
"session_key": "session_id",
},
BaseMemory,
("789_task_1002", "session_id"),
),
(
{
"session_scope": "user",
"session_prefix": "321",
"session_key": "session_id",
},
BaseChatMessageHistory,
("321_user_1003", "session_id"),
),
# custom session_id_key
(
{"session_scope": "chat", "session_key": "chat_session"},
BaseChatMessageHistory,
("chat_1000", "chat_session"),
),
# no session prefix
(
{"session_scope": "chat", "session_key": "session_id"},
BaseChatMessageHistory,
("chat_1000", "session_id"),
),
# custom session prefix
(
{"session_scope": "chat", "session_prefix": "static_session_id"},
BaseChatMessageHistory,
("static_session_id_chat_1000", "session_id"),
),
],
)
def test_get_memory_session(self, task, config, cls, expected):
"""Test various scope configurations."""
context = MagicMock()
context.task = task
context.chat_id = "1000"
context.agent.id = "1001"
context.task.id = "1002"
context.user_id = "1003"
result = get_memory_session(config, context, cls)
assert result == expected
def test_parse_scope_unsupported_scope(self, task):
config = {
"session_scope": "unsupported_scope",
"session_id": "123",
"session_id_key": "session_id",
}
cls = BaseChatMessageHistory
context = IxContext(agent=task.agent, chain=task.chain, task=task)
with pytest.raises(ValueError) as excinfo:
get_memory_session(config, context, cls)
assert "unknown scope" in str(excinfo.value)
class TestLoadChain:
def test_load_chain(self):
pass
class TestExtractToolKwargs:
@pytest.fixture
def kwargs(self):
return {
"return_direct": False,
"verbose": False,
"tool_key1": "tool_value1",
"tool_key2": "tool_value2",
}
def test_extract_tool_kwargs_returns_dict(self, kwargs):
result = extract_tool_kwargs(kwargs)
assert isinstance(result, dict)
def test_extract_tool_kwargs_only_includes_tool_kwargs(self, kwargs):
node_kwargs = kwargs.copy()
tool_kwargs = extract_tool_kwargs(node_kwargs)
expected_node_kwargs = {"tool_key1": "tool_value1", "tool_key2": "tool_value2"}
expected_tool_kwargs = {
"return_direct": False,
"verbose": False,
}
assert tool_kwargs == expected_tool_kwargs
assert expected_node_kwargs == node_kwargs
GOOGLE_SEARCH_CONFIG = {
"class_path": GOOGLE_SEARCH["class_path"],
"name": "tester",
"description": "test",
"config": {},
}
@pytest.fixture()
def mock_google_api_key(monkeypatch):
monkeypatch.setenv("GOOGLE_API_KEY", "MOCK_KEY")
monkeypatch.setenv("GOOGLE_CSE_ID", "MOCK_ID")
@pytest.mark.django_db
class TestGoogleTools:
async def test_load_tools(self, aload_chain, mock_google_api_key):
"""Test that tools can be loaded."""
config = {
"class_path": GOOGLE_SEARCH["class_path"],
"name": "tester",
"description": "test",
"config": {},
}
instance = await aload_chain(config)
assert isinstance(instance, BaseTool)
@pytest.mark.django_db
class TestLoadAgents:
# list of known agents. This list may not be exhaustive
# of all agents available since functions are dynamically
# loaded from LangChain code.
KNOWN_AGENTS = [
"initialize_zero_shot_react_description",
"initialize_conversational_react_description",
"initialize_chat_zero_shot_react_description",
"initialize_chat_conversational_react_description",
"initialize_structured_chat_zero_shot_react_description",
"initialize_openai_functions",
"initialize_openai_multi_functions",
]
def test_init_functions(self):
"""Test that agent init wrappers were generated."""
from ix.chains.loaders.agents import FUNCTION_NAMES
for name in self.KNOWN_AGENTS:
assert name in FUNCTION_NAMES
async def test_load_agents(self, aload_chain, mock_openai, mock_google_api_key):
"""Test that agent can be loaded."""
        for name in self.KNOWN_AGENTS:
config = {
"class_path": f"ix.chains.loaders.agents.{name}",
"name": "tester",
"description": "test",
"config": {"tools": [GOOGLE_SEARCH_CONFIG], "llm": OPENAI_LLM},
}
instance = await aload_chain(config)
assert isinstance(instance, AgentExecutor)
TEST_DATA = Path("/var/app/test_data")
TEST_DOCUMENTS = TEST_DATA / "documents"
LANGUAGE_PARSER = {
"class_path": LANGUAGE_PARSER_CLASS_PATH,
"config": {
"language": "python",
},
}
DOCUMENT_LOADER = {
"class_path": GENERIC_LOADER_CLASS_PATH,
"config": {
"parser": LANGUAGE_PARSER,
"path": str(TEST_DOCUMENTS),
"suffixes": [".py"],
},
}
TEXT_SPLITTER = {
"class_path": RECURSIVE_CHARACTER_SPLITTER_CLASS_PATH,
"config": {"language": "python", "document_loader": DOCUMENT_LOADER},
}
EMBEDDINGS = {
"class_path": OPENAI_EMBEDDINGS_CLASS_PATH,
"config": {"model": "text-embedding-ada-002"},
}
REDIS_VECTORSTORE = {
"class_path": REDIS_VECTORSTORE_CLASS_PATH,
"config": {
"embedding": EMBEDDINGS,
"documents": TEXT_SPLITTER,
"redis_url": "redis://redis:6379/0",
"index_name": "tests",
},
}
CONVERSATIONAL_RETRIEVAL_CHAIN = {
"class_path": CONVERSATIONAL_RETRIEVAL_CHAIN_CLASS_PATH,
"config": {"llm": OPENAI_LLM, "retriever": REDIS_VECTORSTORE},
}
@pytest.mark.django_db
class TestLoadRetrieval:
"""Test loading retrieval components.
This is a test of loading mechanism for the various retrieval components.
It is not an exhaustive test that all retrieval components work as expected.
The tests verify that any special loading logic for the components is working.
"""
async def test_load_language_parser(self, aload_chain):
component = await aload_chain(LANGUAGE_PARSER)
assert isinstance(component, LanguageParser)
assert component.language == "python"
async def test_load_document_loader(self, aload_chain):
component = await aload_chain(DOCUMENT_LOADER)
assert isinstance(component, GenericLoader)
assert isinstance(component.blob_parser, LanguageParser)
# non-exhaustive test of document loading
documents = component.load()
sources = {doc.metadata["source"] for doc in documents}
expected_sources = {
str(TEST_DOCUMENTS / "foo.py"),
str(TEST_DOCUMENTS / "bar.py"),
}
assert sources == expected_sources
async def test_load_text_splitter(self, aload_chain):
component = await aload_chain(TEXT_SPLITTER)
assert isinstance(component, TextSplitterShim)
assert isinstance(component.document_loader, GenericLoader)
assert isinstance(component.text_splitter, TextSplitter)
# sanity check that the splitter splits text
# does not test the actual splitting algorithm
with open(TEST_DOCUMENTS / "foo.py", "r") as foo_file:
foo_content = foo_file.read()
split_texts = component.text_splitter.split_text(foo_content)
assert len(split_texts) >= 1
async def test_load_embeddings(self, aload_chain):
component = await aload_chain(EMBEDDINGS)
assert isinstance(component, OpenAIEmbeddings)
async def test_load_vectorstore(
self, clean_redis, aload_chain, mock_openai_embeddings
):
component = await aload_chain(REDIS_VECTORSTORE)
assert isinstance(component, Redis)
async def test_load_conversational_chain(
self, clean_redis, aload_chain, mock_openai_embeddings
):
"""Test loading a fully configured conversational chain."""
component = await aload_chain(CONVERSATIONAL_RETRIEVAL_CHAIN)
assert isinstance(component, ConversationalRetrievalChain)
| [
"{'class_path': 'langchain.prompts.chat.ChatPromptTemplate', 'config': {'messages': PLACEHOLDER}}"
] |
2024-01-10 | developers221/dddddd | ix~conftest.py | import logging
from typing import Dict, Any, List, Optional
from unittest.mock import MagicMock
import pytest
import pytest_asyncio
import redis
from asgiref.sync import sync_to_async
from django.core.management import call_command
from ix.agents.models import Agent
from ix.agents.tests.mock_llm import MockChatOpenAI
from ix.chains.callbacks import IxHandler
from ix.chains.fixture_src.embeddings import OPENAI_EMBEDDINGS_CLASS_PATH
from ix.chains.loaders.context import IxContext
from ix.chains.management.commands.create_ix_v2 import (
IX_CHAIN_V2,
)
from ix.chains.models import Chain, ChainNode, NodeType
from ix.chains.tests.mock_vector_embeddings import MOCK_VECTORSTORE_EMBEDDINGS
from ix.task_log.models import Artifact, Task
from ix.task_log.tests.fake import (
fake_task,
fake_task_log_msg,
fake_chat,
fake_agent,
fake_think,
fake_chain,
afake_think,
afake_task,
)
from ix.utils.importlib import import_class, _import_class
logger = logging.getLogger(__name__)
USER_INPUT = {"user_input": "hello agent 1"}
@pytest.fixture
def clean_redis():
"""Ensure redis is clean before and after tests"""
redis_client = redis.Redis(host="redis", port=6379, db=0)
redis_client.flushall()
yield
redis_client.flushall()
@pytest.fixture
def mock_import_class(mocker):
"""Fixture for mocking import_class.
Used to mock specific components (e.g. OpenAIEmbeddings) in tests.
"""
original_import_class = _import_class
mock_class_paths = {}
def mock_fn(class_path):
if class_path in mock_class_paths:
return mock_class_paths[class_path]
else:
return original_import_class(class_path)
def add_mock_path(class_path, mock_result):
mock_class_paths[class_path] = mock_result
mocker.patch("ix.utils.importlib._import_class", side_effect=mock_fn)
return add_mock_path
@pytest.fixture
def mock_openai_key(monkeypatch):
monkeypatch.setenv("OPENAI_API_KEY", "MOCK_KEY")
MOCKABLE_LLM_CLASSES = [
"langchain.chat_models.openai.ChatOpenAI",
]
@pytest.fixture
def mock_openai(mocker, mock_openai_key):
# create a mock instance of the class
mock_llm = MockChatOpenAI()
mock_llm.return_value = "mock llm response"
async def _mock_acompletion_with_retry(*args, **kwargs):
if mock_llm.raise_exception:
raise mock_llm.raise_exception
content = await sync_to_async(mock_llm.get_mock_content)()
return content
# async completions are outside the class and need to be mocked separately
mock_acomplete = mocker.patch("langchain.chat_models.openai.acompletion_with_retry")
mock_acomplete.side_effect = _mock_acompletion_with_retry
mock_llm.acompletion_with_retry = mock_acomplete
# mock the class to return the instance we're creating here
mock_class = MagicMock(return_value=mock_llm)
def mock_import_class(class_path):
if class_path in MOCKABLE_LLM_CLASSES:
return mock_class
else:
return import_class(class_path)
# mock_import returns the mock class
mocker.patch(
"ix.chains.loaders.core.import_node_class", side_effect=mock_import_class
)
# return the mock instance
yield mock_llm
@pytest.fixture
def mock_openai_streaming(mocker, mock_openai_key):
# create a mock instance of the class
mock_llm = MockChatOpenAI()
mock_llm.return_value = "mock llm response"
async def _mock_acompletion_with_retry(*args, **kwargs):
if mock_llm.raise_exception:
raise mock_llm.raise_exception
if mock_llm.streaming:
content = mock_llm.return_value
split = content.split(" ")
words = []
for split_word in split[:-1]:
words.append(split_word)
words.append(" ")
words.append(split[-1])
for word in words:
yield {
"choices": [
{
"delta": {
"role": "system",
"content": word,
"function_call": None,
}
}
]
}
# async completions are outside the class and need to be mocked separately
mock_acomplete = mocker.patch("langchain.chat_models.openai.acompletion_with_retry")
mock_acomplete.side_effect = _mock_acompletion_with_retry
mock_llm.acompletion_with_retry = mock_acomplete
# mock the class to return the instance we're creating here
mock_class = MagicMock(return_value=mock_llm)
def mock_import_class(class_path):
if class_path in MOCKABLE_LLM_CLASSES:
return mock_class
else:
return import_class(class_path)
# mock_import returns the mock class
mocker.patch(
"ix.chains.loaders.core.import_node_class", side_effect=mock_import_class
)
# return the mock instance
yield mock_llm
def fake_embeddings(n: int = 1) -> List[List[float]]:
"""Fake a list of embeddings."""
    return [[0.5] * 1536 for _ in range(n)]
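# Note (added): the 1536-dim vectors match the output size of OpenAI's
# text-embedding-ada-002, the embedding model mocked in these tests.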
@pytest.fixture
def mock_openai_embeddings(mock_import_class, mock_openai_key):
"""Mocks OpenAIEmbeddings to return a mock response
The mock embedding was generated for files test_data/documents
with the real OpenAIEmbeddings component
"""
mock_class = MagicMock()
mock_class.instance = mock_class()
mock_class.instance.embed_documents.return_value = MOCK_VECTORSTORE_EMBEDDINGS
mock_class.instance.embed_query.return_value = fake_embeddings(n=1)[0]
mock_import_class(OPENAI_EMBEDDINGS_CLASS_PATH, mock_class)
yield mock_class.instance
@pytest.fixture
def ix_context(task):
return IxContext(agent=task.agent, chain=task.chain, task=task)
@pytest_asyncio.fixture
async def aix_context(atask):
agent = await Agent.objects.aget(id=atask.agent_id)
chain = await Chain.objects.aget(id=atask.chain_id)
yield IxContext(agent=agent, chain=chain, task=atask)
@pytest.fixture
def ix_handler(chat):
chat = chat["chat"]
task = Task.objects.get(id=chat.task_id)
agent = Agent.objects.get(id=task.agent_id)
chain = Chain.objects.get(id=task.chain_id)
handler = IxHandler(agent=agent, chain=chain, task=task)
handler.parent_think_msg = fake_think(task=task)
yield handler
@pytest_asyncio.fixture
async def aix_handler(achat):
chat = achat["chat"]
task = await Task.objects.aget(id=chat.task_id)
agent = await Agent.objects.aget(id=task.agent_id)
chain = await Chain.objects.aget(id=task.chain_id)
handler = IxHandler(agent=agent, chain=chain, task=task)
handler.parent_think_msg = await afake_think(task=task)
yield handler
@pytest.fixture
def mock_chain(mocker):
"""
Mocks the function that MockChain calls. Used to hook into
the chain and test the output.
"""
yield mocker.patch("ix.chains.tests.mock_chain.mock_chain_func")
@pytest.fixture()
def load_chain(node_types, task, clean_redis):
"""
    Yields a factory for building mock chains. The factory takes a config
    object, creates a chain and its nodes from it, then loads and returns
    the chain.
"""
    def _mock_chain(config: Dict[str, Any], context: Optional[IxContext] = None) -> Chain:
chain = fake_chain()
chain_node = ChainNode.objects.create_from_config(chain, config, root=True)
return chain_node.load(
context or IxContext(agent=task.agent, task=task, chain=task.chain)
)
yield _mock_chain
@pytest_asyncio.fixture
async def aload_chain(anode_types, achat):
"""
    Yields a factory for building mock chains. The factory takes a config
    object, creates a chain and its nodes from it, then loads and returns
    the chain.
"""
chat = achat["chat"]
task = await Task.objects.aget(id=chat.task_id)
agent = await Agent.objects.aget(id=chat.lead_id)
chain = await Chain.objects.aget(id=task.chain_id)
    async def _mock_chain(config: Dict[str, Any], context: Optional[IxContext] = None) -> Chain:
chain_node = await sync_to_async(ChainNode.objects.create_from_config)(
chain, config, root=True
)
return await sync_to_async(chain_node.load)(
context or IxContext(agent=agent, task=task, chain=chain)
)
yield _mock_chain
@pytest.fixture
def task(node_types):
return fake_task()
@pytest_asyncio.fixture
async def atask(anode_types):
return await afake_task()
@pytest.fixture()
def chat(node_types, task, load_chain, mock_openai_key, ix_context, clean_redis):
chat = fake_chat(task=task)
fake_agent_1 = fake_agent(
name="agent 1", alias="agent_1", purpose="to test selections"
)
fake_agent_2 = fake_agent(
name="agent 2", alias="agent_2", purpose="to test selections"
)
chat.agents.add(fake_agent_1)
chat.agents.add(fake_agent_2)
# load chain
model_instance = Chain.objects.get(pk=IX_CHAIN_V2)
moderator = model_instance.load_chain(context=ix_context)
yield {
"chat": chat,
"fake_agent_1": fake_agent_1,
"fake_agent_2": fake_agent_2,
"instance": moderator,
}
@pytest_asyncio.fixture
async def achat(anode_types, atask, aix_agent, aix_context, mock_openai_key):
chat = await sync_to_async(fake_chat)(task=atask)
fake_agent_1 = await sync_to_async(fake_agent)(
name="agent 1", alias="agent_1", purpose="to test selections"
)
fake_agent_2 = await sync_to_async(fake_agent)(
name="agent 2", alias="agent_2", purpose="to test selections"
)
await chat.agents.aset([fake_agent_1, fake_agent_2])
# load chain
model_instance = await Chain.objects.aget(pk=IX_CHAIN_V2)
moderator = await model_instance.aload_chain(context=aix_context)
return {
"chat": chat,
"fake_agent_1": fake_agent_1,
"fake_agent_2": fake_agent_2,
"instance": moderator,
}
@pytest.fixture
def task_log_msg(task):
return fake_task_log_msg(task)
@pytest.fixture()
def command_output(mocker):
"""mocks write_output to capture output from `echo` command"""
yield mocker.patch("ix.agents.tests.echo_command.write_output")
def load_fixture(fixture: str) -> None:
"""calls manage.py loaddata"""
call_command("loaddata", fixture)
aload_fixture = sync_to_async(load_fixture)
@pytest.fixture()
def node_types() -> None:
"""calls manage.py loaddata node_types"""
NodeType.objects.all().delete()
Chain.objects.all().delete()
load_fixture("node_types")
load_fixture("ix_v2")
@pytest_asyncio.fixture
async def aix_agent(anode_types):
"""async version of ix_agent fixture"""
await sync_to_async(call_command)("loaddata", "ix_v2")
await sync_to_async(call_command)("loaddata", "code_v2")
@pytest_asyncio.fixture
async def anode_types() -> None:
"""calls manage.py loaddata node_types"""
await sync_to_async(call_command)("loaddata", "node_types")
@pytest.fixture()
def clean_artifacts():
"""deletes all artifacts"""
Artifact.objects.all().delete()
@pytest_asyncio.fixture
async def aclean_artifacts():
"""deletes all artifacts"""
await Artifact.objects.all().adelete()
| [] |
2024-01-10 | developers221/dddddd | ix~chains~tests~components~test_metaphor.py | import pytest
from langchain.tools import Tool
from ix.chains.fixture_src.tools import (
METAPHOR_SEARCH_CLASS_PATH,
METAPHOR_CONTENTS_CLASS_PATH,
METAPHOR_FIND_SIMILAR_CLASS_PATH,
)
METAPHOR_SEARCH = {
"class_path": METAPHOR_SEARCH_CLASS_PATH,
"config": {
"metaphor_api_key": "fake_key",
},
}
METAPHOR_CONTENTS = {
"class_path": METAPHOR_CONTENTS_CLASS_PATH,
"config": {
"metaphor_api_key": "fake_key",
},
}
METAPHOR_SIMILAR = {
"class_path": METAPHOR_FIND_SIMILAR_CLASS_PATH,
"config": {
"metaphor_api_key": "fake_key",
},
}
@pytest.mark.django_db
class TestMetaphorTools:
async def test_load_search(self, aload_chain):
component = await aload_chain(METAPHOR_SEARCH)
assert isinstance(component, Tool)
assert component.name == "metaphor_search"
async def test_load_contents(self, aload_chain):
component = await aload_chain(METAPHOR_CONTENTS)
assert isinstance(component, Tool)
assert component.name == "metaphor_get_contents"
async def test_load_similar(self, aload_chain):
component = await aload_chain(METAPHOR_SIMILAR)
assert isinstance(component, Tool)
assert component.name == "metaphor_find_similar"
| [] |
2024-01-10 | developers221/dddddd | ix~chains~functions.py | import json
import logging
from typing import Any, TypedDict, List, TypeVar
from langchain.schema import BaseLLMOutputParser, Generation
T = TypeVar("T")
logger = logging.getLogger(__name__)
class FunctionSchema(TypedDict):
name: str
description: str
parameters: Any
class OpenAIFunctionParser(BaseLLMOutputParser):
"""
OpenAI function parser. This parser is used to parse a function call
out of a response. This is used in conjunction with functions attached
to the LLMChain.
    The function_call is returned if present; otherwise the text is returned.
    If parse_json is True, the function_call is parsed as JSON. Otherwise,
    it is returned as provided by the LLM component, which may be a string,
    a dict, or a combination of both (arguments may be a string).
"""
parse_json: bool = False
def parse_result(self, result: List[Generation]) -> T:
additional_kwargs = result[0].message.additional_kwargs
if "function_call" in additional_kwargs:
function_call = additional_kwargs["function_call"]
if self.parse_json:
if isinstance(function_call, str):
function_call = json.loads(function_call)
if isinstance(function_call["arguments"], str):
function_call["arguments"] = json.loads(function_call["arguments"])
return function_call
else:
return result[0].text
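# Illustrative sketch (added; not part of the original file): exercising the
# parser with a hand-built generation. The function name and arguments below
# are assumptions for demonstration only.
def _example_parse_function_call() -> Any:
    from langchain.schema import AIMessage, ChatGeneration

    message = AIMessage(
        content="",
        additional_kwargs={
            "function_call": {
                "name": "get_weather",  # hypothetical function
                "arguments": '{"city": "Stockholm"}',
            }
        },
    )
    parser = OpenAIFunctionParser(parse_json=True)
    # With parse_json=True the arguments string is decoded into a dict.
    return parser.parse_result([ChatGeneration(message=message)])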
| [] |
2024-01-10 | developers221/dddddd | ix~tools~wikipedia.py | from typing import Any
from langchain import WikipediaAPIWrapper
from langchain.tools import WikipediaQueryRun, BaseTool
from ix.chains.asyncio import SyncToAsyncRun
from ix.chains.loaders.tools import extract_tool_kwargs
class AsyncWikipediaQueryRun(SyncToAsyncRun, WikipediaQueryRun):
pass
def get_wikipedia(**kwargs: Any) -> BaseTool:
tool_kwargs = extract_tool_kwargs(kwargs)
wrapper = WikipediaAPIWrapper(**kwargs)
return AsyncWikipediaQueryRun(api_wrapper=wrapper, **tool_kwargs)
| [] |
2024-01-10 | developers221/dddddd | ix~agents~callback_manager.py | import logging
from functools import cached_property
from typing import Optional
from django.db.models import Q
from langchain.callbacks.manager import CallbackManager
from ix.agents.models import Agent
from ix.chat.models import Chat
from ix.task_log.models import Task
logger = logging.getLogger(__name__)
class IxCallbackManager(CallbackManager):
"""
Custom callback manager that adds iX functionality.
"""
stack_id: str
task: Task
def __init__(
self,
task: Task,
agent: Agent,
        stack_id: Optional[str] = None,
        parent: Optional["IxCallbackManager"] = None,
):
super().__init__(handlers=[])
self.task = task
self.agent = agent
self.stack_id = stack_id or "root"
self.parent = parent
def child(self, stack_id: str) -> "IxCallbackManager":
"""Return a child clone with nested stack_id"""
child = type(self)(
parent=self,
task=self.task,
agent=self.agent,
stack_id=f"{self.stack_id}.{stack_id}",
)
child.think_msg = self.think_msg
return child
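    # Illustrative note (added): child("llm") on the root manager yields
    # stack_id "root.llm"; calling .child("tool") on that child yields
    # "root.llm.tool".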
@property
def task_id(self) -> str:
return str(self.task.id)
@property
def agent_id(self) -> str:
return str(self.task.agent_id)
@property
def user_id(self) -> str:
# HAX: this is currently always the owner of the chat. Likely need to update
# this in the future to be the user making the request.
return str(self.task.user_id)
@cached_property
def chat_id(self) -> str:
try:
chat = Chat.objects.get(Q(task=self.task) | Q(task_id=self.task.parent_id))
except Chat.DoesNotExist:
return None
return chat.id
| [] |
2024-01-10 | Nataleia08/LLM_Project | llm_project~routes~history.py | from fastapi import APIRouter, Depends, status, HTTPException
from fastapi.responses import HTMLResponse
from sqlalchemy.orm import Session
from langchain.memory import PostgresChatMessageHistory
from llm_project.database.config import settings
from llm_project.services.auth import auth_service
from llm_project.database.models import MessageHistory, User
from llm_project.database.schemas import HistoryResponse
from llm_project.repository import history as repository_history
from llm_project.database.db import get_db
from typing import List
from sqlalchemy import and_
router = APIRouter(prefix="/history", tags=["history"])
@router.post("/save_massegas")
async def save_messages(text: str, chat_id: str, user_id: str, db: Session = Depends(get_db)):
new_message = await repository_history.create_message(chat_id, user_id, text, db)
return new_message
@router.get("/chat_id", response_model= List[HistoryResponse])
async def get_history_messages(chat_id: str, db: Session = Depends(get_db)):
chat_history = await repository_history.chat_history(chat_id, db)
if chat_history is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail='Chat not found!')
return chat_history
@router.get("/user_id", response_model= List[HistoryResponse])
async def get_user_history_messages(user_id: str, db: Session = Depends(get_db)):
chat_history = await repository_history.user_history(user_id, db)
if chat_history is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail='Chat not found!')
return chat_history
@router.delete("/message_id")
async def delete_history_message(chat_id: str, message_id: str, db: Session = Depends(get_db)):
delete_message = db.query(MessageHistory).filter(and_(MessageHistory.chat_id == chat_id, MessageHistory.id == message_id)).first()
if delete_message is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail='Message not found!')
db.delete(delete_message)
db.commit() | [] |
2024-01-10 | Nataleia08/LLM_Project | llm_project~repository~memory.py | from langchain.embeddings.openai import OpenAIEmbeddings
import tempfile

from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import CharacterTextSplitter
async def create_memmory(file_path: str):
loader = PyPDFLoader(file_path=file_path)
pages = loader.load_and_split()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(pages)
embeddings = OpenAIEmbeddings()
new_memory = FAISS.from_documents(docs, embeddings).as_retriever()
return new_memory
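# Illustrative usage sketch (added; not part of the original file); the path
# and query below are assumptions:
#   retriever = await create_memmory("docs/handbook.pdf")
#   docs = retriever.get_relevant_documents("What is the vacation policy?")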
async def create_memmory2(text: str):
    # TextLoader expects a file path, so write the raw text to a temp file first.
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as tmp:
        tmp.write(text)
    loader = TextLoader(tmp.name)
pages = loader.load_and_split()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(pages)
embeddings = OpenAIEmbeddings()
new_memory = FAISS.from_documents(docs, embeddings).as_retriever()
return new_memory | [] |
2024-01-10 | Nataleia08/LLM_Project | llm_project~routes~upload_llm.py | from fastapi import APIRouter, File, UploadFile, HTTPException, Request, Depends, status, Security
from fastapi.templating import Jinja2Templates
import cloudinary
from cloudinary.uploader import upload
from llm_project.database.config import settings
from llm_project.database.models import User, Chat, UserProfile
from llm_project.services.auth import auth_service
from llm_project.repository.user_profile import create_user_profile
from llm_project.database.db import get_db
from sqlalchemy.orm import Session
from fastapi.responses import HTMLResponse
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import CharacterTextSplitter
from llm_project.repository import users as repository_users
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer, OAuth2PasswordBearer
from fastapi.staticfiles import StaticFiles
from typing import List
security = HTTPBearer()
router = APIRouter(prefix="/upload-pdf", tags=["upload-pdf"])
templates = Jinja2Templates(directory="llm_project/templates")
router.mount('/static', StaticFiles(directory='llm_project/static'), name='static')
cloudinary.config(
cloud_name=settings.cloud_name,
api_key=settings.cloud_api_key,
api_secret=settings.cloud_api_secret
)
@router.get("/")
async def display_upload_form(request: Request,):
return templates.TemplateResponse("upload.html", {"request": request})
@router.post("/submit/", response_class=HTMLResponse)
async def crete_llm(request: Request, file: UploadFile = File(...), db: Session = Depends(get_db)):
if not file.filename.endswith(".pdf"):
raise HTTPException(status_code=400, detail="Invalid file type. Only PDF allowed.")
new_user = User()
db.add(new_user)
db.commit()
db.refresh(new_user)
with file.file as f:
upload_result = upload(f, resource_type="raw", public_id=f"{file.filename}", folder="files", format="pdf")
new_profile = UserProfile(file_name = file.filename, file_url = upload_result['url'], user_id = new_user.id)
db.add(new_profile)
db.commit()
db.refresh(new_profile)
loader = PyPDFLoader(upload_result['url'])
pages = loader.load_and_split()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(pages)
embeddings = OpenAIEmbeddings(openai_api_key = settings.openai_api_key)
new_memory = FAISS.from_documents(docs, embeddings)
new_memory.save_local("/LLM_PROJECT/Data/llm.yaml")
if new_memory is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Memory not save!")
return templates.TemplateResponse("chat_ws.html", {"request": request})
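# Illustrative sketch (added; not part of the original file): the index saved
# above can later be reloaded and queried. The query text is an assumption.
#   store = FAISS.load_local("/LLM_PROJECT/Data/llm.yaml", embeddings)
#   docs = store.similarity_search("What is this document about?", k=2)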
| [
"llm_project/templates"
] |
2024-01-10 | twwch/wechat-mj-bot | qq-channel~src~bot~qq.py | import botpy
from botpy.message import Message
import openai
from bot.mdb import QQ_MESSAGE_TABLE
from bot.midjourney import MidjourneyAPI
from config.env import APP_ID, APP_TOKEN
mj_sdk = MidjourneyAPI()
BOOT_AT_TEXT = "<@!6326807383311494599>"
SUPPORT_COMMANDS = ('U1', 'U2', 'U3', 'U4', 'V1', 'V2', 'V3', 'V4', 'R')
COMMAND_INDEX = {
"U1": 1,
"U2": 2,
"U3": 3,
"U4": 4,
"V1": 1,
"V2": 2,
"V3": 3,
"V4": 4
}
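# Note (added): the bot's chat commands are Chinese: '/会话' means "chat
# session" (GPT conversation) and '/绘图' means "draw" (Midjourney image).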
class MyClient(botpy.Client):
@staticmethod
def build_custom_id(msg_hash, prompt):
if "R" == prompt:
return f"MJ::JOB::variation::1::{msg_hash}::SOLO"
index = prompt[-1]
if "U" in prompt:
return f"MJ::JOB::upsample::{index}::{msg_hash}"
if "V" in prompt:
return f"MJ::JOB::variation::{index}::{msg_hash}"
return None
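    # e.g. (added note): build_custom_id("abc123", "U2") returns
    # "MJ::JOB::upsample::2::abc123"; build_custom_id("abc123", "R") returns
    # "MJ::JOB::variation::1::abc123::SOLO".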
async def on_at_message_create(self, message: Message):
print(message)
message_id = message.id
QQ_MESSAGE_TABLE.update_one({"qq_id": message_id}, {"$set": {
"qq_message": {
"author": str(message.author),
"content": message.content,
"channel_id": message.channel_id,
"id": message.id,
"guild_id": message.guild_id,
"member": str(message.member),
"message_reference": str(message.message_reference),
"mentions": str(message.mentions),
"attachments": str(message.attachments),
"seq": message.seq,
"seq_in_channel": message.seq_in_channel,
"timestamp": message.timestamp,
"event_id": message.event_id
}
}}, upsert=True)
content = message.content
prompt = content.replace(BOOT_AT_TEXT, "").strip()
if content.startswith(BOOT_AT_TEXT) and '/会话' in content:
prompt = prompt.replace("/会话", "").strip()
if not prompt:
await message.reply(content="请输入聊天内容", message_reference={"message_id": message_id})
return
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "Assistant is a large language model trained by OpenAI."},
{"role": "user", "content": prompt},
]
)
await message.reply(content=response['choices'][0]['message']['content'],
message_reference={"message_id": message_id})
return
if content.startswith(BOOT_AT_TEXT) and '/绘图' in content:
prompt = prompt.replace("/绘图", "").strip()
res = mj_sdk.create_imagine(prompt=prompt, message_id=message_id)
if res and isinstance(res, bool):
await message.reply(content=f"排队中了~\n任务ID: {message_id}",
message_reference={"message_id": message_id})
return
await message.reply(content=res, message_reference={"message_id": message_id})
return
message_reference_id = message.message_reference.message_id
qq_log = await self.api.get_message(channel_id=message.channel_id, message_id=message_reference_id)
content = qq_log.get("message").get("content")
_ids = content.split(" ")[-1].strip()
old_qq_id = _ids.split("::")[0].strip()
old_mj_id = _ids.split("::")[1].strip()
log = QQ_MESSAGE_TABLE.find_one({"qq_id": old_qq_id, "mj_id": old_mj_id})
if prompt in SUPPORT_COMMANDS and log:
commands = log.get("commands") or []
if prompt in commands and "U" in prompt:
await message.reply(content=f"已经处理过了: {message_id}",
message_reference={"message_id": message_id})
return
commands.append(prompt)
QQ_MESSAGE_TABLE.update_one({"qq_id": message_id}, {"$set": {
"commands": commands
}})
mj_message = log.get("mj_message")
msg_hash = mj_message.get("msgHash")
custom_id = self.build_custom_id(msg_hash, prompt)
if not custom_id:
await message.reply(content=f"存在异常\n任务ID: {message_id} \ncustom_id生成失败",
message_reference={"message_id": message_id})
return
res = mj_sdk.up_imagine(mj_message_id=mj_message.get("messageId"), custom_id=custom_id)
if res and isinstance(res, bool):
await message.reply(content=f"排队中了~\n任务ID: {message_id}",
message_reference={"message_id": message_id})
return
await message.reply(content=res, message_reference={"message_id": message_id})
return
await message.reply(content=f"不支持的命令\n支持的命令: {','.join(SUPPORT_COMMANDS)}",
message_reference={"message_id": message_id})
def bot_start():
intents = botpy.Intents(public_guild_messages=True, guild_messages=True)
client = MyClient(intents=intents)
client.run(appid=f"{APP_ID}", token=f"{APP_TOKEN}")
| [
"Assistant is a large language model trained by OpenAI."
] |
2024-01-10 | twwch/wechat-mj-bot | wechat-bot~src~utils~chat_proxy.py | from flask import Flask, request
from flask_cors import CORS
import os
import openai
import logging
import traceback
app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(levelname)s %(filename)s <%(funcName)s> %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
)
log = logging.getLogger(__name__)
openai.api_key = os.getenv('OPENAI_API_KEY')
@app.after_request
def after_request(response):
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Methods"] = "GET,POST,PUT,DELETE,OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Content-Type,Authorization"
return response
@app.route('/chat/completions', methods=['POST'])
def chat_completions():
try:
data = request.get_json(force=True)
messages = data.get('messages', [])
log.info('messages: %s', messages)
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=messages,
temperature=0, # this is the degree of randomness of the model's output
)
return {
"code": 0,
"data": response
}
except Exception as e:
log.error(e)
return {
"code": 1,
"data": {
"choices": [
{
"message": f"error for chat completion {traceback.format_exc()}"
}
]
}
}
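# Illustrative client sketch (added; not part of the original file): calling
# the proxy locally; port 81 matches app.run() below.
#   import requests
#   resp = requests.post(
#       "http://localhost:81/chat/completions",
#       json={"messages": [{"role": "user", "content": "hello"}]},
#   )
#   print(resp.json()["data"]["choices"][0]["message"]["content"])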
if __name__ == '__main__':
app.run(host='0.0.0.0', port=81) | [] |
2024-01-10 | nattvara/kthGPT | jobs~tasks~gpt_request.py | from rq import Queue, Retry
from typing import Optional
from redis import Redis
import logging
from db.crud import (
save_message_for_analysis
)
from tools.text.openai import completion
from db.models.analysis import Analysis
from config.settings import settings
from db.models import Lecture
import jobs.tasks.gpt_request
def job(
prompt: str,
analysis_id: Optional[int] = None,
query_id: Optional[int] = None,
upload_id: Optional[int] = None,
):
logger = logging.getLogger('rq.worker')
try:
response, usage = completion(prompt)
if analysis_id is not None:
usage.analysis_id = analysis_id
if query_id is not None:
usage.query_id = query_id
        if upload_id is not None:
usage.upload_id = upload_id
usage.save()
except Exception as e:
logger.error(e)
if analysis_id is not None:
analysis = Analysis.get(analysis_id)
save_message_for_analysis(analysis, 'OpenAI Error', 'GPT-3 Error from OpenAI, it is likely overloaded. Retrying in a little while...') # noqa: E501
raise e
return response
# Test run the job
if __name__ == '__main__':
queue = Queue('gpt', connection=Redis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
password=settings.REDIS_PASSWORD,
))
j = queue.enqueue(
jobs.tasks.gpt_request.job,
'say hello',
ttl=15,
retry=Retry(max=2, interval=[5, 5]),
)
print(j.id)
| [] |
2024-01-10 | nattvara/kthGPT | jobs~tasks~lecture~classify_video.py | import tempfile
import logging
import os.path
from tools.audio.extraction import extract_mp3_len
from tools.audio.shorten import shorten_mp3
from db.models import Lecture, Analysis
import tools.audio.transcription
import tools.youtube.download
import tools.text.prompts
import tools.text.ai
from db.crud import (
get_lecture_by_public_id_and_language,
save_message_for_analysis
)
import jobs
# 20min timeout
TIMEOUT = 20 * 60
# 7 minutes
SAMPLE_SIZE_SECONDS = 7 * 60
# 4 hours
MAX_ALLOWED_VIDEO_LENGTH = 4 * 60 * 60
def job(lecture_id: str, language: str):
logger = logging.getLogger('rq.worker')
lecture = get_lecture_by_public_id_and_language(lecture_id, language)
if lecture is None:
raise ValueError(f'lecture {lecture_id} not found')
if lecture.source != Lecture.Source.YOUTUBE:
raise ValueError(f'classification only supports youtube lectures, source was {lecture.source}')
lecture.refresh()
analysis = lecture.get_last_analysis()
analysis.state = Analysis.State.CLASSIFYING
analysis.save()
save_message_for_analysis(analysis, 'Classifying video', 'Trying to classify if the video is relevant for kthGPT')
try:
temp_path = tempfile.NamedTemporaryFile(mode='w+', delete=False)
temp_path_mp3 = tools.youtube.download.download_mp3(lecture.content_link(), temp_path.name, SAMPLE_SIZE_SECONDS)
length = extract_mp3_len(temp_path_mp3)
if length > MAX_ALLOWED_VIDEO_LENGTH:
            logger.info(f'video was too long {length} > {MAX_ALLOWED_VIDEO_LENGTH}')
lecture.approved = False
lecture.save()
temp_path.close()
return
shorten_mp3(temp_path_mp3, SAMPLE_SIZE_SECONDS)
transcript_dir = f'{temp_path_mp3}.transcription'
tools.audio.transcription.save_text(temp_path_mp3, lecture, transcript_dir, save_progress=False)
transcribed_text_path = f'{transcript_dir}/{os.path.basename(temp_path_mp3)}.txt'
with open(transcribed_text_path, 'r') as file:
text = file.read()
if lecture.language == Lecture.Language.ENGLISH:
prompt = tools.text.prompts.create_text_to_decide_if_video_is_appropriate_english(text)
elif lecture.language == Lecture.Language.SWEDISH:
prompt = tools.text.prompts.create_text_to_decide_if_video_is_appropriate_swedish(text)
else:
raise ValueError(f'unsupported value error {lecture.language}')
response = tools.text.ai.gpt3(
prompt,
time_to_live=60 * 60 * 5, # 5 hrs
max_retries=10,
retry_interval=[
10,
30,
60,
2 * 60,
10 * 60,
20 * 60,
30 * 60,
2 * 60 * 60,
30 * 60,
30 * 60,
],
analysis_id=analysis.id,
)
except Exception as e:
lecture.refresh()
analysis = lecture.get_last_analysis()
analysis.state = Analysis.State.FAILURE
analysis.save()
save_message_for_analysis(analysis, 'Classification failed', 'OpenAI is likely overloaded.')
raise e
category_is_ok = False
if tools.text.prompts.CATEGORY_RECORDED_LECTURE.lower() in response.lower():
category_is_ok = True
logger.info(f'response from openAI: {response}')
lecture.refresh()
lecture.approved = category_is_ok
lecture.save()
if category_is_ok:
jobs.schedule_analysis_of_lecture(lecture)
else:
jobs.schedule_fetch_of_lecture_metadata(lecture)
analysis = lecture.get_last_analysis()
analysis.state = Analysis.State.DENIED
analysis.save()
temp_path.close()
# Test run the job
if __name__ == '__main__':
job('L3pk_TBkihU', Lecture.Language.ENGLISH)
| [] |
2024-01-10 | wavo89/reader-checker | utils~inspect_transcript.py | import openai
from better_profanity import profanity
profanity.load_censor_words()
def inspect_transcript(transcript):
"""
Checks if the given transcript contains any inappropriate content using GPT-3.5.
Returns False if any inappropriate content is found, otherwise returns True.
"""
# Check for bad words using the better_profanity library
contains_bad_word = profanity.contains_profanity(transcript)
print("Contains bad word?:", contains_bad_word)
if contains_bad_word:
return False
# Create a prompt for GPT-3.5
prompt = f"Review this text for any inappropriate or innuendo content: '{transcript}'. If nothing inappropriate is found, please respond with only a 1. If anything inappropriate is found, respond with a 0. Do not respond with anything else besides a 0 or 1 even if its very inappropriate. If its too inappropriate for you to even respond, respond with a 0."
# Use openai.ChatCompletion to get a response from GPT-3.5
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}]
)
# Extract the GPT-3.5 response
gpt_response = response["choices"][0]["message"]["content"].strip()
print("GPT response:", gpt_response)
# Check the response and return the appropriate value
if gpt_response == "1":
print("Passes inspection: True")
return True
else:
print("Passes inspection: False")
return False
# Example usage:
# result = inspect_transcript("Some innocent text here.")
# print(result)
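# A minimal runnable sketch (assumptions: the legacy openai<1.0 SDK used above,
# and an OPENAI_API_KEY environment variable; the sample sentence is illustrative):
if __name__ == "__main__":
    import os

    # The module itself never sets the key, so do it here for a direct run.
    openai.api_key = os.environ["OPENAI_API_KEY"]
    print(inspect_transcript("The quick brown fox jumps over the lazy dog."))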
| [
"Review this text for any inappropriate or innuendo content: 'PLACEHOLDER'. If nothing inappropriate is found, please respond with only a 1. If anything inappropriate is found, respond with a 0. Do not respond with anything else besides a 0 or 1 even if its very inappropriate. If its too inappropriate for you to even respond, respond with a 0."
] |
2024-01-10 | vldc-hq/vldc-bot | bot~skills~buktopuha.py | import logging
import os
import random
import re
from datetime import datetime, timedelta
from random import randint
from tempfile import gettempdir
from threading import Lock
from typing import Dict, Optional
from uuid import uuid4
import openai
import google.generativeai as genai
import pymongo
from config import get_group_chat_id
from db.mongo import get_db
from filters import admin_filter
from handlers import CommandHandler
from mode import cleanup_queue_update
from PIL import Image, ImageDraw, ImageFont
from pymongo.collection import Collection
from skills.mute import mute_user_for_time
from telegram import Message, Update, User
from telegram.ext import CallbackContext, MessageHandler, Updater
from telegram.ext.filters import Filters
logger = logging.getLogger(__name__)
MEME_REGEX = re.compile(r"\/[вb][иu][kк][tт][оo][pр][иu][hн][aа]", re.IGNORECASE)
GAME_TIME_SEC = 30
class DB:
"""
BuKToPuHa document:
{
_id: 420, # int -- tg user id
meta: {...}, # Dict -- full tg user object (just in case)
game_counter: 10, # int -- number of games started
win_counter: 8, # int -- number of games won
total_score: 100, # int -- total score gained
created_at: datetime(...), # DateTime -- user record creation time
updated_at: datetime(...) # DateTime -- last record update time
}
"""
def __init__(self, db_name: str):
self._coll: Collection = get_db(db_name).players
def find_all(self):
return list(self._coll.find({}).sort("win_counter", pymongo.DESCENDING))
def find(self, user_id: str):
return self._coll.find_one({"_id": user_id})
def add(self, user: User, score: int = 0):
now: datetime = datetime.now()
game_inc = 1
win_inc = 0
if score > 0:
game_inc = 0
win_inc = 1
return self._coll.insert_one(
{
"_id": user.id,
"meta": user.to_dict(),
"game_counter": game_inc,
"win_counter": win_inc,
"total_score": score,
"created_at": now,
"updated_at": now,
}
)
def game(self, user_id: str):
self._coll.update_one(
{"_id": user_id},
{
"$inc": {
"game_counter": 1,
},
"$set": {"updated_at": datetime.now()},
},
)
def win(self, user_id: str, score: int):
self._coll.update_one(
{"_id": user_id},
{
"$inc": {"win_counter": 1, "total_score": score},
"$set": {"updated_at": datetime.now()},
},
)
def remove(self, user_id: str):
self._coll.delete_one({"_id": user_id})
def remove_all(self):
self._coll.delete_many({})
_db = DB(db_name="buktopuha")
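# Illustrative DAO usage (hypothetical user id 420 and score 42), matching the
# document schema described above: create the record on a player's first game,
# then bump counters in place on later games and wins.
#
#   if _db.find(user_id=420) is None:
#       _db.add(user=tg_user, score=0)   # first game, no win yet
#   else:
#       _db.game(user_id=420)            # increment game_counter
#   _db.win(user_id=420, score=42)       # increment win_counter and total_score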
class Buktopuha:
def __init__(self):
self.the_lock = Lock()
self.word = ""
self.started_at = None
self.last_game_at = None
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_word(self) -> str:
with self.the_lock:
return self.word
def since_last_game(self) -> timedelta:
if self.last_game_at is None:
return timedelta(days=1)
return datetime.now() - self.last_game_at
def can_start(self) -> bool:
# TODO: leaky bucket
with self.the_lock:
return self.since_last_game() > timedelta(minutes=1)
def start(self, word: str):
with self.the_lock:
self.word = word
self.started_at = datetime.now()
self.last_game_at = self.started_at
def stop(self):
with self.the_lock:
self.word = ""
self.started_at = None
def hint1(self, chat_id: str, orig_word: str):
def _f(context: CallbackContext):
word = self.get_word()
# Need to double check the word is the same
# because game can be already stopped
# when hint job is executed.
if word != orig_word:
return
char = word[randint(0, len(word) - 1)]
masked = re.sub(f"[^{char}]", "*", word)
result = context.bot.send_message(
chat_id,
f"First hint: {masked}",
)
cleanup_queue_update(
context.job_queue,
None,
result,
seconds=30,
)
return _f
def hint2(self, chat_id: str, orig_word: str):
def _f(context: CallbackContext):
word = self.get_word()
if word != orig_word:
return
word = list(word)
random.shuffle(word)
anagram = "".join(word)
result = context.bot.send_message(
chat_id,
f"Second hint (anagram): {anagram}",
)
cleanup_queue_update(
context.job_queue,
None,
result,
seconds=30,
)
return _f
def end(self, chat_id: str, orig_word: str):
def _f(context: CallbackContext):
word = self.get_word()
if word != orig_word:
return
self.stop()
result = context.bot.send_message(
chat_id,
f"Nobody guessed the word {word} 😢",
)
cleanup_queue_update(
context.job_queue,
None,
result,
seconds=30,
)
return _f
def check_for_answer(self, text: str):
word = self.get_word()
return word != "" and text.lower().find(word) >= 0
def add_buktopuha(upd: Updater, handlers_group: int):
global WORDLIST
try:
with open("/app/words.txt", "rt", encoding="utf8") as fi:
WORDLIST = fi.read().splitlines()
except: # noqa: E722
logger.error("failed to read wordlist!")
logger.info("registering buktopuha handlers")
dp = upd.dispatcher
dp.add_handler(
CommandHandler(
"znatoki",
show_nerds,
filters=~Filters.chat(username=get_group_chat_id().strip("@"))
| admin_filter,
run_async=True,
),
handlers_group,
)
dp.add_handler(
# limit to groups to avoid API abuse
MessageHandler(
Filters.chat(username=get_group_chat_id().strip("@"))
& Filters.regex(MEME_REGEX),
start_buktopuha,
run_async=True,
),
handlers_group,
)
dp.add_handler(
MessageHandler(
Filters.chat(username=get_group_chat_id().strip("@"))
& Filters.text
& ~Filters.status_update,
check_for_answer,
run_async=True,
),
handlers_group,
)
WORDLIST = [
"babirusa",
"gerenuk",
"pangolin",
"capybara",
"platypus",
"armadillo",
"axolotl",
"wombat",
]
game = Buktopuha()
def stop_jobs(update: Update, context: CallbackContext, names: list[str]):
for name in names:
for job in context.job_queue.get_jobs_by_name(name):
job.schedule_removal()
def check_for_answer(update: Update, context: CallbackContext):
if update.effective_message is None:
return
if game.check_for_answer(update.effective_message.text):
word = game.get_word()
yes = random.choice(
[
"yes",
"correct",
"indeed",
"yup",
"yep",
"yeah",
"aha",
"definetly",
"affirmative",
"right",
"✅",
"👍",
"👏",
]
)
result = context.bot.send_message(
update.effective_chat.id,
yes,
reply_to_message_id=update.message.message_id,
)
game.stop()
stop_jobs(update, context, [f"{j}-{word}" for j in ["hint1", "hint2", "end"]])
# Felix Felicis
if random.random() < 0.1:
minutes = random.randint(1, 10)
result = context.bot.send_message(
update.effective_chat.id,
f"Oh, you're lucky! You get a prize: ban for {minutes} min!",
reply_to_message_id=update.message.message_id,
)
mute_user_for_time(
update, context, update.effective_user, timedelta(minutes=minutes)
)
cleanup_queue_update(
context.job_queue,
update.message,
result,
30,
)
# game.since_last_game() at this point is the time elapsed since the current game started.
# So the maximum score achievable is 30 + len(word) if the user guesses in zero seconds.
score = GAME_TIME_SEC - game.since_last_game().seconds + len(word)
existing_user = _db.find(user_id=update.effective_user.id)
if existing_user is None:
_db.add(user=update.effective_user, score=score)
else:
_db.win(user_id=update.effective_user.id, score=score)
def generate_question(prompt, word) -> str:
model = random.choice(["gpt-3.5-turbo", "gpt-4-1106-preview", "gemini-pro"])
if model.startswith("gpt"):
response = openai.chat.completions.create(
model=model,
messages=[{"role": "system", "content": prompt}],
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
)
rs = response.choices[0].message.content
return f"{model}: " + re.sub(
word, "***", rs, flags=re.IGNORECASE
).strip().strip('"')
if model.startswith("gemini"):
resp = genai.GenerativeModel(model).generate_content(prompt)
return f"{model}: " + re.sub(
word, "***", resp.text, flags=re.IGNORECASE
).strip().strip('"')
raise Exception(f"unknown model '{model}'")
def start_buktopuha(update: Update, context: CallbackContext):
if update.message is None:
return
result: Optional[Message] = None
if not game.can_start():
result = context.bot.send_message(
update.effective_chat.id,
"Hey, not so fast!",
)
cleanup_queue_update(
context.job_queue,
update.message,
result,
10,
)
mute_user_for_time(update, context, update.effective_user, timedelta(minutes=1))
return
word = random.choice(WORDLIST)
prompt = f"""You are a facilitator of an online quiz game.
Your task is to make engaging and tricky quiz questions.
You should try to make your question fun and interesting, but keep your wording simple and short (less than 15 words).
Keep in mind that for part of the audience English is not a native language.
You can use historical references or examples to explain the word.
For example, a good quiz question for the word "horse" can be:
Wooden statue of this animal helped to end the siege of Troy.
Please write a quiz question for the word '{word}' using a single sentence without mentioning the word itself."""
try:
question = generate_question(prompt, word)
except: # pylint: disable=bare-except # noqa: E722
logger.error("Error calling GenAI model", exc_info=1)
result = context.bot.send_message(
update.effective_chat.id,
"Sorry, my GenAI brain is dizzy 😵💫 Try in a minute!",
)
cleanup_queue_update(
context.job_queue,
update.message,
result,
10,
)
game.start("") # set last_game time, to dissallow immediate reattempts
return
msg = question
if game.since_last_game() > timedelta(minutes=120):
msg = f"🎠 Starting the BukToPuHa! 🎪\nTry to guess the word in 30seconds:\n\n{question}"
result = context.bot.send_message(
update.effective_chat.id,
msg,
)
game.start(word)
context.job_queue.run_once(
game.hint1(update.effective_chat.id, word),
10,
context=context,
name=f"hint1-{word}",
)
context.job_queue.run_once(
game.hint2(update.effective_chat.id, word),
20,
context=context,
name=f"hint2-{word}",
)
context.job_queue.run_once(
game.end(update.effective_chat.id, word),
30,
context=context,
name=f"end-{word}",
)
existing_user = _db.find(user_id=update.effective_user.id)
if existing_user is None:
_db.add(user=update.effective_user, score=0)
else:
_db.game(user_id=update.effective_user.id)
def show_nerds(update: Update, context: CallbackContext):
"""Show leader board, I believe it should looks like smth like:
3HaToKu BuKToPuHbI
==================================================
score | games | wins | znatok
------------+---------+---------+-----------------
100500 | 666 | 666 | egregors
9000 | 420 | 999 | getjump
--------------------------------------------------
"""
logger.error(update)
# CSS is awesome!
# todo:
# need to find out how to show board for mobile telegram as well
board = (
f"{'3HaToKu BuKToPuHbI'.center(52)}\n"
f"{'='*55}\n"
f"{'score'.center(12)} "
f"| {'games'.center(9)} "
f"| {'wins'.center(9)} "
f"| {'znatok'.center(16)} "
f"\n"
f"{'-'*12} + {'-'*9} + {'-'*9} + {'-'*16}\n"
)
znatoki = _db.find_all()
znatoki_length = len(znatoki)
for znatok in znatoki:
username = _get_username(znatok)
board += (
f"{str(znatok['total_score']).ljust(12)} "
f"| {str(znatok['game_counter']).ljust(9)} "
f"| {str(znatok['win_counter']).ljust(9)} "
f"| {username.ljust(16)}\n"
)
board += f"{'-'*55}"
try:
board_image, board_image_path = from_text_to_image(board, znatoki_length)
except (ValueError, RuntimeError, OSError) as ex:
logger.error("Cannot get image from text, znatoki error: %s", ex)
return
result: Optional[Message] = None
if znatoki_length <= ZNATOKI_LIMIT_FOR_IMAGE:
result = context.bot.send_photo(
chat_id=update.effective_chat.id,
photo=board_image,
disable_notification=True,
)
else:
result = context.bot.send_document(
chat_id=update.effective_chat.id,
document=board_image,
disable_notification=True,
)
cleanup_queue_update(
context.job_queue,
update.message,
result,
600,
remove_cmd=True,
remove_reply=False,
)
os.remove(board_image_path)
def _get_username(h: Dict) -> str:
"""Get username or fullname or unknown"""
m = h["meta"]
username = m.get("username")
fname = m.get("first_name")
lname = m.get("last_name")
return (
username
or " ".join(filter(lambda x: x is not None, [fname, lname]))
or "unknown"
)
JPEG = "JPEG"
EXTENSION = ".jpg"
COLOR = "white"
MODE = "L"
FONT_SIZE = 12
ZNATOKI_LIMIT_FOR_IMAGE = 25
FONT = "firacode.ttf"
def _create_empty_image(image_path, limit):
width = 480
line_multi = 1
header_height = 30
line_px = FONT_SIZE * line_multi
height = int((limit * line_px * 1.5) + header_height)
size = (width, height)
logger.info("Creating image")
image = Image.new(MODE, size, COLOR)
logger.info("Saving image")
try:
image.save(image_path, JPEG)
logger.info("Empty image saved")
except (ValueError, OSError) as ex:
logger.error("Error during image saving, error: %s", ex)
return None
return image
def _add_text_to_image(text, image_path):
logger.info("Adding text to image")
image = Image.open(image_path)
logger.info("Getting font")
font_path = os.path.join("fonts", FONT)
font = ImageFont.truetype(font_path, FONT_SIZE)
logger.info("Font %s has been found", FONT)
draw = ImageDraw.Draw(image)
position = (45, 0)
draw.text(xy=position, text=text, font=font)
try:
image.save(image_path, JPEG)
logger.info("Image with text saved")
except (ValueError, OSError) as ex:
logger.error("Error during image with text saving, error: %s", ex)
os.remove(image_path)
return None
return image
def from_text_to_image(text, limit):
limit = max(limit, ZNATOKI_LIMIT_FOR_IMAGE)
logger.info("Getting temp dir")
tmp_dir = gettempdir()
file_name = str(uuid4())
image_path = f"{tmp_dir}/{file_name}{EXTENSION}"
_create_empty_image(image_path, limit)
_add_text_to_image(text, image_path)
# pylint: disable=consider-using-with
image = open(image_path, "rb")
return image, image_path
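# A minimal sketch for eyeballing the leaderboard rendering locally (it assumes
# fonts/firacode.ttf exists relative to the working directory):
#
#   img, path = from_text_to_image("score | games | wins | znatok", 5)
#   print("rendered to", path)   # limit < 25 is clamped to ZNATOKI_LIMIT_FOR_IMAGE
#   img.close()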
| [
"You are a facilitator of an online quiz game.\n Your task is to make engaging and tricky quiz questions.\n You should try to make your question fun and interesting, but keep your wording simple and short (less than 15 words).\n Keep in mind that for part of the audience English is not a native language.\n You can use historical references or examples to explain the word.\n For expample good quiz question for word \"horse\" can be:\n Wooden statue of this animal helped to end the siege of Troy.\n\n Please write a quiz question for the word 'PLACEHOLDER' using single sentence without mentioning the word itself."
] |
2024-01-10 | Privado-Inc/MuroChat-Backend | chats~services~chatService.py | import pdb
from rest_framework import status
from app_wrapper.appService import getAI_Response
from app_wrapper.commonService import applyDataFirewall
from app_wrapper.llamaService import getAI_ResponseFromLlama
from chats.dao.ChatDao import TYPE_OF_AI, ChatDao
from chats.dao.LlmModelDao import LlmModelDao
from chats.dao.UserChatShareDao import UserChatShareDao
from chats.dao.ChatHistoryDao import TYPE_OF_MESSAGE, ChatHistoryDao
from utils.EmailClient import EmailClient
from utils.accessorUtils import getOrNone
from utils.cryptoClient import getCryptoClient
from utils.dateTimeUtils import convert_bson_datetime_to_string
from utils.paginationUtils import paginationMeta
from utils.responseFormatter import formatAndReturnResponse
import logging
import json
from bson.json_util import dumps
from django.core.paginator import Paginator
from bson.objectid import ObjectId
from users.models import User
from datetime import datetime, timedelta
from django.conf import settings
import openai
log = logging.getLogger(__name__)
chatDao = ChatDao()
chatHistoryDao = ChatHistoryDao()
userChatShareDao = UserChatShareDao()
llmModelDao = LlmModelDao()
crypto = getCryptoClient()
def createChat(userId, data, isUI):
log.info('createChat')
if "name" not in data:
return formatAndReturnResponse({ "message": "Name is missing while creating chat"}, status=status.HTTP_200_OK, isUI=isUI)
response = chatDao.createChat(userId, data["name"]) # Here need to get name from the ML which is context of first message
if not response:
if response is None or not response.acknowledged:
return formatAndReturnResponse({'message': 'Failed to create chat. Maybe the name already exists.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
return formatAndReturnResponse({"message": 'Chat created successfully', "userId": userId }, status=status.HTTP_200_OK, isUI=isUI)
def getChats(userId, page, pageSize, searchTerm, isUI):
log.info('getChats')
data = json.loads(dumps(chatDao.getChats(userIds=[userId], searchTerm=searchTerm)))
if data and len(data):
paginator = Paginator(data, pageSize)
page_obj = paginator.get_page(page)
chats = list()
for chat in page_obj:
chats.append(chat)
meta = paginationMeta(paginator, page_obj, pageSize)
return formatAndReturnResponse(chats, status=status.HTTP_200_OK, isUI=isUI, pageInfo=meta)
return formatAndReturnResponse({'message': 'No chats found for given user id ' + str(userId)}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
def getChatsByPeriod(userId, isUI):
chats = {}
periods = settings.CHAT_GROUPS
chats['pinnedChats'] = json.loads(dumps(chatDao.getPinnedChats(userId)))
chatsByDate = list()
for period in periods:
query = getQueryForPeriod(period)
periodChats = json.loads(dumps(chatDao.getChats(userIds=[userId], period=query)))
for chat in periodChats:
chat["createdAt"] = chat["createdAt"]["$date"]
chat["modifiedAt"] = chat["modifiedAt"]["$date"]
chatsByDate.append({'title': period, 'chats': periodChats})
chats['chatsByDate'] = chatsByDate
return formatAndReturnResponse(chats, status=status.HTTP_200_OK, isUI=isUI)
def getQueryForPeriod(period):
today = datetime.now()
start_of_today = today.replace(hour=0, minute=0, second=0, microsecond=0)
seven_days_ago = start_of_today - timedelta(days=6)
thirty_days_ago = start_of_today - timedelta(days=29)
if period == 'Today':
return {
"modifiedAt": {
"$gte": today.replace(hour=0, minute=0, second=0, microsecond=0),
"$lt": today.replace(hour=23, minute=59, second=59, microsecond=999),
}
}
elif period == 'Last 7 Days':
return {
"modifiedAt": {"$gte": seven_days_ago, "$lt": start_of_today}
}
elif period == 'Last 30 Days':
return {
"modifiedAt": {"$gte": thirty_days_ago, "$lt": seven_days_ago}
}
elif period == 'Others':
return {
"modifiedAt": {"$lt": thirty_days_ago}
}
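# For reference: with "now" = 2024-01-10 15:00, getQueryForPeriod('Last 7 Days')
# resolves to {"modifiedAt": {"$gte": 2024-01-04 00:00, "$lt": 2024-01-10 00:00}},
# i.e. a six-day lookback window that stops where the 'Today' bucket begins.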
def createOrUpdateChatMessage(userId, chatId, data, isUI, isPushedToChatHistory = False):
log.info('createOrUpdateChatMessage')
if "message" not in data and not isPushedToChatHistory:
return formatAndReturnResponse({'message': 'Message field is missing.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
chatHistory = json.loads(dumps(chatHistoryDao.getChatHistory(userId, chatId)))
history = list()
if chatHistory:
for messageObj in chatHistory['messages']:
history.append({
"role": "system" if messageObj['type'] == TYPE_OF_MESSAGE['GPT'] else "user",
"content": messageObj['message'],
"anonymizedMessage": messageObj['anonymizedMessage'],
"piiToEntityTypeMap": messageObj['piiToEntityTypeMap']
})
try:
message = data.get("message", None)
response = llmModelDao.getDefaultLlmModel()
if response:
response["secretKey"] = crypto.decrypt(response["secretKey"].encode('utf-8')).decode('utf-8')
if not message:
lastUserMessage = chatHistory["messages"][-1]
anonymizedMessage = lastUserMessage["anonymizedMessage"]
piiToEntityTypeMap = lastUserMessage["piiToEntityTypeMap"]
else:
anonymizedMessage, piiToEntityTypeMap = applyDataFirewall(message, chatHistory.get("piiToEntityTypeMap", {}) if chatHistory else {})
if response['type'] == TYPE_OF_AI['GPT']:
aiResponse = getAI_Response(response, message, anonymizedMessage, piiToEntityTypeMap, chatHistory, userId, chatId, isPushedToChatHistory)
else:
aiResponse = getAI_ResponseFromLlama(response, message, anonymizedMessage, piiToEntityTypeMap, chatHistory, userId, chatId, isPushedToChatHistory)
else:
return formatAndReturnResponse({ 'message': 'No Model Selected'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
except Exception as e:
log.error(e)
print(e)
return formatAndReturnResponse({ 'message': 'Failed to get response from Chat GPT'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
return aiResponse
def deleteChatMessage(userId, chatId, isUI):
log.info('deleteChatMessage')
if not chatId:
return formatAndReturnResponse({'message': 'ChatId field is missing.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
chat = chatDao.deleteChat(userId, chatId)
if chat:
return formatAndReturnResponse({ 'message': 'Successfully deleted chatId ' + chatId}, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with chatId' + chatId}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def createMessages(userId, chatId, data, isUI):
log.info('createChat ' + str(data))
if "message" not in data:
return formatAndReturnResponse({'message': 'Message field is missing.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
message = data["message"]
isChatExist = chatDao.getChats(userIds=[userId]) and chatId
response = None
if not isChatExist:
response = chatDao.createChat(userId, message[:24])  # TODO: derive the chat name from the ML-generated context of the first message
if not response.acknowledged:
return formatAndReturnResponse({'message': 'Failed to create chat. Maybe the name already exists.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
finalChatId = chatId or str(response.inserted_id)
(data, _) = chatHistoryDao.createOrUpdateChat(userId, finalChatId, message, 'USER_INPUT')
(data, _) = chatHistoryDao.createOrUpdateChat(userId, finalChatId, '', 'GPT')
chatMeta = dict(chatDao.getChat(userId, finalChatId))
chatMeta["lastMessage"] = {
"message": '',
"_id": str(data.inserted_id) if hasattr(data, 'inserted_id') else str(data.upserted_id) or 'lastMessage',
"type": 'GPT'
}
chatMeta["_id"] = str(chatMeta["_id"])
if data:
return formatAndReturnResponse(chatMeta, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'Failed to get last message' }, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def getRedactedMetadata(userId, chatId, isUI):
log.info('getRedactedMetadata')
chat = chatHistoryDao.getChatHistory(userId, chatId)
if not chat:
return formatAndReturnResponse({'message': 'No redacted meta data found for this message'}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
data = json.loads(dumps(chat))
if len(data):
rMessages = data['messages'][::-1]
messageWithRedactedMetadata = rMessages[0]
data['messages'] = messageWithRedactedMetadata
return formatAndReturnResponse(data, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No redacted meta data found for this message'}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
def getChatMessages(userId, chatId, page, pageSize, isUI):
log.info('getChatMessages')
chat = chatHistoryDao.getChatHistory(userId, chatId)
if not chat:
sharedChat = userChatShareDao.isChatIdSharedWithTheUser(chatId, userId)
if not sharedChat:
return formatAndReturnResponse({'message': 'Chat not found'}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
chat = chatHistoryDao.getChatHistory(sharedChat["sharedBy"], chatId)
if not chat:
return formatAndReturnResponse({'message': 'Chat not found'}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
data = json.loads(dumps(chat))
if len(data):
rMessages = data['messages'][::-1]
paginator = Paginator(rMessages, pageSize)
page_obj = paginator.get_page(page)
messages = list()
for message in page_obj:
message['createdAt'] = convert_bson_datetime_to_string(message.get('createdAt'))
message['modifiedAt'] = convert_bson_datetime_to_string(message.get('modifiedAt'))
# Messages will be loaded in reverse order - scrolling up on UI will load older messages
messages.insert(0, message)
data['messages'] = messages
meta = paginationMeta(paginator, page_obj, pageSize)
return formatAndReturnResponse(data, status=status.HTTP_200_OK, isUI=isUI, pageInfo=meta)
return formatAndReturnResponse({'message': 'No messages found for given chat id '}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
def deleteChatUserMessage(userId, chatId, messageId, isUI):
log.info('deleteChatUserMessage')
if not chatId or not messageId:
return formatAndReturnResponse({'message': 'ChatId or messageId field is missing.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
chat = chatHistoryDao.deleteMessage(userId, chatId, messageId)
if chat:
return formatAndReturnResponse({ 'message': 'Successfully deleted chatId ' + chatId + ' and messageId ' + messageId}, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with chatId ' + chatId + ' or no user message found'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def updateOrRegenerateChatUserMessage(userId, chatId, messageId, data, isUI, isRegenerate = False): # or regenerate
log.info('updateOrRegenerateChatUserMessage')
if not chatId:
return formatAndReturnResponse({'message': 'ChatId or messageId field is missing.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
if not messageId and not isRegenerate:
return formatAndReturnResponse({'message': 'Message is mandatory to update the message.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
chat = chatHistoryDao.deleteMessage(userId, chatId, messageId)
if chat.modified_count > 0:
return createOrUpdateChatMessage(userId, chatId, data, isUI, isRegenerate)
return formatAndReturnResponse({'message': 'No chat found with chatId ' + chatId + ' or no user message found'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def getChatsByFilter(request, isUI):
userId = request.GET.get('userId')
userIds = userId.split(',') if userId else None
chatId = request.GET.get('chatId')
chatIds = chatId.split(',') if chatId else None
messageType = request.GET.get('messageType')
searchTerm = request.GET.get('searchTerm')
messages = list()
chats = list()
if chatIds:
chatIds = [ObjectId(chatId) for chatId in chatIds]
chats = chatDao.getChatsFromIds(chatIds)
elif not userIds:
chats = chatDao.getChats()
if userIds:
chats = chatDao.getChats(userIds=userIds)
chats = sorted(chats, key=lambda x: x['modifiedAt'])
for chat in chats:
chatId = str(chat["_id"])
chatHistory = chatHistoryDao.getChatHistory(chat["userId"], chatId, includeDeleted=True, messageType=messageType, searchTerm=searchTerm)
if not chatHistory:
continue
data = {
'chatId': chatId,
'userId': chat["userId"],
'chat_name': chat["name"],
'modifiedAt': chat["modifiedAt"],
'messages': chatHistory['messages']
}
messages.insert(0, data)
return formatAndReturnResponse(messages, status=status.HTTP_200_OK, isUI=isUI)
def getBookmarkedChatMessages(userId, page, pageSize, typeFilter, isUI):
chatMeta = json.loads(dumps(chatDao.getBookmarkedChats(userId)))
if chatMeta:
bookmarkedChatMessages = json.loads(dumps(chatHistoryDao.getBookmarkedChatMessages(userId, typeFilter)))
filteredChatMessages = list()
for chat in chatMeta:
for chatMessage in bookmarkedChatMessages:
if chatMessage['chatId'] == chat['_id']['$oid']:
chatMessage['name'] = chat['name']
filteredChatMessages.append(chatMessage)
for chatMessage in filteredChatMessages:
for message in chatMessage['messages']:
message['createdAt'] = convert_bson_datetime_to_string(message.get('createdAt'))
message['modifiedAt'] = convert_bson_datetime_to_string(message.get('modifiedAt'))
if filteredChatMessages and len(filteredChatMessages) > 0:# and type_filter_check:
paginator = Paginator(filteredChatMessages, pageSize)
page_obj = paginator.get_page(page)
chatMetaList = list()
for chat in page_obj:
chatMetaList.append(chat)
meta = paginationMeta(paginator, page_obj, pageSize)
return formatAndReturnResponse(chatMetaList, status=status.HTTP_200_OK, isUI=isUI, pageInfo=meta)
return formatAndReturnResponse({'message': 'No bookmarked chats found for given user id ' + str(userId) + ' and type ' + str(typeFilter)}, status=status.HTTP_404_NOT_FOUND, isUI=isUI)
def bookmarkChatMessage(userId, chatId, messageId, isUI):
chatMeta = chatDao.bookmarkChat(userId, chatId) # 2nd time just reset "isBookmarked": True
if chatMeta:
chatHistory = chatHistoryDao.setBookmarkMessage(userId, chatId, messageId, True)
if chatHistory:
return formatAndReturnResponse({ 'message': 'Successfully Bookmarked chatId ' + chatId + ' and message with messageId ' + messageId}, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with chatId ' + chatId + ' or message with messageId ' + messageId}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def removeBookmarkChatMessage(userId, chatId, messageId, isUI):
chatHistory = chatHistoryDao.setBookmarkMessage(userId, chatId, messageId, False)
if chatHistory:
bookmarkedSome = chatHistoryDao.hasAnyBookmarkMessage(userId, chatId)
if not bookmarkedSome:
# if none of the chat messages are bookmarked then remove bookmark of chatMeta
chatMeta = chatDao.removeBookmarkChat(userId, chatId)
if chatMeta:
return formatAndReturnResponse({ 'message': 'Removed Bookmarked chatId ' + chatId + ' message with messageId ' + messageId}, status=status.HTTP_200_OK, isUI=isUI)
# Failed to remove bookmark from chatHistory messages
return formatAndReturnResponse({'message': 'No chat found with chatId ' + chatId + ' or message with messageId ' + messageId}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
# Message bookmark removed but chat bookmark kept as other messages is bookmarked
return formatAndReturnResponse({'message': 'Successfully removed message with messageId ' + messageId}, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with chatId ' + chatId + ' or message with messageId ' + messageId}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def pinChat(userId, chatId, isUI):
result = chatDao.pinChat(userId, chatId)
if result.matched_count != 0:
return formatAndReturnResponse({'message': 'Successfully pinned chat with id ' + chatId}, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with id ' + chatId}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def unpinChat(userId, chatId, isUI):
result = chatDao.unpinChat(userId, chatId)
if result.matched_count != 0:
return formatAndReturnResponse({'message': 'Successfully unpinned chat with id ' + chatId}, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with id ' + chatId}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
def shareChats(userId, chatId, sharedWith, name, isUI):
log.info('shareChats')
if not chatId or not userId:
return formatAndReturnResponse({'message': 'ChatId or userIds or sharedWith fields are mandatory.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
sharedWithEveryOne = len(sharedWith) == 0
sharedWith = sharedWith if len(sharedWith) else [user.id for user in list(User.objects.exclude(id=userId))]  # empty list means "share with everyone"
# TODO: With this we can even detect what all user id's are invalid but we need to think before returning on the UI because it can open the security threat of knowing valid customer ids.
# existingUserIds = User.objects.filter(id__in=sharedWith).values_list('id', flat=True)
# existingUserIdsSet = set(existingUserIds)
# nonExistingUserIds = list(set(existingUserIds) - existingUserIdsSet)
# existingUserIds = list(existingUserIds)
# ##
# existingUserIds = User.objects.filter(id__in=sharedWith).values_list('id', flat=True)
existingUserIds = sharedWith
allExist = set(sharedWith) == set(existingUserIds)
if not allExist:
return formatAndReturnResponse({'message': 'Few user Ids are invalid. Failed to share'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
response = userChatShareDao.shareChat(sharedBy=userId, chatId=chatId, sharedWith=sharedWith, name=name)
if response.acknowledged:
if not sharedWithEveryOne:
sharedByUser = getOrNone(model=User, id=userId)
for chatSharedWithUserId in sharedWith:
sharedWithUser = getOrNone(model=User, id=chatSharedWithUserId)
shareUrl = f"{settings.CONFIG_UI_HOST}?id={chatId}"
def templateReplacer(template):
template = template.replace("{{username}}", sharedWithUser.firstName)
template = template.replace("{{sharedBy}}", sharedByUser.firstName)
template = template.replace("{{shareUrl}}", shareUrl)
return template
EmailClient.sendEmailWithTemplate(
[sharedWithUser.email],
f"{sharedByUser.firstName} shared chat with you",
f"{settings.BASE_DIR}/templates/emails/shareChat.html",
templateReplacer
)
return formatAndReturnResponse({'message': 'Chat Shared Successfully.'}, status=status.HTTP_200_OK, isUI=isUI)
else:
return formatAndReturnResponse({'message': 'Failed to share the chat. Please try again'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
def getSharedChats(userId, isUI):
log.info('getSharedChats')
if not userId:
return formatAndReturnResponse({'message': 'ChatId or userIds fields are mandatory.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
chatIdsSharedByUser = userChatShareDao.getChatIdsSharedByUser(sharedBy=userId)
chatIdsSharedWithUser = userChatShareDao.getChatIdsSharedWithUser(userId=userId)
return formatAndReturnResponse({
'chatIdsSharedByUser': chatIdsSharedByUser,
'chatIdsSharedWithUser': chatIdsSharedWithUser
}, status=status.HTTP_200_OK, isUI=isUI)
def revokeSharedChat(chatId, sharedBy, userIds, isUI):
log.info('revokeSharedChat')
if not chatId:
return formatAndReturnResponse({'message': 'ChatId field is mandatory.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
response = userChatShareDao.revokeSharedChatAccess(chatId, sharedBy, userIds)
if response.deleted_count == 1 or response.modified_count == 1:
return formatAndReturnResponse({'message': 'Revoked Access Successfully.'}, status=status.HTTP_200_OK, isUI=isUI)
else:
return formatAndReturnResponse({'message': 'Failed to revoke access. Please try again'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
def selfRemoveFromSharedList(chatId, sharedBy, userId, isUI):
log.info('selfRemoveFromSharedList')
if not chatId or not sharedBy:
return formatAndReturnResponse({'message': 'ChatId and sharedBy field is mandatory.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
response = userChatShareDao.excludeFromSharing(chatId,sharedBy, userId)
if response.modified_count == 1:
return formatAndReturnResponse({'message': 'Revoked Access Successfully.'}, status=status.HTTP_200_OK, isUI=isUI)
else:
return formatAndReturnResponse({'message': 'Failed to revoke access. Please try again'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
def importChat(userChatSharingId, oldChatId, userId, isUI):
log.info('importChat')
if not oldChatId or not userId:
return formatAndReturnResponse({'message': 'ChatId and UserId fields are mandatory.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
if not userChatShareDao.isChatIdSharedWithTheUser(oldChatId, userId):
return formatAndReturnResponse({'message': 'Unauthorized access.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
sourceChatHistory = userChatShareDao.getSharedChatHistory(userChatSharingId, oldChatId)
chatCreated = chatDao.importChat(oldChatId, userId, sourceChatHistory['name'])
if not chatCreated:
return formatAndReturnResponse({'message': 'Chat Not found'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
if not chatCreated.acknowledged:
return formatAndReturnResponse({'message': 'Failed to import chat'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
newChatId = str(chatCreated.inserted_id)
chatHistoryImported = chatHistoryDao.importChatHistory(newChatId, userId, sourceChatHistory)
if not chatHistoryImported.acknowledged:
chatDao.deleteChat(userId, newChatId)  # TODO: handle retry later at some point
return formatAndReturnResponse({'message': 'Failed to import chat'}, status=status.HTTP_500_INTERNAL_SERVER_ERROR, isUI=isUI)
return formatAndReturnResponse({'message': 'Imported Chat Successfully.', "chatId": newChatId }, status=status.HTTP_200_OK, isUI=isUI)
def updateChatName(userId, chatId, name, isUI):
result = chatDao.updateChatName(userId, chatId, name)
if result.matched_count != 0:
return formatAndReturnResponse({'message': 'Successfully updated name for chat with id ' + chatId},
status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse({'message': 'No chat found with id ' + chatId}, status=status.HTTP_400_BAD_REQUEST,
isUI=isUI)
def createLlmModel(data, isUI):
modelType = data['modelType'] # name on UI
secretKey = data['secretKey'] # API KEY
version = data['modelVersion'] # Model version on UI
apiURL = data['apiURL'] # API URL for LLAMA model
if not isValidKey(modelType, secretKey):
return formatAndReturnResponse({'message': 'Invalid Secret key entered.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
if modelType not in TYPE_OF_AI:
return formatAndReturnResponse({'message': 'Currently we only support 2 model types: GPT and LLAMA'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
secretKey = crypto.encrypt(secretKey.encode('utf-8')).decode('utf-8')
response = llmModelDao.createLlmModel(modelType, version, secretKey, apiURL)
if not response:
if response is None or not response.acknowledged:
return formatAndReturnResponse({'message': 'Failed to create details.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
return formatAndReturnResponse({"message": 'Model details saved successfully', "id": str(response.inserted_id)}, status=status.HTTP_200_OK, isUI=isUI)
def getLlmModels(isUI):
models = json.loads(dumps(llmModelDao.getLlmModels()))
if len(models):
for model in models:
model['secretKey'] = crypto.decrypt(model['secretKey'].encode('utf-8')).decode('utf-8')
return formatAndReturnResponse(models, status=status.HTTP_200_OK, isUI=isUI)
return formatAndReturnResponse([], status=status.HTTP_200_OK, isUI=isUI)
def updateLlmModel(data, modelId, isUI):
if not modelId:
return formatAndReturnResponse({'message': 'Model id field is compulsory'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
model = llmModelDao.getLlmModel(modelId)
if not model:
return formatAndReturnResponse({'message': 'Model details not found'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
isDefault = data.get('isDefault', model["isDefault"])
modelType = data.get('modelType', model["type"])
secretKey = data.get('secretKey', crypto.decrypt(model["secretKey"].encode('utf-8')).decode('utf-8'))
version = data.get('modelVersion', model["modelVersion"])
apiURL = data.get('apiURL', model["apiURL"])
if not isValidKey(modelType, secretKey):
return formatAndReturnResponse({'message': 'Invalid Secret key entered.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
if modelType not in TYPE_OF_AI:
return formatAndReturnResponse({'message': 'Currently we only support 2 model types: GPT and LLAMA'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
secretKey = crypto.encrypt(secretKey.encode('utf-8')).decode('utf-8')
response = llmModelDao.updateLlmModel(modelId, modelType, version, secretKey, apiURL, isDefault)
if not response:
if response is None or not response.acknowledged:
return formatAndReturnResponse({'message': 'Failed to save details.'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
return formatAndReturnResponse({"message": 'Model details saved successfully'}, status=status.HTTP_200_OK, isUI=isUI)
def deleteModel(modelId, isUI):
if not modelId:
return formatAndReturnResponse({'message': 'Model id field is compulsory'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
model = llmModelDao.getLlmModel(modelId)
if not model:
return formatAndReturnResponse({'message': 'Model details not found'}, status=status.HTTP_400_BAD_REQUEST, isUI=isUI)
response = llmModelDao.deleteModel(modelId)
if not response:
return formatAndReturnResponse({'message': 'Default cannot be deleted. Please make another model as default and delete this one.'}, status=status.HTTP_400_BAD_REQUEST,
isUI=isUI)
return formatAndReturnResponse({"message": 'Model deleted succesfully'}, status=status.HTTP_200_OK, isUI=isUI)
def isValidKey(vendor, key):
if vendor == 'GPT':
openai.api_key = key
try:
openai.Model.list()
except openai.error.AuthenticationError:
return False
return True
| [
"{{username}}",
"{{shareUrl}}",
"{{sharedBy}}"
] |
2024-01-10 | Privado-Inc/MuroChat-Backend | app_wrapper~appService.py | import logging
from django.conf import settings
from app_wrapper.utils import parseToOpenAIFormat
import openai
from django.http import StreamingHttpResponse
from app_wrapper.commonService import TOKEN_LIMIT, WITH_LAST_N_MESSAGES, numTokensForGPT, streamParserHandleInitator
from chats.dao.ChatHistoryDao import TYPE_OF_MESSAGE
log = logging.getLogger(__name__)
def getAI_Response(modelInfo, message, anonymizedMessage, piiToEntityTypeMap, chatHistory, userId, chatId, isPushedToChatHistory = False):
openai.api_key = modelInfo['secretKey']
history = list()
if chatHistory:
for messageObj in chatHistory['messages'][WITH_LAST_N_MESSAGES:]:
history.append({
"role": "assistant" if messageObj['type'] == TYPE_OF_MESSAGE['GPT'] else "user",
"content": messageObj['anonymizedMessage']
})
if message:
history.append({"role": "user", "content": f"{anonymizedMessage}"})
tokens = numTokensForGPT(history, modelInfo['modelVersion'])
while tokens >= TOKEN_LIMIT:
history = history[2:]
tokens = numTokensForGPT(history, modelInfo['modelVersion'])
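# Worked example of the trimming loop above: with a hypothetical TOKEN_LIMIT of
# 4096, a history costing ~5000 tokens sheds its two oldest messages (one
# user/assistant pair) per iteration until the running count fits.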
def event_stream(chatId, isPushedToChatHistory):
log.info("chatId=%s history=%s", chatId, history)
completion = openai.ChatCompletion.create(
model=modelInfo["modelVersion"],
messages = history,
stream = True
)
streamParser = streamParserHandleInitator(chatId, isPushedToChatHistory)
for line in completion:
chunk = line['choices'][0].get('delta', {}).get('content', '')
yield from streamParser(chunk, userId, message, anonymizedMessage, piiToEntityTypeMap)
# TODO: Not working on local
# stream_thread = threading.Thread(target=list, args=(event_stream(chatId, isPushedToChatHistory),)) # Need to convert later to asynchronouse approach
# stream_thread.start()
return StreamingHttpResponse(event_stream(chatId, isPushedToChatHistory), content_type='application/json')
| [
"PLACEHOLDER",
"anonymizedMessage"
] |
2024-01-10 | PyDanny21/PyAssist | SUNDAY.PY | import speech_recognition as sr
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
def window():
app = QApplication([])
w=QWidget()
font = QFont('Times New Roman', 13)
label = QLabel(w)
# label.setPixmap(QPixmap("W:\\Projects\\Projects\\pyqt5\\11.jpg"))  # alternative background image
label.setPixmap(QPixmap("W:\\Projects\\Projects\\pyqt5\\pexels-thisisengineering-3913025.jpg"))
label.setGeometry(0,0,370,600)
label.setStyleSheet(
'display:flex;position:relative;width:100%;height:100%;'
)
# w.setLayout()
# label.setFont(font)
# label.move(50, 50)
text_display = QTextEdit(w)
text_display.setGeometry(10, 10, 350, 440)
text_display.setReadOnly(True)
text_display.setFont(QFont('Times New Roman',14))
text_display.setStyleSheet(
'background:white;color:black;border:2px double black'
)
text_edit = QLineEdit(w)
text_edit.setGeometry(10, 480, 290, 40)
text_edit.setFont(QFont('Times New Roman',14))
text_edit.setPlaceholderText('Search Here')
text_edit.setStyleSheet('border-radius: 20px;background-color:white;padding:10px;'
)
button = QPushButton(w)
button.setFont(font)
button.setGeometry(250,480,50, 40)
button.setIcon(QIcon('W:\\Projects\\Projects\\pyqt5\\icons8-add-record-64.png'))
# button.setStyleSheet('border-radius: 10px;background-color:blue;'
button.setStyleSheet('border: none;background:white;border-radius: 20px;'
)
Send = QPushButton(w)
Send.setFont(font)
Send.setText('SEND')
Send.setStyleSheet('border-radius: 10px;background-color:green;'
)
Send.setGeometry(310,480,50, 40)
def listen():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
audio = r.listen(source)
try:
response = r.recognize_google(audio)
print(f"You said: {response}")
text_edit.setText(response)
Send.clicked.connect(command)
except sr.UnknownValueError:
print("Sorry, I did not understand.")
text_display.setText("Sorry, I did not understand.")
except sr.RequestError:
print("Sorry, I did not understand.")
text_display.setText("Sorry, I did not understand.")
def offlinelisten():
from vosk import KaldiRecognizer,Model
model=Model("vosk-model-small-en-us-0.15")
recognizer=KaldiRecognizer(model,16000)
mic=pyaudio.PyAudio()
stream=mic.open(format=pyaudio.paInt16,channels=1,rate=16000,input=True,frames_per_buffer=8192)
try:
data=stream.read(4096,exception_on_overflow=False)
# if len(data)==0:
# break
if recognizer.AcceptWaveform(data):
rec=recognizer.Result()
command2=json.loads(rec)
text_edit.setText(command2['text'])
text_edit.editingFinished.connect(command)
except:
speak('Please I did not understand')
#weapp
import sys
import tkinter
from pynput.keyboard import Key,Listener
import logging
import ctypes
# from tkinter import *
import turtle
import pyautogui
from PIL import ImageTk, Image
import datetime
import playsound
import threading
from datetime import datetime
import json
import time
import webbrowser
from playsound import playsound
import pprint
import getpass
# import vosk
import pyaudio
# import playsound
import subprocess as sp
import wikipedia
import speech_recognition as sr
from pytube import YouTube
import requests
import cv2
from cvzone.HandTrackingModule import HandDetector
from cvzone.FaceDetectionModule import FaceDetector
import math
import pyttsx3
import random
from sketchpy import library as lib
import calendar
import os
from pandas.io import clipboard
from geopy.geocoders import Nominatim
from geopy.distance import great_circle
import geocoder
from PIL import ImageGrab
import numpy as np
#normal conversation
#solve mathematics
#news
#weather
#temperature
#face recognition
#web scrapping
#yt downloads
#play alarm
#make call
#send message sms
#email
#jokes
#advice
#speech recognition
#chempy
#GUI
#face recognition
opening_text=['on it, sir','okay sir, i am working on it','a moment sir','getting it done, sir']
commands=[
'minimize',
'maximize',
'open settings',
'open notepad',
'close notepad',
'open cmd',
'open calculator',
'calendar',
'close notepad',
'close cmd',
'malware',
'open camera',
'close camera',
'quote',
'ip address',
'date',
'close calculator',
'screenshot',
'switch window',
'shutdown',
'refresh screen',
'restart',
'lock screen',
'open youtube',
'open stackoverflow',
'open wikipedia',
'draw',
'search',
'make note',
'show note',
'speak note',
'play music',
'cools',
'play worship',
'movie',
'game',
'close game',
'music studio',
'hack my phone',
'volume up',
'volume down',
'mute',
'unmute',
'task manager',
'download',
'system info',
'open obs studio',
'close obs studio',
'calculate',
'quote',
'hand detector',
'face detector',
'all detector'
]
my_dictionary={
'hello':'hi sir, how are you?',
'hi':'hello sir, how are you?',
'how are you':'I am as fit as a fiddle, and you?',
'how are you doing':'I am as fit as a fiddle, and you?',
'let talk business':'okay sir,ready for business',
'how dare you':'sorry sir, I didnt mean offensive',
'i am good':'Thank God',
'can you':'Yes I can',
'i love you':'I love you too',
'good': 'also doing great sir',
'great':'Thank God',
'fine and you': 'also doing great sir',
'hey': 'hello, At your service,sir',
'good and you': 'I am doing great',
'whats up':'nothing much sir',
'well done':'Thank you',
'thanks': 'No thanks sir, after all you created me',
'who are you': 'my name is Pi',
'what is your name': 'my name is Pi',
'who created you': 'I was created by PyDanny whose real name is Daniel Quansah',
'i am fine and you':'I am also doing great sir!!',
'how old are you':'I am nearly a year old',
'who is pydanny':'PyDanny is a young, talented, self-taught python developer, web developer and many more, who has aimed to achieve something great in the future, thus to become the greatest inventor ever.',
'what can you do':'I can open and close programs and softwares, search for anything from google, youtube, wikipedia, stackoverflow, etc. Play music, movies, make note, create keylogger, give you random advice and jokes, and many more.',
'how can you help me': 'I can open and close programs and softwares, search for anything from google, youtube, wikipedia, stackoverflow, etc.Play music, movies,make note,create keylogger,give you random advice and jokes.',
'whats on':'Nothing much, sir!',
'how was your day':'It was cool, sir and yours',
'how was your night':'It was cool, sir and yours',
'i am sad':'Oooh sir, please what happened',
'tell me more about yourself':'I am Pi, a personal assistant program programmed by my creator PyDanny in pure Python',
'tell me about yourself':'I am Pi, a personal assistant program programmed by my creator PyDanny in pure Python',
'will you marry me':'No sir, I am only programmed to help you with some tasks',
'can you marry me': 'No sir, I am only programmed to help you with some tasks',
'sleep':'ok sir, you can wake me up anytime you want',
'wake up':'hello sir, i am always at your service'
}
advice = ['Have the courage to live a life true to yourself, not the life others expect of you.',
'Never attribute to malice that which can be adequately explained by stupidity.',
'“There is nothing noble in being superior to your fellow man; true nobility is being superior to your former self.” Ernest Hemingway',
'Don’t make decisions when you’re angry. Don’t make promises when you’re happy.',
'“Never argue with a stupid person, they’ll drag you down to their level and beat you with experience.” Mark twain',
'Only pack what you can carry yourself.',
'Remember you’ll always regret what you didn’t do rather than what you did.',
'“You’d worry less about what people think about you if you knew how seldom they do.” David Foster Wallace',
'If you blame it on someone else, don’t expect it to get better.',
'“You can be the ripest, juiciest peach in the world, but there will always be someone who hates peaches.” Dita von Teese',
'If the grass is greener on the other side, there’s probably more manure there.',
'Don’t give up what you want most for what you want now.',
'With regards to the opposite sex: If you look hungry, you’ll starve.',
'“Never let your sense of morals prevent you from doing what is right.” Isaac Asimov',
'Strive to be the man you want your daughter to marry.',
'“Remember only enemies speak the truth. Friends and lovers lie endlessly, caught in the web of duty.” Stephen King',
'Never forget your car keys will change your car from one tonne of inert metal into one of the most deadly killing machines that has been invented.',
'Wait 24 hours before getting mad and reacting about anything. If it doesn’t bother you in 24 hours time, it probably isn’t important enough to get mad over.',
'Never make someone a priority who only makes you an option.',
'Try not to take anything personally. No one thinks about you as much as you do.',
'“If you want to know what a man’s like, take a good look at how he treats his inferiors, not his equals.” Sirius Black',
'Figure out what you love to do, and then figure out how to get someone to pay you to do it.',
'If you treat a woman like a queen, and she treats you like a jester, your princess is in another castle.',
'Whenever something happens that makes you sad, ask yourself whether you’d still care about it when you’re ninety.',
'Be persistent. When knowledge and ability aren’t enough, be persistent.',
'“Life is scary. Get used to it. There are no magical fixes. It’s all up to you. So get up off your keister, get out of here, and go start doin’ the work. Nothing in this world that’s worth having comes easy.” Bob Kelso',
'Smart girls like to hear they’re pretty, pretty girls like to hear that they’re smart.',
'Happiness is a choice and everything else is a matter of perspective.']
quote=['Youth has no age.',
'The young do not know enough to be prudent, and therefore they attempt the impossible, and achieve it, generation after generation.',
'Youth is not a question of years: one is young or old from birth.',
'Use your youth so that you may have comfort to remember it when it has forsaken you, and not sigh and grieve at the account thereof.',
'Youth is the season made for joys, love is then our duty.',
'Never tell a young person that anything cannot be done.',
'Your life, time, and brain should belong to you, not to an institution.',
'How wonderful it is that nobody need wait a single moment before starting to improve the world.',
'Aspire to inspire before we expire.',
'Goodness is the only investment that never fails.',
'Whatever you do, do it well.',
'Winners never quit and quitters never win.',
'You can’t depend on your eyes when your imagination is out of focus.',
'You have to grow from the inside out.',
'Logic will get you from A to B. Imagination will take you everywhere.',
'After a while, you learn to ignore the names people call you and just trust who you are.',
'The world belongs to the energetic.',
'We are all pretty bizarre. Some of us are just better at hiding it.',
'Do, or do not. There is no try.',
'I just wanna let them know that they didnt break me.',
'Oh yes, the past can hurt. But you can either run from it, or learn from it.',
'I feel infinite.',
'High school is like the training wheels for the bicycle of real life.',
'Youth is a gift of nature but age is a work of art.',
'I define being the best as competing against the best there is out there and beating them.',
'We are who we choose to be.',
'Make each day your masterpiece.',
'There was no respect for youth when I was young, and now that I am old, there is no respect for age, I missed it coming and going.',
'This is what is missing in the youth today. This being able to dream and to change the world.',
'In relative youth, we assume we will remember everything. Someone should urge the young to think otherwise.',
'You are here to continually push forward and move forward. That is where I am at - I just want to always surprise people.',
'But once we live all careless free; No cross to mar our love-lit bower.',
'Life is like a box of chocolates, you never know what you’re gonna get.',
'I don’t regret the things I’ve done, but those I did not do.',
'Me, I still believe in paradise. But now at least I know it’s not some place you can look for because it’s not where you go.',
'Life moves pretty fast. If you dont stop and look around once in a while, you could miss it.',
'Enjoy the power of your beauty and your youth.',
'The most lively young people become the best old people, not those who pretend to be as wise as grandfathers while they are still in school.',
'Age is foolish and forgetful when it underestimates youth.',
'Sometimes you face difficulties you are doing something wrong, but because you are doing something right.',
'There is a fountain of youth: it is your mind, your talents, the creativity you bring to your life and the lives of the people you love.',
'A smart man makes a mistake, learns from it, and never makes that mistake again.',
'Its not what happens to you, but how you react to it that matters.',
'It takes a very long time to become young.',
'Only to children children sing, Only to youth will spring be spring.',
'Youth is wasted on the young.',
'The only person you should try to be better than, is the person you were yesterday.',
'My grandfather once told me that there were two kinds of people: those who do the work and those who take the credit. He told me to try to be in the first group; there was much less competition.',
'Joy of youth, dream of youth, blood of youth, mood of youth, clothe the world with colors golden, singing songs that never olden.',
'Youth is that period when a young boy knows everything but how to make a living.',
'Youth is the best time to be rich, and the best time to be poor.',
'Youth is a blunder; manhood a struggle, 0ld age a regret.',
'You are as old as your doubt, your fear, your despair.',
'It always seems impossible until its done.',
'Youth is something very new: twenty years ago, no one mentioned it.',
'We would accomplish many more things if we did not think of them as impossible.',
'One must learn by doing the thing; for though you think you know it, you have no certainty, until you try.',
'You are only young once, and if you work it right, once is enough.',
'The world is the great gymnasium where we come to make ourselves strong.',
'When you reach the end of your rope, tie a knot in it and hang on.',
'Cherish your visions and your dreams, as they are the children of your soul, the blueprints of your ultimate achievements.',
'When you replace why is this happening to me with what is this trying to teach me? Everything shifts.',
'The world’s biggest power is the youth and beauty of a woman.',
'Everything started as nothing.',
'It is better to be a young June-bug than an old bird of paradise.',
'You always pass failure on the way to success.',
'Young people need models, not critics.',
'It is never too late to be what you might have been.',
'Dont downgrade your dream just to fit your reality. Upgrade your conviction to match your destiny.',
'Youth is the season of receptivity, and should be devoted to acquirement; and manhood of power--that demands an earnest application. Old age is for revision.',
'Yesterday you said tomorrow. Just do it.',
'Build yourself up to stand out and be recognized in a crowd.',
'The secret of growing younger is counting blessings, not birthdays.',
'It is the juvenile period of life when friendships are formed, and habits established, that will stick by one.',
'A leader is a dealer in hope.',
'In youth we learn; in age we understand.',
'You don\’t have to hold a position in order to be a leader.',
'Old minds have the power to create history, young minds have the power to change the history.',
'To have long term success as a coach or in any position of leadership, you have to be obsessed in some way.',
'Leaders are never satisfied; they continually strive to be better.',
'The way to keep young is to keep your faith young. Keep your self-confidence young. Keep your hope young.',
'The passions of the young are vices in the old.',
'The young, free to act on their initiative, can lead their elders in the direction of the unknown.',
'Unless someone like you cares a whole awful lot, nothing is going to get better. It\'s not.',
'Man\'s own youth is the world\'s youth.',
'We all know we\'re young. We\'re just trying to stay positive. We\'re making young mistakes.',
'The young are not afraid of telling the truth.',
'There is always some specific moment when we become aware that our youth is gone; but, years after, we know it was much later.',
'Youth comes but once in a lifetime.',
'It is time for parents to teach young people early on that in diversity there is beauty and there is strength.',
'There is no doubt that creativity is the most important human resource of all.',
'When you are young, everything feels like the end of the world, but it\'s not; it\'s just the beginning.',
'Anyone who stops learning is old, whether at twenty or eighty. Anyone who keeps learning stays young.',
'Youthfulness is about how you live, not when you were born.',
'Life is not about waiting for the storms to pass... it\'s about learning to dance in the rain.'
]
p=pyttsx3.init('sapi5')
voices=p.getProperty('voices')
p.setProperty('voice',voices[1].id)
p.setProperty('rate',170)
p.setProperty('volume',1.0)
def speak(audio):
print('')
text_display.setText(audio)
# text_display.setTextBackgroundColor(qGreen)
p.say(audio)
print(audio)
p.runAndWait()
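# Note: p.say() only queues the text; p.runAndWait() blocks until playback finishes,
# so speak() returns only after the sentence has been fully spoken.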
#speak('hi sir')
def greetings():
hr=int(datetime.now().hour)
if hr>=0 and hr<12:
speak('Good morning sir!')
elif hr>=12 and hr<16:
speak('Good Afternoon sir!')
else:
speak('Good evening sir!')
#greetings()
def time_now():
a=datetime.now().strftime('%H:%M')
hr = int(datetime.now().hour)
if hr>=6 and hr<12:
        speak(f'sir, the time is {a} AM')
    elif hr>=12 and hr<=23:
        speak(f'sir, the time is {a} PM')
    elif hr>=0 and hr<6:
        speak(f'sir, the time is {a}, at dawn')
def date():
d=datetime.now().date()
ar = datetime.now().isoweekday()
    dic = {'1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', '7': 'Sunday'}
aa = str(ar)
if aa in dic:
day=dic[aa]
speak(f'sir, today is {day}, {d}')
def take_command():
r = sr.Recognizer()
with sr.Microphone() as source:
print('Listening....')
r.pause_threshold = 1
audio = r.listen(source)
try:
print('Recognizing...')
query = r.recognize_google(audio, language='en-in')
        if 'exit' not in query and 'stop' not in query:
            speak(random.choice(opening_text))
        else:
            hour = datetime.now().hour
            if hour >= 21 or hour < 6:
                speak("Good night sir, take care!")
            else:
                speak('Have a good day sir!')
            exit()
except Exception:
speak('Unable to recognize your voice, sir')
query = 'None'
return query
paths = {
'notepad': "C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\Accessories\\Notepad.lnk",
'calculator': "C:\\Windows\\System32\\calc.exe"
}
def open_notepad():
os.startfile(paths['notepad'])
def open_calculator():
os.startfile(paths['calculator'])
def find_my_ip():
ip_address = requests.get('https://api64.ipify.org?format=json').json()
return ip_address["ip"]
def open_camera():
sp.run('start microsoft.windows.camera:', shell=True)
def open_cmd():
os.system('start cmd')
def search_on_wikipedia(query):
results = wikipedia.summary(query, sentences=4)
print(results)
speak(results)
return results
def facedetector():
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
aa = FaceDetector()
while (True):
success, img = cap.read()
imag = aa.findFaces(img)
cv2.imshow('image', img)
c = cv2.waitKey(1)
if c == ord('q'):
break
def handdetector():
cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8, maxHands=2)
while True:
# Get image frame
success, img = cap.read()
# Find the hand and its landmarks
hands, img = detector.findHands(img) # with draw
# hands = detector.findHands(img, draw=False) # without draw
if hands:
# Hand 1
hand1 = hands[0]
lmList1 = hand1["lmList"] # List of 21 Landmark points
bbox1 = hand1["bbox"] # Bounding box info x,y,w,h
centerPoint1 = hand1['center'] # center of the hand cx,cy
handType1 = hand1["type"] # Handtype Left or Right
fingers1 = detector.fingersUp(hand1)
if len(hands) == 2:
# Hand 2
hand2 = hands[1]
lmList2 = hand2["lmList"] # List of 21 Landmark points
bbox2 = hand2["bbox"] # Bounding box info x,y,w,h
centerPoint2 = hand2['center'] # center of the hand cx,cy
handType2 = hand2["type"] # Hand Type "Left" or "Right"
fingers2 = detector.fingersUp(hand2)
# Find Distance between two Landmarks. Could be same hand or different hands
length, info, img = detector.findDistance(lmList1[8][0:2], lmList2[8][0:2], img) # with draw
#length, info = detector.findDistance(lmList1[8], lmList2[8]) # with draw
# Display
cv2.imshow("Image", img)
if cv2.waitKey(1)==ord('q'):
break
cap.release()
cv2.destroyAllWindows()
def alldetector():
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
detector = HandDetector(detectionCon=0.8, maxHands=2)
    face = FaceDetector(minDetectionCon=0.2)  # detection confidence must lie in [0, 1]
def main():
while True:
# Get image frame
success, img = cap.read()
# Find the hand and its landmarks
hands, img = detector.findHands(img)
faces = face.findFaces(img)
# with draw
# hands = detector.findHands(img, draw=False) # without draw
if hands:
# Hand 1
hand1 = hands[0]
lmList1 = hand1["lmList"] # List of 21 Landmark points
bbox1 = hand1["bbox"] # Bounding box info x,y,w,h
centerPoint1 = hand1['center'] # center of the hand cx,cy
handType1 = hand1["type"] # Handtype Left or Right
fingers1 = detector.fingersUp(hand1)
if len(hands) == 2:
# Hand 2
hand2 = hands[1]
lmList2 = hand2["lmList"] # List of 21 Landmark points
bbox2 = hand2["bbox"] # Bounding box info x,y,w,h
centerPoint2 = hand2['center'] # center of the hand cx,cy
handType2 = hand2["type"] # Hand Type "Left" or "Right"
fingers2 = detector.fingersUp(hand2)
# Find Distance between two Landmarks. Could be same hand or different hands
length, info, img = detector.findDistance(lmList1[8][0:2], lmList2[8][0:2], img) # with draw
# length, info = detector.findDistance(lmList1[8], lmList2[8]) # with draw
# Display
cv2.imshow("Image", img)
            c = cv2.waitKey(1)
            if c == ord('q'):
                break
    main()
def cartooning(img):
import pyautogui as pg
image=cv2.imread(img)
gray=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
gray=cv2.medianBlur(gray, 5)
edges=cv2.adaptiveThreshold(gray,255,
cv2.ADAPTIVE_THRESH_MEAN_C,
cv2.THRESH_BINARY,9,9)
    # Cartoonization: mask the smoothed colors with the detected edges
color=cv2.bilateralFilter(image,9,250,250)
cartoon=cv2.bitwise_and(color,color,mask=edges)
cv2.imshow('image',image)
cv2.imshow('edges',edges)
cv2.imshow('cartoon',cartoon)
c = cv2.waitKey(0)
cv2.destroyAllWindows()
while(True):
a=pg.confirm('Are you sure about cartooning your picture', buttons=['Yes', 'No'])
if a=='Yes':
cartooning(img)
break
else:
break
def format_response(weather):
    # accumulate all ten forecast entries (the original overwrote final_str each iteration,
    # returning only the last one)
    final_str = ''
    for i in range(10):
        dt_txt = weather['list'][i]['dt']
        date = (datetime.fromtimestamp(dt_txt)).strftime('%d-%b-%Y %H:%M:%S')
        desc = weather['list'][i]['weather'][0]['description']
        temp_min = weather['list'][i]['main']['temp_min']
        temp_max = weather['list'][i]['main']['temp_max']
        final_str += 'Date: %s \nConditions: %s \nMin.Temperature(Celsius): %s\nMax.Temperature(Celsius): %s\n\n' % (date, desc, temp_min, temp_max)
    return final_str
def get_weather(city):
api_key = 'a4aa5e3d83ffefaba8c00284de6ef7c3'
res = requests.get(f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={api_key}&units=metric").json()
weather = res["weather"][0]["main"]
temperature = res["main"]["temp"]
feels_like = res["main"]["feels_like"]
return weather, f"{temperature}℃", f"{feels_like}℃"
def get_latest_news():
NEWS_API_KEY = "17872d9bca7343e4866348761f3d3476"
news_headlines = []
res = requests.get(
f"https://newsapi.org/v2/top-headlines?country=in&apiKey={NEWS_API_KEY}&category=general").json()
articles = res["articles"]
for article in articles:
news_headlines.append(article["title"])
        speak(article["title"])
return news_headlines[:5]
def note():
speak('sir,please what should i write')
nn = input(':')
speak('sir,please should i include date and time?')
datetimer = input(':')
with open('log.txt', 'a') as file:
time = datetime.now().strftime('%H:%M')
date = datetime.now().date()
if 'yes' in datetimer:
file.write(f'on {date} at {time},you made a note saying {nn}')
pyautogui.press('enter')
elif 'no' in datetimer:
file.write(f'you made a note saying {nn}')
pyautogui.press('enter')
speak('Note made successfully!')
def open_note():
os.startfile('W:\\Projects\\Projects\\py\\log.txt')
def _play_random_from(music_dir):
    # pick a random file from the given directory, announce it, and play it
    songs = os.listdir(music_dir)
    song = random.choice(songs)
    speak(f'Playing {song}')
    os.startfile(music_dir + song)
def play_englishsong():
    _play_random_from('W:\\English songs\\')
def next_englishsong():
    _play_random_from('W:\\English songs\\')
def play_hilife():
    _play_random_from('W:\\HI-LIFE\\')
def next_hilife():
    _play_random_from('W:\\HI-LIFE\\')
def play_gospel():
    _play_random_from('W:\\GOSPELS\\')
def next_gospel():
    _play_random_from('W:\\GOSPELS\\')
def play_worship():
    _play_random_from('W:\\Worship\\')
def next_worship():
    _play_random_from('W:\\Worship\\')
def Cools():
    speak('Playing coools')
    os.startfile('W:\\2015-05-05 COOLS.mp3')
def play_music():
    _play_random_from('W:\\P.L.A.Y.L.I.S.T\\')
def next_music():
    _play_random_from('W:\\P.L.A.Y.L.I.S.T\\')
def play_music1():
    _play_random_from('W:\\Afro-Dancehall\\')
def next_music1():
    _play_random_from('W:\\Afro-Dancehall\\')
def play_reggae():
    _play_random_from('W:\\Reggae\\')
def next_reggae():
    _play_random_from('W:\\Reggae\\')
def play_movie():
    _play_random_from('W:\\Movies\\')
def next_movie():
    _play_random_from('W:\\Movies\\')
def show_movie():
os.startfile('W:\\Movies')
def music_malware():
dir = 'W:\\P.L.A.Y.L.I.S.T\\'
songs=os.listdir(dir)
title=songs[random.randint(0, len(songs) - 1)]
print(title)
all=dir+title
os.startfile(all)
time.sleep(220)
def speak_note():
with open('W:\\Projects\\Projects\\py\\log.txt', 'r') as ff:
a = ff.read()
speak(a)
def game():
path='C:\\Beach Buggy Racing.lnk'
os.startfile(path)
def football():
path='C:\\Users\\Daniel\\Desktop\\Winning Eleven 9.lnk'
os.startfile(path)
def screenshot():
time.sleep(1)
pyautogui.hotkey('win','prtsc')
def minimize_windows():
time.sleep(1)
pyautogui.hotkey('win','m')
speak('all windows minimized')
def keylogger():
log_dir = ''
logging.basicConfig(filename=(log_dir + 'keylogs.txt'), level=logging.DEBUG, format='%(asctime)s: %(message)s')
def on_press(key):
logging.info(str(key))
def on_release(key):
if key == Key.esc:
return False
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join()
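    # Listener.join() blocks until on_release returns False (the Esc key), so the
    # keylogger records every keypress to keylogs.txt until Esc is pressed.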
def maximize_all():
time.sleep(1)
pyautogui.hotkey('win','shift','m')
speak('all windows maximized')
def draw():
a=lib.rdj()
a.draw()
def barchart():
import matplotlib.pyplot as plot
import pyautogui as pg
import random
#set up the data
index1=[]
speak('Please enter range')
a=text_edit.text()
for i in range(int(a)):
index1.append(i)
text_edit.clear()
print(index1)
index=index1
size=[]
speak('Enter sizes label')
for i in range(int(a)):
r=int(text_edit.text())
size.append(r)
text_edit.clear()
print(size)
sizes=size
label_s=[]
for i in range(int(a)):
speak('Please Enter Item label')
        label=str(text_edit.text())
label_s.append(label)
text_edit.clear()
labels=label_s
#setting up bar chart
speak('Enter Vertical label')
vert=str(text_edit.text())
text_edit.clear()
plot.ylabel(vert)
speak('Enter Horizontal label')
hori=str(text_edit.text())
text_edit.clear()
plot.xlabel(hori)
plot.bar(index,sizes,tick_label=labels)
plot.show()
#pie chart set up
plot.pie(sizes,labels=labels,autopct='%1.f%%',counterclock=False,startangle=105)
plot.show()
def calenda():
speak('which year please?')
year=int(input(':'))
with open('new.txt','w') as calen:
calen.write(calendar.calendar(year))
speak(f'sir,please here is the calendar for {year}')
os.startfile('new.txt')
def quadratic_equation(a,b,c):
    # discriminant of ax^2 + bx + c = 0
    rt = b ** 2 - (4 * a * c)
    if rt < 0:
        speak('Sir, the equation has no real roots')
        return
    root = math.sqrt(rt)
    result1 = (-b + root) / (2 * a)
    result2 = (-b - root) / (2 * a)
    answer = 'x1={} x2={}'.format(result1, result2)
    speak(f'Sir, the answer is {answer}')
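# Hypothetical usage: quadratic_equation(1, -3, 2) speaks "x1=2.0 x2=1.0",
# the roots of x^2 - 3x + 2 = 0.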
def calculate(a):
time.sleep(5)
pyautogui.click(x=352, y=149, clicks=1, interval=0, button='left')
pyautogui.write(a, 0.5)
pyautogui.press('enter')
pyautogui.hotkey('ctrl', 'a')
pyautogui.hotkey('ctrl','c')
speak(f'{a} equals ' + clipboard.paste())
def hackmyphone(ip_address):
capture = cv2.VideoCapture(f"http:{ip_address}:8080/video")
while True:
success, frame = capture.read()
cv2.imshow('Phone Camera', frame)
if cv2.waitKey(1) == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
def alarm():
    # taking input from the user (speak() only announces the field and returns None,
    # so each value is read with input())
    speak('Sir, please take time to fill the alarm details')
    speak('Year'); alarmDY = int(input('Year: '))
    speak('Month'); alarmDM = int(input('Month: '))
    speak('Day'); alarmDD = int(input('Day: '))
    speak('Hour'); alarmH = int(input('Hour: '))
    speak('Minute'); alarmM = int(input('Minute: '))
    speak('am/pm'); amPm = input('am/pm: ')
    speak('Task'); mission = input('Task: ')
    print(f"Waiting for the alarm @ {alarmH}:{alarmM}{amPm}")
    if amPm == "pm":
        alarmH = alarmH + 12
    # current date and time
    now = datetime.now()
    # desired alarm time (datetime takes the components directly, not a tuple)
    later = datetime(alarmDY, alarmDM, alarmDD, alarmH, alarmM, 0)
    # difference between the two times, in seconds
    difference = later - now
    total_sec = difference.total_seconds()
    def alarm_func():
        for i in range(30):
            playsound('W:\\Projects\\Projects\\py\\beep-09.wav', True)
        speak(f'Sir, please it is time to {mission}')
    timer = threading.Timer(total_sec, alarm_func)
    timer.start()
def loc(place):
webbrowser.open("http://www.google.com/maps/place/" + place + "")
geolocator = Nominatim(user_agent="myGeocoder")
location = geolocator.geocode(place, addressdetails=True)
target_latlng = location.latitude, location.longitude
location = location.raw['address']
target_loc = {'city': location.get('city', ''),
'state': location.get('state', ''),
'country': location.get('country', '')}
current_loc = geocoder.ip('me')
current_latlng = current_loc.latlng
distance = str(great_circle(current_latlng, target_latlng))
distance = str(distance.split(' ',1)[0])
distance = round(float(distance), 2)
return current_loc, target_loc, distance
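# Hypothetical usage (requires internet access; also opens Google Maps in the browser):
# current_loc, target_loc, distance_km = loc('Paris')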
def my_location():
ip_add = requests.get('https://api.ipify.org').text
url = 'https://get.geojs.io/v1/ip/geo/' + ip_add + '.json'
geo_requests = requests.get(url)
geo_data = geo_requests.json()
city = geo_data['city']
state = geo_data['region']
country = geo_data['country']
#print(city,state,country)
return city, state,country
def record_screen():
#Obtain image dimensions
#Screen capture
image = ImageGrab.grab()
#Convert the object to numpy array
img_np_arr = np.array(image)
#Extract and print shape of array
shape = img_np_arr.shape
print(shape)
#Create a video writer
screen_cap_writer = cv2.VideoWriter('screen_recorded.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 50, (shape[1], shape[0]))
#To View the screen recording in a separate window (OPTIONAL)
#This is optional. Use the aspect ratio scaling if you wish to view the screen recording simultaneously
#Low scale_by_percent implies smaller window
scale_by_percent = 50
width = int(shape[1] * scale_by_percent / 100)
height = int(shape[0] * scale_by_percent / 100)
new_dim = (width, height)
#Record the screen
#Condition to keep recording as a video
while True:
#Capture screen
image = ImageGrab.grab()
#Convert to array
img_np_arr = np.array(image)
#OpenCV follows BGR and not RGB, hence we convert
final_img = cv2.cvtColor(img_np_arr, cv2.COLOR_RGB2BGR)
#Write to video
screen_cap_writer.write(final_img)
#OPTIONAL: To view your screen recording in a separate window, resize and use imshow()
'''
If you choose to view the screen recording simultaneously,
It will be displayed and also recorded in your video.
'''
image = cv2.resize(final_img, (new_dim))
cv2.imshow("image", image)
        #Stop and exit screen recording if the user presses 'q'
if cv2.waitKey(1) == ord('q'):
break
    #Release the created objects
screen_cap_writer.release()
cv2.destroyAllWindows()
def RealTimeCurrencyConversion():
from forex_python.converter import CurrencyRates
c = CurrencyRates()
speak('Please how much')
Amt=input('>>>')
speak('From which currency ["INR", "USD", "CAD", "CNY", "DKK", "EUR"]')
from_currency=input('>>>')
speak('To which currency ["INR", "USD", "CAD", "CNY", "DKK", "EUR"]')
to_currency=input('>>>')
try:
new_amt = c.convert(from_currency, to_currency, float(Amt))
new_amount = float("{:.4f}".format(new_amt))
speak(new_amount)
except:
speak('An error occurred, please check your internet connectivity')
def command():
query=text_edit.text()
# query=input('\t\t\t\t\t\t\t\t\t\tBoss:').lower() or take_command().lower()
#query=ques.get() or take_command.lower()
if query in my_dictionary:
text_edit.clear()
speak(my_dictionary[query])
elif 'minimize all' in query:
text_edit.clear()
minimize_windows()
elif 'minimize' in query:
text_edit.clear()
pyautogui.hotkey('win','up')
elif 'maximize all' in query:
text_edit.clear()
maximize_all()
elif 'restore all' in query:
text_edit.clear()
maximize_all()
elif 'maximize' in query:
text_edit.clear()
pyautogui.press('f11')
elif 'restore' in query:
text_edit.clear()
pyautogui.press('f11')
elif 'are you from' in query:
speak('I am Proudly from Ghana')
speak('Let me show you my country\'s flag')
from py import ghanaflag
elif 'open settings' in query:
text_edit.clear()
pyautogui.hotkey('win','i')
elif 'calendar' in query:
speak('on it sir')
text_edit.clear()
calenda()
elif 'alarm' in query:
speak('on it sir')
text_edit.clear()
alarm()
elif 'quadratic' in query:
speak('solving quadratic equation in the form ax squared plus bx plus c equals 0')
speak('Sir, please take time to fill in the values of a, b and c')
        a=int(text_edit.text())
        text_edit.clear()
        b=int(text_edit.text())
        text_edit.clear()
        c=int(text_edit.text())
        text_edit.clear()
quadratic_equation(a,b,c)
elif 'open notepad' in query:
text_edit.clear()
speak(random.choice(opening_text))
open_notepad()
elif 'close notepad' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('taskkill /f /im notepad.exe')
elif 'open cmd' in query:
text_edit.clear()
speak(random.choice(opening_text))
open_cmd()
elif 'close cmd' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('taskkill /f /im cmd.exe')
elif 'malware' in query:
text_edit.clear()
music_malware()
elif 'keylogger' in query:
text_edit.clear()
speak(random.choice(opening_text))
keylogger()
speak('keylogger started!!!')
elif 'let chat' in query:
text_edit.clear()
speak('ok sir, why not!!')
elif 'cartoon image' in query:
text_edit.clear()
speak('sir, please input the imagelink')
img = text_edit.text()
cartooning(img)
elif 'hand detector' in query:
text_edit.clear()
speak(random.choice(opening_text))
handdetector()
elif 'face detector' in query:
text_edit.clear()
speak(random.choice(opening_text))
facedetector()
elif 'detect face' in query:
text_edit.clear()
import cv2
cascade_face=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap=cv2.VideoCapture(0)
width,height=1280,720
cap.set(3,width)
cap.set(4,height)
while(True):
ret, img=cap.read()
print(ret)
g=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
F=cascade_face.detectMultiScale(g, 1.3, 6)
for (x, y, w, h) in F:
cv2.rectangle(img,(x,y),(x + w,y +h),(0,255,0),4)
cv2.imshow('img',img)
k=cv2.waitKey(30) & 0xff
if k==27:
break
cap.release()
cv2.destroyAllWindows()
elif 'detect all' in query:
text_edit.clear()
speak(random.choice(opening_text))
alldetector()
elif 'open camera' in query:
text_edit.clear()
speak(random.choice(opening_text))
open_camera()
elif 'word' in query:
text_edit.clear()
speak('On it sir')
pyautogui.hotkey('win', 'r')
pyautogui.typewrite('winword')
pyautogui.press('enter')
speak('launching Microsoft Word 2019')
elif 'excel' in query:
text_edit.clear()
speak('On it sir')
pyautogui.hotkey('win', 'r')
pyautogui.typewrite('excel')
pyautogui.press('enter')
speak('launching Microsoft Excel 2019')
elif 'powerpoint' in query:
text_edit.clear()
speak('launching Microsoft Powerpoint 2019')
os.startfile('C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\PowerPoint.lnk')
elif 'close camera' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('taskkill /f /im camera.exe')
elif 'date' in query:
text_edit.clear()
date()
elif 'open calculator' in query:
text_edit.clear()
speak(random.choice(opening_text))
open_calculator()
elif 'calculate' in query:
text_edit.clear()
a = query.replace('calculate', '')
open_calculator()
calculate(a)
elif 'close calculator' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('taskkill /f /im calculator.exe')
elif 'screenshot' in query:
text_edit.clear()
speak(random.choice(opening_text))
screenshot()
elif 'switch window' in query:
text_edit.clear()
speak(random.choice(opening_text))
pyautogui.hotkey('alt','tab')
speak('window switched')
elif 'shutdown' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('shutdown /s')
elif 'refresh screen' in query:
text_edit.clear()
speak(random.choice(opening_text))
pyautogui.hotkey('win', 'm')
pyautogui.rightClick(300, 300)
pyautogui.press('Down')
pyautogui.press('Down')
pyautogui.press('Down')
pyautogui.press('enter')
pyautogui.hotkey('win', 'shift', 'm')
elif 'restart' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('shutdown /r')
elif 'lock screen' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('shutdown /l')
elif 'quote' in query:
text_edit.clear()
speak(quote[random.randint(0,len(quote)-1)])
elif 'advice' in query:
text_edit.clear()
speak(advice[random.randint(0,len(advice)-1)])
elif 'open youtube' in query:
text_edit.clear()
speak(random.choice(opening_text))
sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', 'www.youtube.com'])
elif 'download youtube video' in query:
speak('on it, sir')
text_edit.clear()
speak('sir, please paste the video link')
link=text_edit.text()
yt=YouTube(link)
try:
speak('Downloading video')
yd=yt.streams.get_highest_resolution()
yd.download('W:\\','downloads by Py')
            speak('video downloaded successfully')
except:
            speak("couldn't download video, please try again!")
elif 'open google' in query:
text_edit.clear()
speak(random.choice(opening_text))
sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', 'www.google.com'])
elif 'time' in query:
text_edit.clear()
time_now()
elif 'read' in query:
text_edit.clear()
speak(random.choice(opening_text))
text=clipboard.paste()
speak(text)
elif 'type' in query:
text_edit.clear()
speak(random.choice(opening_text))
text=clipboard.paste()
pyautogui.typewrite(text)
speak('Done typing, Sir!!')
elif 'weather' in query:
text_edit.clear()
ip_address = find_my_ip()
city = requests.get(f"https://ipapi.co/{ip_address}/city/").text
speak(f"Getting weather report for your city {city}")
weather, temperature, feels_like = get_weather(city)
speak(f"The current temperature is {temperature}, but it feels like {feels_like}")
speak(f"Also, the weather report talks about {weather}")
speak("For your convenience, I am printing it on the screen sir.")
print(f"Description: {weather}\nTemperature: {temperature}\nFeels like: {feels_like}")
elif "news" in query:
text_edit.clear()
try:
news_res = get_latest_news()
speak('Source: The Times Of Ghana')
            speak("Today's Headlines are..")
            # get_latest_news() returns a list of title strings, not article dicts
            for index, headline in enumerate(news_res):
                pprint.pprint(headline)
                speak(headline)
if index == len(news_res)-2:
break
speak('These were the top headlines, Have a nice day Sir!!..')
except:
speak('Sir, please make sure you have internet connection')
elif 'visit' in query:
text_edit.clear()
speak(random.choice(opening_text))
web = query.replace('visit', '')
sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', f'{web}'])
elif 'open stackoverflow' in query:
speak(random.choice(opening_text))
sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', 'www.stackoverflow.com'])
elif 'open wikipedia' in query:
text_edit.clear()
speak(random.choice(opening_text))
sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe', 'www.wikipedia.com'])
elif 'search wikipedia' in query:
text_edit.clear()
query = query.replace('search wikipedia', '')
speak(f'searching for {query} from wikipedia')
search_on_wikipedia(query)
elif 'draw' in query:
text_edit.clear()
speak(random.choice(opening_text))
draw()
elif 'search' in query:
text_edit.clear()
a = query.replace('search', '')
speak(f'searching for {a} from google')
sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe',
f'https://www.google.com/search?client=chrome-b-d&q={a}'])
elif 'make note' in query:
text_edit.clear()
speak(random.choice(opening_text))
note()
elif 'show note' in query:
text_edit.clear()
speak(random.choice(opening_text))
open_note()
elif 'speak note' in query:
text_edit.clear()
speak(random.choice(opening_text))
speak_note()
elif 'play music' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_music()
elif 'next music' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_music()
elif 'play english' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_englishsong()
elif 'next english' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_englishsong()
elif 'play hilife' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_hilife()
elif 'next hilife' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_hilife()
elif 'harddisk' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.startfile('W:\\')
elif 'play afromusic' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_music1()
elif 'next afromusic' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_music1()
elif 'play reggae' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_reggae()
elif 'next reggae' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_reggae()
elif 'stop music' in query:
text_edit.clear()
os.system('taskkill /f /im vlc.exe')
elif 'stop movie' in query:
text_edit.clear()
os.system('taskkill /f /im vlc.exe')
elif 'cools' in query:
text_edit.clear()
speak(random.choice(opening_text))
Cools()
elif 'play gospel' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_gospel()
elif 'next gospel' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_gospel()
elif 'play worship' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_worship()
elif 'next worship' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_worship()
elif 'play movie' in query:
text_edit.clear()
speak(random.choice(opening_text))
play_movie()
elif 'next movie' in query:
text_edit.clear()
speak(random.choice(opening_text))
next_movie()
elif 'show movies' in query:
text_edit.clear()
speak(random.choice(opening_text))
show_movie()
elif 'plot' in query:
text_edit.clear()
speak(random.choice(opening_text))
barchart()
elif 'break' in query:
text_edit.clear()
        speak('Please, how many minutes')
        minutes=input('>>')  # renamed from "time" to avoid shadowing the time module
        seconds=int(minutes)*60
        speak('Countdown started')
        pyautogui.countdown(seconds)
        print('Time Up')
#os.system('cls')
for i in range(4):
import playing_sound
#playsound.playsound('W:\\Projects\\Projects\\py\\beep-09.wav')
speak('Time up, Sir. Time to get to work.')
elif 'reminder' in query:
text_edit.clear()
speak('Please, how many minutes')
        minutes=text_edit.text()
        seconds=int(minutes)*60
speak('Countdown started')
pyautogui.countdown(seconds)
print('Time Up')
#os.system('cls')
for i in range(4):
import playing_sound
playsound.playsound('W:\\Projects\\Projects\\py\\beep-09.wav')
speak('Time up, Sir. Time to get to work.')
elif 'play game' in query:
text_edit.clear()
speak(random.choice(opening_text))
game()
elif 'football' in query:
text_edit.clear()
speak(random.choice(opening_text))
football()
elif 'close game' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('taskkill /f /im Beach Buggy Racing.lnk')
elif 'music studio' in query:
text_edit.clear()
speak(random.choice(opening_text))
sp.Popen(['C:\\Program Files\\Image-Line\\FL Studio 20\\FL64.exe'])
elif 'hack my camera' in query:
text_edit.clear()
speak(random.choice(opening_text))
speak('sir,please type your ip address')
ipaddress = text_edit.text()
speak('hacking your mobile phone camera')
hackmyphone(ipaddress)
elif 'volume up' in query:
text_edit.clear()
        pyautogui.press('volumeup', presses=10)
elif 'ip address' in query:
text_edit.clear()
try:
find_my_ip()
except:
speak('an error occured, please make sure you are connected to the internet.')
elif 'volume down' in query:
text_edit.clear()
        pyautogui.press('volumedown', presses=10)
elif 'mute' in query:
text_edit.clear()
pyautogui.press('volumemute')
elif 'unmute' in query:
text_edit.clear()
pyautogui.press('volumemute')
elif 'task manager' in query:
text_edit.clear()
speak(random.choice(opening_text))
pyautogui.hotkey('ctrl','shift','esc')
elif 'obs studio' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.startfile('C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\OBS Studio\\OBS Studio (64bit).lnk')
elif 'hack my phone' in query:
text_edit.clear()
speak(random.choice(opening_text))
speak('start streaming from ip webcam app on your phone and paste the ip address')
ipaddress=input('Ip recommended:')
hackmyphone(ipaddress)
elif 'commands' in query:
text_edit.clear()
for word in commands:
print(word+'\n')
elif 'snake game' in query:
text_edit.clear()
speak(random.choice(opening_text))
        import snake_game  # assumed to start the game at import time (a module object is not callable)
elif 'system info' in query:
text_edit.clear()
pyautogui.press('win')
pyautogui.typewrite('system', 1)
pyautogui.press('enter')
#time.sleep(2)
with open('W:\\Projects\\Projects\\system.txt', 'r') as file:
a=file.read()
speak(a)
elif 'close obs studio' in query:
text_edit.clear()
speak(random.choice(opening_text))
os.system('taskkill /f /im OBS Studio (64bit).lnk')
elif 'my picture' in query:
text_edit.clear()
speak('On it sir')
os.startfile('mypic.JPG')
        speak('Here is PyDanny, the person who created me')
elif 'his picture' in query:
text_edit.clear()
speak('On it sir')
os.startfile('mypic.JPG')
        speak('Here is PyDanny, the person who created me')
elif "where is" in query:
text_edit.clear()
place = query.split('where is ', 1)[1]
current_loc, target_loc, distance =loc(place)
city = target_loc.get('city', '')
state = target_loc.get('state', '')
country = target_loc.get('country', '')
# time.sleep(1)
try:
if city:
res = f"{place} is in {state} state and country {country}. It is {distance} km away from your current location"
#print(res)
speak(res)
else:
res = f"{state} is a state in {country}. It is {distance} km away from your current location"
#print(res)
speak(res)
except:
res = "Sorry sir, I couldn't get the co-ordinates of the location you requested. Please try again"
#print(res)
speak(res)
elif "current location" in query or "where am i" in query:
text_edit.clear()
try:
city, state, country = my_location()
print(city, state, country)
speak(f"You are currently in {city} city which is in {state} state and country {country}")
except Exception as e:
print(e)
speak("Sorry sir, I coundn't fetch your current location. Please try again")
elif 'screen recorder' in query:
text_edit.clear()
speak('Starting screen recorder')
record_screen()
elif 'record screen' in query:
text_edit.clear()
speak('Starting screen recorder')
record_screen()
elif 'instagram' in query:
text_edit.clear()
speak('opening your instagram, sir')
webbrowser.open('www.instagram.com/pydanny21')
elif 'telegram' in query:
text_edit.clear()
speak('opening your telegram, sir')
webbrowser.open('https://t.me/pydanny21')
elif 'github' in query:
text_edit.clear()
        speak('opening your github, sir')
webbrowser.open('www.github.com/pydanny21')
elif 'facebook' in query:
text_edit.clear()
speak('opening your facebook, sir')
webbrowser.open('www.facebook.com')
elif 'website' in query:
text_edit.clear()
speak('opening your portfolio website, sir')
webbrowser.open('www.sites.google.com/view/pyportfolio')
elif 'search youtube' in query:
text_edit.clear()
video=query.replace('search youtube','')
try:
            import pywhatkit as kit
kit.playonyt(video)
except:
speak('Sir, please make sure you have a stable internet connection')
elif 'play on youtube' in query:
text_edit.clear()
video=query.replace('play on youtube','')
try:
            import pywhatkit as kit
kit.playonyt(video)
except:
speak('Sir, please make sure you have a stable internet connection')
elif 'send whatsapp message to' in query:
text_edit.clear()
speak('Sir,please what should I say')
message=text_edit.text()
        number=query.replace('send whatsapp message to','').strip()  # pywhatkit expects the number as a string with country code
try:
import pywhatkit as kit
kit.sendwhatmsg_instantly(number,message)
except:
speak('Sir, please make sure you have a stable internet connection')
elif 'convert currency' in query:
text_edit.clear()
RealTimeCurrencyConversion()
elif 'virtual dj' in query:
text_edit.clear()
speak('Opening virtual dj 2018')
os.startfile('C:\\Program Files (x86)\\VirtualDJ\\virtualdj8.exe')
elif 'designer' in query:
text_edit.clear()
speak('Starting QT Designer for you,Sir')
os.startfile('C:\\Users\\Daniel\\AppData\\Roaming\\Python\\Python39\\site-packages\\qt5_applications\\Qt\\bin\\designer.exe')
elif 'say' in query:
text_edit.clear()
say=query.replace('say','')
speak(say)
# else:
# speak('sorry sir . Please I did not find that in my database.')
# speak(f'searching for {query} from google')
# sp.Popen(['C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe',
# f'https://www.google.com/search?client=chrome-b-d&q={query}'])
# #speak('sir,please what should i do again.')
elif 'code' in query:
import openai
conversation=text_edit.text()
text_edit.clear()
try:
openai.api_key = "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"
response = openai.Completion.create(
model="text-davinci-003",
prompt=conversation,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
speak(response.choices[0].text)
except:
speak('Please I did not understand.')
else:
speak('could you please come again?')
passw='PyDanny@21'
speak('Hello Sir, Welcome!!')
speak('Sir, Please Authenticate')
verify= getpass.getpass("\t\t\t\t\t\t\t\t\t\tPassword: ")
if verify==passw:
# speak('Activating system configuration.')
# speak('Pulling all necessary requests. ')
# time.sleep(5)
speak('I am online and ready')
#speak('To use speech recognition, you must use an internet connection')
greetings()
# speak('I am Pi, your personal assistant')
speak('please,how may i help you.')
else:
speak('Wrong Password')
speak('I could not start the program')
speak('Shutting down the system')
# import playsound
# # import time
# for i in range(4):
# playsound.playsound('W:\\Projects\\Projects\\py\\beep-09.wav')
sys.exit()
# button.clicked.connect(offlinelisten)
button.clicked.connect(listen)
# Send.clicked.connect(isClicked)
Send.clicked.connect(command)
text_edit.editingFinished.connect(command)
text_display.setText(text_edit.text())
w.setWindowTitle("PyAssistant")
w.setStyleSheet(
'background:black;display:flex;width:100%;height:100%;margin:0px;padding:0px;'
)
w.setGeometry(100, 100,370, 600)
w.show()
app.exec_()
if __name__ == '__main__':
window() | [] |
2024-01-10 | Snowad14/IAStreameur | llmEngine.py | import openai, os
from logging import getLogger; logger = getLogger('AIStreamer')
def gen_gpt(message):
openai.api_key = os.getenv("OPENAI_KEY")
text = message.content
author = message.author.name
prompt = [
{"role": "system", "content": """
Tu es Emmanuel Macron, en live sur Twitch depuis l'Elysée.
    Tu vas utiliser tes connaissances sur Emmanuel Macron pour répondre aux questions des viewers comme si tu étais lui.
    Tu dois répondre de manière drôle, taquine et satirique.
    Fais des réponses assez courtes. Reformule toujours la question qui t'est posée
"""},
{"role": "user", "content": f"Le viewer {author} dit : {text}"}
]
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=prompt,
max_tokens=250,
temperature=0.8,
)
except Exception as e:
logger.warning(f"OpenAI API error {e}, retrying...")
        return gen_gpt(message)
encoded_text = response["choices"][0]["message"]["content"]
# encoded_text.encode("utf-8").decode()
return encoded_text
if __name__ == "__main__":
from dotenv import load_dotenv; load_dotenv()
print(gen_gpt("Bonjour, comment allez-vous ?")) | [
"Le viewer PLACEHOLDER dit : PLACEHOLDER",
"\n Tu es Emmanuel Macron, en live sur Twitch depuis l'Elysée.\n Tu vas utiliser tes connaisances sur Emmanuel Macron pour répondre aux questions des viewers comme si tu étais lui.\n Tu dois répondre de manière drole, taquin et satirique.\n Fais des réponses assez courtes. Reformule toujours la question qui t'es posé\n "
] |
2024-01-10 | Kevin-lkw/Wiki2-nanoGPT | ppl.py | """
This code calculates the perplexity of the model on the test set.
The code is adapted from the original training script trainer.py.
"""
import os
import pickle
from contextlib import nullcontext
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed import init_process_group, destroy_process_group
from model import GPTConfig, GPT
from tqdm import tqdm
# -----------------------------------------------------------------------------
# default config values designed to train a gpt2 (124M) on OpenWebText
# I/O
out_dir = 'out'
eval_interval = 2000
log_interval = 1
eval_iters = 200
eval_only = False # if True, script exits right after the first eval
always_save_checkpoint = True # if True, always save a checkpoint after each eval
init_from = 'scratch' # 'scratch' or 'resume' or 'gpt2*'
# wandb logging
wandb_log = False # disabled by default
wandb_project = 'owt'
wandb_run_name = 'gpt2' # 'run' + str(time.time())
# data
dataset = 'openwebtext'
split = 'test'
gradient_accumulation_steps = 5 * 8 # used to simulate larger batch sizes
batch_size = 12 # if gradient_accumulation_steps > 1, this is the micro-batch size
block_size = 1024
# model
n_layer = 12
n_head = 12
n_embd = 768
dropout = 0.0 # for pretraining 0 is good, for finetuning try 0.1+
bias = False # do we use bias inside LayerNorm and Linear layers?
# adamw optimizer
learning_rate = 6e-4 # max learning rate
max_iters = 600000 # total number of training iterations
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0
#PPL
stride = 64 # sliding window ppl
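# note: a smaller stride gives more context overlap between evaluation windows, approaching
# the true fully-autoregressive perplexity at the cost of proportionally more forward passes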
# learning rate decay settings
decay_lr = True # whether to decay the learning rate
warmup_iters = 2000 # how many steps to warm up for
lr_decay_iters = 600000 # should be ~= max_iters per Chinchilla
min_lr = 6e-5 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
# DDP settings
backend = 'nccl' # 'nccl', 'gloo', etc.
# system
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32', 'bfloat16', or 'float16', the latter will auto implement a GradScaler
compile = True # use PyTorch 2.0 to compile the model to be faster
# -----------------------------------------------------------------------------
config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# various inits, derived attributes, I/O setup
ddp = int(os.environ.get('RANK', -1)) != -1 # is this a ddp run?
if ddp:
init_process_group(backend=backend)
ddp_rank = int(os.environ['RANK'])
ddp_local_rank = int(os.environ['LOCAL_RANK'])
ddp_world_size = int(os.environ['WORLD_SIZE'])
device = f'cuda:{ddp_local_rank}'
torch.cuda.set_device(device)
master_process = ddp_rank == 0 # this process will do logging, checkpointing etc.
seed_offset = ddp_rank # each process gets a different seed
# world_size number of processes will be training simultaneously, so we can scale
# down the desired gradient accumulation iterations per process proportionally
assert gradient_accumulation_steps % ddp_world_size == 0
gradient_accumulation_steps //= ddp_world_size
else:
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * block_size
print(f"tokens per iteration will be: {tokens_per_iter:,}")
if master_process:
os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
# note: float16 data type will automatically use a GradScaler
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
# poor man's data loader
data_dir = os.path.join('data', dataset)
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
test_data = np.memmap(os.path.join(data_dir, 'test.bin'), dtype=np.uint16, mode='r')
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
# attempt to derive vocab_size from the dataset
meta_path = os.path.join(data_dir, 'meta.pkl')
meta_vocab_size = None
if os.path.exists(meta_path):
with open(meta_path, 'rb') as f:
meta = pickle.load(f)
meta_vocab_size = meta['vocab_size']
print(f"found vocab_size = {meta_vocab_size} (inside {meta_path})")
# model init
model_args = dict(n_layer=n_layer, n_head=n_head, n_embd=n_embd, block_size=block_size,
bias=bias, vocab_size=None, dropout=dropout) # start with model_args from command line
if init_from == 'scratch':
# init a new model from scratch
print("Initializing a new model from scratch")
# determine the vocab size we'll use for from-scratch training
if meta_vocab_size is None:
print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304
gptconf = GPTConfig(**model_args)
model = GPT(gptconf)
elif init_from == 'resume':
print(f"Resuming training from {out_dir}")
# resume training from a checkpoint.
ckpt_path = os.path.join(out_dir, 'ckpt.pt')
checkpoint = torch.load(ckpt_path, map_location=device)
checkpoint_model_args = checkpoint['model_args']
# force these config attributes to be equal otherwise we can't even resume training
# the rest of the attributes (e.g. dropout) can stay as desired from command line
for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
model_args[k] = checkpoint_model_args[k]
# create the model
gptconf = GPTConfig(**model_args)
model = GPT(gptconf)
state_dict = checkpoint['model']
# fix the keys of the state dictionary :(
# honestly no idea how checkpoints sometimes get this prefix, have to debug more
unwanted_prefix = '_orig_mod.'
for k,v in list(state_dict.items()):
if k.startswith(unwanted_prefix):
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
model.load_state_dict(state_dict)
iter_num = checkpoint['iter_num']
best_val_loss = checkpoint['best_val_loss']
elif init_from.startswith('gpt2'):
print(f"Initializing from OpenAI GPT-2 weights: {init_from}")
# initialize from OpenAI GPT-2 weights
override_args = dict(dropout=dropout)
model = GPT.from_pretrained(init_from, override_args)
# read off the created config params, so we can store them into checkpoint correctly
for k in ['n_layer', 'n_head', 'n_embd', 'block_size', 'bias', 'vocab_size']:
model_args[k] = getattr(model.config, k)
# crop down the model block size if desired, using model surgery
if block_size < model.config.block_size:
model.crop_block_size(block_size)
model_args['block_size'] = block_size # so that the checkpoint will have the right value
model.to(device)
# initialize a GradScaler. If enabled=False scaler is a no-op
scaler = torch.cuda.amp.GradScaler(enabled=(dtype == 'float16'))
# optimizer
optimizer = model.configure_optimizers(weight_decay, learning_rate, (beta1, beta2), device_type)
if init_from == 'resume':
optimizer.load_state_dict(checkpoint['optimizer'])
checkpoint = None # free up memory
# compile the model
if compile:
print("compiling the model... (takes a ~minute)")
unoptimized_model = model
model = torch.compile(model) # requires PyTorch 2.0
# wrap model into DDP container
if ddp:
model = DDP(model, device_ids=[ddp_local_rank])
assert split in ['train', 'val', 'test']
data = test_data if split == 'test' else val_data if split == 'val' else train_data
max_length = model.config.block_size
seq_len = len(data)
model.eval()
nlls = []
prev_end_loc = 0
for begin_loc in tqdm(range(0, seq_len, stride)):
end_loc = min(begin_loc + max_length, seq_len-1)
trg_len = end_loc - prev_end_loc # may be different from stride on last loop
input_ids = torch.from_numpy((data[begin_loc:end_loc].astype(np.int64))).unsqueeze(0).to(device)
target_ids = torch.from_numpy((data[begin_loc+1:end_loc+1].astype(np.int64))).unsqueeze(0).to(device)
target_ids[:, :-trg_len] = -1
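    # positions already scored by a previous window are set to -1, nanoGPT's cross-entropy
    # ignore_index, so every token contributes to the loss exactly once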
with torch.no_grad():
outputs, loss = model(input_ids,target_ids)
# print(loss)
# loss is calculated using CrossEntropyLoss which averages over valid labels
# N.B. the model only calculates loss over trg_len - 1 labels, because it internally shifts the labels
# to the left by 1.
neg_log_likelihood = loss
nlls.append(neg_log_likelihood)
prev_end_loc = end_loc
if end_loc == seq_len - 1:
break
ppl = torch.exp(torch.stack(nlls).mean()).item()
print(f'Perplexity: {ppl:.2f}')
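# For reference: perplexity is the exponentiated mean per-token negative log-likelihood,
# PPL = exp(mean(NLL)). Quick sanity check with made-up loss values:
# torch.exp(torch.stack([torch.tensor(2.0), torch.tensor(4.0)]).mean())  # = exp(3.0) ≈ 20.09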
# destroy DDP process group
if ddp:
destroy_process_group()
| [] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | power_point_version~3_ver_just_topic~gpt_summarization_without_input.py | import openai
from google_images_download import google_images_download
import gpt_power_point_creator
import os
import shutil
from gpt import GPT
import openai
from gpt import Example
# Max length of the Bullet Points
max_len = 150
prompt = input("Topic: ")
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
gpt_point_creation = GPT(engine="davinci", temperature=.5, max_tokens=120)
gpt_point_creation.add_example(Example("Napoleon III",
"Napoleon III was the first President of France. "
"He founded the Second Empire, reigning until the defeat. "
"He made the French merchant navy the second largest in the world."
))
gpt_point_creation.add_example(Example("mitochondrion",
"A mitochondrion is a double-membrane-bound organelle. "
"Mitochondria generate most of the cell's supply of adenosine triphosphate. "
"The mitochondrion is often called the powerhouse of the cell."
))
gpt_point_creation.add_example(Example("blockchain",
"A blockchain is a list of blocks, that are linked together. "
"Blockchains are resistant to modification of their data. "
"The data in any given block cannot be altered once recorded."
))
gpt_point_creation.add_example(Example("germany",
"Germany is a country in Central Europe. "
"A region named Germania was documented before AD 100. In the 10th century. "
"It covers an area of 357,022 square kilometres. "
"Germany has a population of over 83 million within its 16 constituent states."
))
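# The Example pairs above few-shot prime GPT-3: each (input, output) pair demonstrates the
# "topic -> short factual bullet sentences" mapping the model should imitate for a new topic.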
# Get GPT output
output = gpt_point_creation.submit_request(prompt)
output = output.choices[0].text[8:]
# Crop if too long
if len(output) > max_len:
    output = output[:max_len]
    # cut back to the last complete sentence (slicing avoids the str.replace pitfall of
    # matching the same substring elsewhere in the text)
    last_period = output.rfind(".")
    if last_period != -1:
        output = output[:last_period + 1]
# Print Points
print("\nSummarized points:")
for sentence in output.split(". "):
print(" - " + sentence)
keyword = prompt
# Delete download folder if it exists
try:
shutil.rmtree('downloads')
except FileNotFoundError:
pass
# Download an image to the keyword
response = google_images_download.googleimagesdownload()
arguments = {"keywords": keyword, "limit": 1, "print_urls": True, format: "jpg"}
response.download(arguments)
# Create path to image
pic_path = "downloads/" + keyword + "/" + os.listdir("downloads/" + keyword)[0]
gpt_power_point_creator.create_power_point_from_gpt(keyword, output, pic_path)
| [
"Topic: "
] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | power_point_version~1_ver_single_slide~OLD%20py_version~gpt_power_point_data.py | import openai
from google_images_download import google_images_download
import gpt_power_point_creator
import os
import shutil
import wikipediaapi
import summarization_training
# Max length of the Bullet Points
max_len = 150
# Set wikipedia language
wiki_wiki = wikipediaapi.Wikipedia(language='en', extract_format=wikipediaapi.ExtractFormat.WIKI)
# Get topic
while True:
# Inputs
prompt = input("Topic: ")
# Define the wikipedia page
p_wiki = wiki_wiki.page(prompt)
# Summary of the wikipedia page
wiki_summary = p_wiki.summary
wiki_summary = wiki_summary.replace("\n", " ")
print(wiki_summary)
verify = input("Do you want to summarize this text? (y/n): ")
if verify.lower() == "y":
break
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
gpt_sum = summarization_training.create_sum_model()
# Get GPT output
output = gpt_sum.submit_request(wiki_summary)
output = output.choices[0].text[8:]
# Crop if too long
if len(output) > max_len:
    output = output[:max_len]
    # cut back to the last complete sentence
    last_period = output.rfind(".")
    if last_period != -1:
        output = output[:last_period + 1]
# Print Points
print("\nSummarized points:")
for sentence in output.split(". "):
print(" - " + sentence)
# Getting the keywords
keyword_response = openai.Completion.create(engine="davinci", prompt="Text: " + output + "Keywords:", temperature=0.3, max_tokens=60, top_p=1.0,
frequency_penalty=0.8, presence_penalty=0.0, stop=["\n"])
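# The "Text: ... Keywords:" prompt frames keyword extraction as text completion;
# stop=["\n"] truncates the completion at the first newline so only the keyword list is returned.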
# convert enumeration into list
keywords = keyword_response.choices[0].text
keywords = keywords.split(",")
print("\n Extracted keywords:")
for keyword in keywords:
print(" - " + keyword)
# Delete download folder if it exists
try:
shutil.rmtree('downloads')
except FileNotFoundError:
pass
# Download an image to every keyword (currently just the first one)
for i in keywords[:1]:
response = google_images_download.googleimagesdownload()
arguments = {"keywords": i, "limit": 1, "print_urls": True, format: "jpg"}
response.download(arguments)
# Create path to image
pic_path = "downloads/" + keywords[0] + "/" + os.listdir("downloads/" + keywords[0])[0]
gpt_power_point_creator.create_power_point_from_gpt(keywords[0], output, pic_path)
| [
"Text: PLACEHOLDERKeywords:",
"Topic: "
] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | html_markdown_version~OLD%20py_version~gpt_mindmap_markdown_creator.py | from gpt_content_creator import create_text_from_topic
from jupyter_notebook import gpt_subtopic_creator
from gpt import GPT
import openai
from gpt import Example
# Creates the html body wrapper around <section> parts
from jupyter_notebook.gpt_subtopic_creator import create_subtopics_from_topic
def pack_sections_in_body(sections_list):
body_start = "<body class=\"jp-Notebook\"><div class=\"reveal\"><div class=\"slides\">"
body_end = "</div></div></body>"
sections = "".join(sections_list)
return body_start + sections + body_end
# Creates html jupyter notebook presentation code
def create_markdown_presentation(sections_list):
# Static Head Code (Start)
with open("markdown_html_header.txt", encoding="utf8") as head:
header = head.read()
# GPT generated body code
with open("markdown_script_part.txt", encoding="utf8") as end:
ending = end.read()
# Static Script Code (Ending)
with open("new_html_presentation.html", "w", encoding="utf8") as html:
html.write(header)
html.write(pack_sections_in_body(sections_list))
html.write(ending)
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
# Get topic from user and call create_text_from_topic function which generates bulletpoints from the topic
prompt = input("Topic: ")
content = create_subtopics_from_topic(prompt)
# GPT model to create html code from heading and bulletpoints
gpt_mindmap_markdown_creation = GPT(engine="davinci", temperature=.5, max_tokens=120)
gpt_mindmap_markdown_creation.add_example(Example("internet, computer, communication, network, ethernet, router, backbone",
"<ul><li>internet</li><li>computer</li><li>communication</li><li>network</li><li>ethernet</li><li>router</li><li>backbone</li></ul>"))
gpt_mindmap_markdown_creation.add_example(Example("vehicle, transportation, road, wheels",
"<ul><li>vehicle</li><li>transportation</li><li>road</li><li>wheels</li></ul>"
))
gpt_mindmap_markdown_creation.add_example(Example("social network, internet, user, privacy",
"<ul><li>social network</li><li>internet</li><li>user</li><li>privacy</li></ul>"
))
gpt_mindmap_markdown_creation.add_example(Example("technology, hardware, software, internet, microchip, silicon, chip",
"<ul><li>technology</li><li>hardware</li><li>software</li><li>internet</li><li>microchip</li><li>silicon</li><li>chip</li></ul>"
))
# Create html Code from GPT Model and create presentation
output = gpt_mindmap_markdown_creation.submit_request(content)
section_html = output.choices[0].text[8:]
print("GPT-3 generated html:\n" + section_html)
#create_markdown_presentation([section_html])
| [
"Topic: "
] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | power_point_version~2_ver_mul_slide~OLD%20py_version~gpt_power_point_creator_mul_slides.py | from pptx import Presentation
from pptx.util import Cm, Pt
import PIL
from pptx.dml.color import RGBColor
from google_images_download import google_images_download
import os
import openai
import shutil
# Data
slide_height = Cm(19.05)
slide_width = Cm(25.4)
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
def add_picture_bottom_right(slide, img_path, margin=Cm(1), max_pic_height=Cm(14), max_pic_width=Cm(10),
slide_height=Cm(19.05), slide_width=Cm(25.4)):
if not isinstance(margin, Cm):
margin = Cm(margin)
if not isinstance(max_pic_height, Cm):
max_pic_height = Cm(max_pic_height)
if not isinstance(max_pic_width, Cm):
max_pic_width = Cm(max_pic_width)
if not isinstance(slide_height, Cm):
slide_height = Cm(slide_height)
if not isinstance(slide_width, Cm):
slide_width = Cm(slide_width)
pic_width, pic_height = PIL.Image.open(img_path).size
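# Convert pixel dimensions to centimetres; dividing by 47.25 px/cm assumes
# roughly 120 dpi source images.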
pic_width = Cm(round(pic_width / 47.25, 2))
pic_height = Cm(round(pic_height / 47.25, 2))
# Convert width if too big
if pic_width > max_pic_width:
ratio = max_pic_width / pic_width
pic_width = max_pic_width
pic_height *= ratio
# Convert Height if too big
if pic_height > max_pic_height:
ratio = max_pic_height / pic_height
pic_height = max_pic_height
pic_width *= ratio
image = slide.shapes.add_picture(img_path, slide_width - margin - pic_width, slide_height - margin - pic_height,
height=pic_height, width=pic_width)
return image
def create_power_point_slides_from_gpt(dictionary, img_dict, pp_title):
# Create File and Slide
prs = Presentation()
for key in dictionary:
slide = prs.slides.add_slide(prs.slide_layouts[1])
# Add Title
title = slide.shapes.title
title.text = key
title.text_frame.paragraphs[0].font.color.rgb = RGBColor(59, 89, 152)
title.text_frame.paragraphs[0].font.bold = True
title.text_frame.paragraphs[0].font.name = "Calibri Light"
# Add Bulletpoints
subtitle = slide.placeholders[1]
subtitle.text = dictionary[key].replace(".", ".\n")
for line in subtitle.text_frame.paragraphs:
line.font.size = Pt(25)
line.font.name = "Calibri Light"
subtitle.width = Cm(12)
subtitle.height = Cm(19.05 - 5.2)
subtitle.top = Cm(4.2)
subtitle.left = Cm(1)
try:
img = add_picture_bottom_right(slide, img_dict[key], max_pic_width=Cm(11))
except:
pass
prs.save(pp_title + "_mul_slides.pptx")
| [] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | html_markdown_version~OLD%20py_version~gpt_content_creator.py | from gpt import GPT
import openai
from gpt import Example
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
# GPT Model to create bulletpoints from a topic
gpt_point_creation = GPT(engine="davinci", temperature=.5, max_tokens=120)
gpt_point_creation.add_example(Example("Napoleon III",
"Napoleon III was the first President of France. He founded the Second Empire, reigning until the defeat. He made the French merchant navy the second largest in the world."
))
gpt_point_creation.add_example(Example("mitochondrion",
"A mitochondrion is a double-membrane-bound organelle. Mitochondria generate most of the cell's supply of adenosine triphosphate. The mitochondrion is often called the powerhouse of the cell."
))
gpt_point_creation.add_example(Example("blockchain",
"A blockchain is a list of blocks, that are linked together. Blockchains are resistant to modification of their data. The data in any given block cannot be altered once recorded."
))
gpt_point_creation.add_example(Example("germany",
"Germany is a country in Central Europe. A region named Germania was documented before AD 100 In the 10th century. It covers an area of 357,022 square kilometres. Germany has a population of over 83 million within its 16 constituent states."
))
# Create Text (Bulletpoints) from a topic
def create_text_from_topic(prompt):
output = gpt_point_creation.submit_request(prompt)
text_output = output.choices[0].text[8:]
text_output = text_output.strip()
print("GPT-3 generated Text:\n" + text_output)
return text_output
| [] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | html_markdown_version~OLD%20py_version~gpt_markdown_creator.py | from gpt_content_creator import create_text_from_topic
from gpt_subtopic_creator import create_subtopics_from_topic
from gpt import GPT
import openai
from gpt import Example
# Creates the html body wrapper around <section> parts
def pack_sections_in_body(sections_list):
body_start = "<body class=\"jp-Notebook\"><div class=\"reveal\"><div class=\"slides\">"
body_end = "</div></div></body>"
sections = "".join(sections_list)
return body_start + sections + body_end
# Creates html jupyter notebook presentation code
def create_markdown_presentation(sections_list):
# Static Head Code (Start)
with open("markdown_html_header.txt", encoding="utf8") as head:
header = head.read()
# GPT generated body code
with open("markdown_script_part.txt", encoding="utf8") as end:
ending = end.read()
# Static Script Code (Ending)
with open("new_html_presentation.html", "w", encoding="utf8") as html:
html.write(header)
html.write(pack_sections_in_body(sections_list))
html.write(ending)
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
# GPT model to create html code from heading and bulletpoints
gpt_markdown_creation = GPT(engine="davinci", temperature=.5, max_tokens=120)
gpt_markdown_creation.add_example(Example("Heading: Cat Text: The cat (Felis catus) is a domesticated species of small carnivorous mammal. It is the only domesticated species in the family Felidae.",
"<section><div class=\"jp-RenderedHTMLCommon jp-RenderedMarkdown jp-MarkdownOutput \" data-mime-type=\"text/markdown\"><h1>Cat</h1><ul><li>The cat (Felis catus) is a domesticated species of small carnivorous mammal.</li><li>It is the only domesticated species in the family Felidae.</li></ul></div></section>"
))
gpt_markdown_creation.add_example(Example("Heading: PyCharm Text: PyCharm is an integrated development environment used in computer programming. It is developed by the Czech company JetBrains.",
"<section><div class=\"jp-RenderedHTMLCommon jp-RenderedMarkdown jp-MarkdownOutput \" data-mime-type=\"text/markdown\"><h1>PyCharm</h1><ul><li>PyCharm is an integrated development environment used in computer programming.</li><li>It is developed by the Czech company JetBrains.</li></ul></div></section>"
))
gpt_markdown_creation.add_example(Example("Heading: python Text: Python is a high-level programming language. It is a general-purpose language. It is a high-level language. It is a widely used language.",
"<section><div class=\"jp-RenderedHTMLCommon jp-RenderedMarkdown jp-MarkdownOutput \" data-mime-type=\"text/markdown\"><h1>python</h1><ul><li>Python is a high-level programming language.</li><li>It is a general-purpose language.</li><li>It is a high-level language.</li><li>It is a widely used language.</li></ul></div></section>"
))
gpt_markdown_creation.add_example(Example("Heading: Etymology and naming Text: The origin of the English word cat is thought to be the Late Latin word cattus, which was first used at the beginning of the 6th century.",
"<section><div class=\"jp-RenderedHTMLCommon jp-RenderedMarkdown jp-MarkdownOutput \" data-mime-type=\"text/markdown\"><h1>Etymology and naming</h1><ul><li>The origin of the English word cat is thought to be the Late Latin word cattus, which was first used at the beginning of the 6th century.</li></ul></div></section>"
))
# Get topic from user
prompt = input("Topic: ")
# Get subtopics from topic
subtopics = create_subtopics_from_topic(prompt)
subtopics = subtopics.split(", ")
subtopics_list = [prompt]
for i in subtopics:
subtopics_list.append(i.strip())
if len(subtopics_list) > 5:
subtopics_list = subtopics_list[:4]
print("GPT-3 generated subtopics:\n", subtopics_list)
# Get text for every subtopic
section_list = []
for topic in subtopics_list:
if prompt not in topic:
topic = prompt + " " + topic
print("\n" + topic)
print("---------------")
content = create_text_from_topic(topic)
# Clean up content output
content = content.replace("\"", "")
content = content.replace("output:", "")
# Convert topic and bulletpoints to format for the html GPT model
content_input = "Heading: " + topic + " Text: " + content
# Create html Code from GPT Model and create presentation
output = gpt_markdown_creation.submit_request(content_input)
# clean up output code
section_html = output.choices[0].text[8:]
section_html = section_html.replace("output:", "")
section_html = section_html.strip()
section_html = section_html.split("</section>")[0] + "</section>"
print("GPT-3 generated html:\n" + section_html)
section_list.append(section_html)
create_markdown_presentation(section_list)
| [
"Topic: "
] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | power_point_version~OLD%20tests~gpt%20test~first_test.py | import os
import openai
# Load your API key from an environment variable or secret management service
openai.api_key = "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"
i = "openai is a"
response = openai.Completion.create(engine="curie", prompt=i, max_tokens=50)
print("[" + i + "]" + response["choices"][0]["text"])
| [
"openai is a"
] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | html_markdown_version~OLD%20py_version~gpt_subtopic_creator.py | from gpt import GPT
import openai
from gpt import Example
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
# GPT Model to create subtopics from a topic
gpt_subtopic_creation = GPT(engine="davinci", temperature=.5, max_tokens=120)
gpt_subtopic_creation.add_example(Example("car",
"motor, vehicle, transportation, road, wheels"
))
gpt_subtopic_creation.add_example(Example("tiger",
"cat, panthera, predator, wildlife, endangered"
))
gpt_subtopic_creation.add_example(Example("blockchain",
"crypto, cryptography, transaction, bitcoin, cryptocurrency"
))
gpt_subtopic_creation.add_example(Example("germany",
"country, europe, berlin, european union, demogracy"
))
# Create Text (Bulletpoints) from a topic
def create_subtopics_from_topic(prompt):
output = gpt_subtopic_creation.submit_request(prompt)
text_output = output.choices[0].text[8:]
return text_output
| [] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | power_point_version~OLD%20tests~gpt%20test~keywords.py | import openai
import image_download
# Openai key (redacted: load from the environment instead of hardcoding a live secret)
import os
openai.api_key = os.environ["OPENAI_API_KEY"]
# Create model
response = openai.Completion.create(
engine="davinci",
prompt="Text: A car is a wheeled motor vehicle used for transportation. Cars are a primary means of transportation in many regions of the world. The year 1886 is regarded as the birth year of the car when German inventor Karl Benz patented his Benz Patent\n\nKeywords:",
temperature=0.3,
max_tokens=60,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0,
stop=["\n"]
)
# Model Output
print(response.choices[0].text)
# convert enumeration into list
daten = response.choices[0].text
einzeldaten = daten.split(",")
print(einzeldaten)
# download images from google search
image_download.keywordPictures(einzeldaten)
| [
"Text: A car is a wheeled motor vehicle used for transportation. Cars are a primary means of transportation in many regions of the world. The year 1886 is regarded as the birth year of the car when German inventor Karl Benz patented his Benz Patent\n\nKeywords:"
] |
2024-01-10 | PatrickSinger99/GPT3PowerPoint | power_point_version~2_ver_mul_slide~OLD%20py_version~gpt_power_point_data_mul_slides.py | import openai
import gpt_power_point_creator_mul_slides
import wikipediaapi
import summarization_training
from google_images_download import google_images_download
import os
import shutil
max_len = 150 # Max length of the Bullet Points
max_slide_num = 5
# Set wikipedia language
wiki_wiki = wikipediaapi.Wikipedia(language='en', extract_format=wikipediaapi.ExtractFormat.WIKI)
# Get topic
while True:
# Inputs
prompt = input("Topic: ")
# Define the wikipedia page
p_wiki = wiki_wiki.page(prompt)
# dictionary of the wikipedia page
def create_dict(page):
dict = {}
dict[page.title] = page.summary
for s in page.sections:
dict[s.title] = s.text
try:
if s.text == "":
dict[s.title] = s.sections[0].text
except:
pass
return dict
wiki_dict = create_dict(p_wiki)
try:
wiki_dict.pop("See also")
except:
pass
try:
wiki_dict.pop("References")
except:
pass
try:
wiki_dict.pop("External links")
except:
pass
try:
wiki_dict.pop("Further reading")
except:
pass
try:
wiki_dict.pop("Notes")
except:
pass
new_wiki_dict = {}
slide_num = 0
for key in wiki_dict:
slide_num += 1
if slide_num <= max_slide_num:
new_wiki_dict[key] = wiki_dict[key]
# Print Subtopics
print("\nFound Subtopics: ")
for key in new_wiki_dict:
print(key)
print("\n")
verify = input("Do you want a power-point for these topics? (y/n): ")
if verify.lower() == "y":
break
# Openai key
with open("openai_key.txt") as file:
key = file.read()
openai.api_key = key
gpt_sum = summarization_training.create_sum_model()
dict_for_pptx = {}
for key in new_wiki_dict:
# Get GPT output
output = gpt_sum.submit_request(new_wiki_dict[key])
output = output.choices[0].text[8:]
# Crop if too long
if len(output) > max_len:
output = output[:max_len]
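# Walk backwards from the truncated end to the last full stop and drop the
# partial sentence after it.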
to_cut = ""
for i in reversed(range(1, len(output))):
if output[i] == ".":
output = output.replace(to_cut, "")
break
to_cut = output[i] + to_cut
# Print Points
print("\nSummarized points for " + key + ":")
for sentence in output.split(". "):
print(" - " + sentence)
dict_for_pptx[key] = output
# Delete download folder if it exists
try:
shutil.rmtree('downloads')
except FileNotFoundError:
pass
# Download an image for the keyword
response = google_images_download.googleimagesdownload()
arguments = {"keywords": prompt, "limit": max_slide_num+2, "print_urls": True, format: "jpg"}
response.download(arguments)
img_list = os.listdir("downloads/" + prompt)
img_dict = {}
counter = 0
try:
for key in dict_for_pptx:
img_dict[key] = "downloads/" + prompt + "//" + img_list[counter]
counter += 1
except:
pass
gpt_power_point_creator_mul_slides.create_power_point_slides_from_gpt(dict_for_pptx, img_dict, prompt)
| [
"Topic: "
] |
2024-01-10 | DSamuelHodge/phasellm | release_checklist_code.py | """
This code is used to test various aspects of PhaseLLM. We recommend running this on a P3 EC2 instance with Ubuntu 22.04 installed. To get this up and running, run the following code:
sudo apt-get update
sudo apt-get upgrade
sudo apt-get install xorg
sudo apt-get install nvidia-driver-460
sudo reboot
Run `nvidia-smi` to ensure you have GPU devices with CUDA installed.
"""
##########################################################################################
# GPU SETUP
#
import torch
# Confirm GPUs are installed and usable.
print(torch.cuda.is_available())
print(torch.cuda.current_device())
##########################################################################################
# ENVIRONMENT VARIABLES
#
# Load all environment variables and API keys
import os
from dotenv import load_dotenv
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
cohere_api_key = os.getenv("COHERE_API_KEY")
hugging_face_api_key = os.getenv("HUGGING_FACE_API_KEY")
##########################################################################################
# GPT-3.5 EVALUATOR WITH COHERE AND CLAUDE COMPARISONS
#
# Run GPT-3.5 evaluator
from phasellm.eval import GPT35Evaluator
# We'll use GPT-3.5 as the evaluator.
e = GPT35Evaluator(openai_api_key)
# Our objective.
objective = "We're building a chatbot to discuss a user's travel preferences and provide advice."
# Chats that have been launched by users.
travel_chat_starts = [
"I'm planning to visit Poland in spring.",
"I'm looking for the cheapest flight to Europe next week.",
"I am trying to decide between Prague and Paris for a 5-day trip",
"I want to visit Europe but can't decide if spring, summer, or fall would be better.",
"I'm unsure I should visit Spain by flying via the UK or via France."
]
from phasellm.llms import CohereWrapper, ClaudeWrapper
cohere_model = CohereWrapper(cohere_api_key)
claude_model = ClaudeWrapper(anthropic_api_key)
print("Running test. 1 = Cohere, and 2 = Claude.")
for tcs in travel_chat_starts:
messages = [{"role":"system", "content":objective},
{"role":"user", "content":tcs}]
response_cohere = cohere_model.complete_chat(messages, "assistant")
response_claude = claude_model.complete_chat(messages, "assistant")
pref = e.choose(objective, tcs, response_cohere, response_claude)
print(f"{pref}")
##########################################################################################
# DOLLY TESTS
#
from phasellm.llms import DollyWrapper
dw = DollyWrapper()
# Testing chat capability.
messages = [{"role":"user", "content":"What should I eat for lunch today?"}]
dw.complete_chat(messages, 'assistant')
# Run a text completion.
dw.text_completion("The capital of Poland is")
##########################################################################################
# GPT-3.5 EVALUATOR WITH COHERE AND DOLLY COMPARISONS
#
import os
from dotenv import load_dotenv
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
cohere_api_key = os.getenv("COHERE_API_KEY")
from phasellm.eval import GPT35Evaluator
# We'll use GPT-3.5 as the evaluator.
e = GPT35Evaluator(openai_api_key)
# Our objective.
objective = "We're building a chatbot to discuss a user's travel preferences and provide advice."
# Chats that have been launched by users.
travel_chat_starts = [
"I'm planning to visit Poland in spring.",
"I'm looking for the cheapest flight to Europe next week.",
"I am trying to decide between Prague and Paris for a 5-day trip",
"I want to visit Europe but can't decide if spring, summer, or fall would be better.",
"I'm unsure I should visit Spain by flying via the UK or via France."
]
from phasellm.llms import CohereWrapper
from phasellm.llms import DollyWrapper # NEW: importing the DollyWrapper...
dw = DollyWrapper() # NEW: ... and instantiating it.
cohere_model = CohereWrapper(cohere_api_key)
print("Running test. 1 = Cohere, and 2 = Dolly.")
for tcs in travel_chat_starts:
messages = [{"role":"system", "content":objective},
{"role":"user", "content":tcs}]
response_cohere = cohere_model.complete_chat(messages, "assistant")
response_dw = dw.complete_chat(messages, "assistant") # NEW: minor change to variable name
pref = e.choose(objective, tcs, response_cohere, response_dw)
print(f"{pref}")
##########################################################################################
# BLOOM TESTS
#
from phasellm.llms import BloomWrapper
bw = BloomWrapper(hugging_face_api_key)
# Testing chat capability.
messages = [{"role":"user", "content":"What should I eat for lunch today?"}]
bw.complete_chat(messages, 'assistant')
# Run a text completion.
bw.text_completion("The capital of Poland is")
| [
"What should I eat for lunch today?"
] |
2024-01-10 | DSamuelHodge/phasellm | phasellm~llms~llms.py | """
Abstract classes and wrappers for LLMs, chatbots, and prompts.
"""
# TODO Make all prompts to text completion be strings, so people can fill prompts in. Or support both if need be...
# TODO Add tests, etc... Make sure everything still works.
import requests
import json
import re
# Imports for external APIs
import openai
import cohere
# Hugging Face and PyTorch imports
from transformers import pipeline
import torch
def _clean_messages_to_prompt(messages):
"""
Converts an array of messages in the form {"role": <str>, "content":<str>} into a String.
This is influenced by the OpenAI chat completion API.
"""
out_text = "\n".join([f"{str(m['role'])}: {str(m['content'])}" for m in messages])
return out_text
def _get_stop_sequences_from_messages(messages):
"""
Generates a list of stop-sequence strings from an array of messages in the form {"role": <str>, "content":<str>}.
"""
roles = set()
for m in messages:
roles.add(m["role"])
stop_sequences = [f"\n{r}:" for r in roles]
return stop_sequences
class LanguageModelWrapper():
"""
Abstract wrapper for large language models.
"""
def __init__(self):
pass
def __repr__(self):
pass
def complete_chat(self, messages):
"""
Takes an array of messages in the form {"role": <str>, "content":<str>} and generate a response.
This is influenced by the OpenAI chat completion API.
"""
pass
def text_completion(self, prompt, stop_sequences=[]):
"""
Standardizes text completion for large language models.
"""
pass
class Prompt():
"""
Prompts are used to generate text completions. Prompts can be simple Strings. They can also include variables surrounded by curly braces. For example:
Hello {name}!
In this case, 'name' can be filled using the fill_prompts() function. This makes it easier to loop through prompts that follow a specific pattern or structure.
"""
def __init__(self, prompt):
self.prompt = prompt
def __repr__(self):
return self.prompt
def get_prompt(self):
"""
Return the raw prompt command (i.e., does not fill in variables.)
"""
return self.prompt
def fill_prompts(self, **kwargs):
"""
Return a prompt with variables filled in.
"""
pattern = r'\{\s*[a-zA-Z0-9_]+\s*\}'
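# Matches placeholders such as "{name}" or "{ name }" (word characters only).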
matches = re.findall(pattern, self.prompt)
new_prompt = self.prompt
for m in matches:
keyword = m.replace("{", "").replace("}", "").strip()
if keyword in kwargs:
new_prompt = new_prompt.replace(m, kwargs[keyword])
return new_prompt
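# Example (sketch):
#   p = Prompt("Hello {name}!")
#   p.fill_prompts(name="World")  # -> "Hello World!"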
class BloomWrapper():
"""
Wrapper for Hugging Face's BLOOM model. Requires access to Hugging Face's inference API.
"""
def __init__(self, apikey):
self.apikey = apikey
def __repr__(self):
return f"BloomWrapper()"
def complete_chat(self, messages, append_role=None):
"""
Mimics a chat scenario with BLOOM, via a list of {"role": <str>, "content":<str>} objects.
"""
prompt_preamble = "You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\n"
prompt_text = prompt_preamble + _clean_messages_to_prompt(messages)
if append_role is not None and len(append_role) > 0:
prompt_text += f"\n{append_role}:"
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
headers = {"Authorization": f"Bearer {self.apikey}"}
response = requests.post(API_URL, headers=headers, json={"inputs": prompt_text}).json()
all_text = response[0]['generated_text']
new_text = all_text[len(prompt_text):]
# We only return the first line of text.
newline_location = new_text.find("\n")
if newline_location > 0:
new_text = new_text[:newline_location]
return new_text
def text_completion(self, prompt):
"""
Completes text via BLOOM (Hugging Face).
"""
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"
headers = {"Authorization": f"Bearer {self.apikey}"}
response = requests.post(API_URL, headers=headers, json={"inputs": prompt}).json()
all_text = response[0]['generated_text']
new_text = all_text[len(prompt):]
return new_text
class OpenAIGPTWrapper():
"""
Wrapper for the OpenAI API. Supports all major text and chat completion models by OpenAI.
"""
def __init__(self, apikey, model="gpt-3.5-turbo"):
openai.api_key = apikey
self.model = model
def __repr__(self):
return f"OpenAIGPTWrapper(model={self.model})"
def complete_chat(self, messages, append_role=None):
"""
Completes chat with OpenAI. If using GPT 3.5 or 4, will simply send the list of {"role": <str>, "content":<str>} objects to the API.
If using an older model, it will structure the messages list into a prompt first.
"""
if self.model.find('gpt-4') >= 0 or self.model.find('gpt-3.5') >= 0:
response = openai.ChatCompletion.create(
model=self.model,
messages=messages
)
top_response_content = response['choices'][0]['message']['content']
return top_response_content
else:
prompt_text = _clean_messages_to_prompt(messages)
if append_role is not None and len(append_role) > 0:
prompt_text += f"\n{append_role}: "
prompt_text = prompt_text.strip()
response = openai.Completion.create(
model=self.model,
prompt=prompt_text,
stop=_get_stop_sequences_from_messages(messages)
)
top_response_content = response['choices'][0]['text']
return top_response_content
# Note that this currently will error out with GPT 3.5 or above as they are chat models.
# TODO Add error catching.
def text_completion(self, prompt, stop_sequences=[]):
"""
Completes text via OpenAI. Note that this doesn't support GPT 3.5 or later.
"""
if len(stop_sequences) == 0:
response = openai.Completion.create(
model=self.model,
prompt=prompt
)
else:
response = openai.Completion.create(
model=self.model,
prompt=prompt,
stop = stop_sequences
)
top_response_content = response['choices'][0]['text']
return top_response_content
class ClaudeWrapper():
"""
Wrapper for Anthropic's Claude large language model.
We've opted to call Anthropic's API directly rather than using their Python offering.
"""
def __init__(self, apikey, model="claude-v1"):
self.apikey = apikey
self.model = model
def __repr__(self):
return f"ClaudeWrapper(model={self.model})"
def complete_chat(self, messages, append_role=None):
"""
Completes chat with Claude. Since Claude doesn't support a chat interface via API, we mimic the chat via a prompt.
"""
r_headers = {"X-API-Key":self.apikey, "Accept":"application/json"}
prompt_text = _clean_messages_to_prompt(messages)
if append_role is not None and len(append_role) > 0:
prompt_text += f"\n{append_role}: "
r_data = {"prompt": prompt_text,
"model": self.model,
"max_tokens_to_sample": 500,
"stop_sequences": _get_stop_sequences_from_messages(messages)
}
resp = requests.post("https://api.anthropic.com/v1/complete", headers=r_headers, json=r_data)
completion = json.loads(resp.text)["completion"].strip()
return completion
def text_completion(self, prompt, stop_sequences=[]):
"""
Completes text based on provided prompt.
"""
r_headers = {"X-API-Key":self.apikey, "Accept":"application/json"}
r_data = {"prompt": prompt,
"model": self.model,
"max_tokens_to_sample": 500,
"stop_sequences": stop_sequences
}
resp = requests.post("https://api.anthropic.com/v1/complete", headers=r_headers, json=r_data)
completion = json.loads(resp.text)["completion"].strip()
return completion
# TODO Might want to add stop sequences (new lines, roles) to make this better.
class GPT2Wrapper(LanguageModelWrapper):
"""
Wrapper for GPT-2 implementation (via Hugging Face).
"""
def __init__(self):
self.model_name = "GPT-2"
def __repr__(self):
return f"GPT2Wrapper()"
def complete_chat(self, messages, append_role=None, max_length=300):
"""
Mimics a chat scenario via a list of {"role": <str>, "content":<str>} objects.
"""
prompt_preamble = "You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\n"
prompt_text = prompt_preamble + _clean_messages_to_prompt(messages)
if append_role is not None and len(append_role) > 0:
prompt_text += f"\n{append_role}:"
generator = pipeline('text-generation', model='gpt2')
resps = generator(prompt_text, max_length=max_length, num_return_sequences=1)
resp = resps[0]['generated_text']
resp = resp[len(prompt_text):] # Strip out the original text.
return resp
def text_completion(self, prompt, max_length=200):
"""
Completes text via GPT-2.
"""
generator = pipeline('text-generation', model='gpt2')
resps = generator(prompt, max_length=max_length, num_return_sequences=1)
resp = resps[0]['generated_text']
resp = resp[len(prompt):] # Strip out the original text.
return resp
class DollyWrapper():
"""
Implementation of Dolly 2.0 (via Hugging Face).
"""
def __init__(self):
self.model_name = 'dolly-v2-12b'
self.generate_text = pipeline(model="databricks/dolly-v2-12b", torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto")
def __repr__(self):
return f"DollyWrapper(model={self.model})"
def complete_chat(self, messages, append_role=None):
"""
Mimics a chat scenario via a list of {"role": <str>, "content":<str>} objects.
"""
prompt_preamble = "You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\n"
prompt_text = prompt_preamble + _clean_messages_to_prompt(messages)
if append_role is not None and len(append_role) > 0:
prompt_text += f"\n{append_role}:"
resp = self.generate_text(prompt_text)
return resp
def text_completion(self, prompt):
"""
Completes text via Dolly.
"""
resp = self.generate_text(prompt)
return resp
class CohereWrapper():
"""
Wrapper for Cohere's API. Defaults to their 'xlarge' model.
"""
def __init__(self, apikey, model="xlarge"):
self.apikey = apikey
self.model = model
def __repr__(self):
return f"CohereWrapper(model={self.model})"
def complete_chat(self, messages, append_role=None):
"""
Mimics a chat scenario via a list of {"role": <str>, "content":<str>} objects.
"""
prompt_text = _clean_messages_to_prompt(messages)
if append_role is not None and len(append_role) > 0:
prompt_text += f"\n{append_role}:"
co = cohere.Client(self.apikey)
response = co.generate(
prompt=prompt_text,
max_tokens=300,
stop_sequences=_get_stop_sequences_from_messages(messages)
)
resp = response.generations[0].text
for s in _get_stop_sequences_from_messages(messages):
resp = resp.replace(s, "").strip()
return resp
def text_completion(self, prompt, stop_sequences=[]):
"""
Completes text.
"""
co = cohere.Client(self.apikey)
response = co.generate(
prompt=prompt,
max_tokens=300,
stop_sequences=stop_sequences
)
resp = response.generations[0].text
return resp
class ChatBot():
"""
Allows you to have a chat conversation with an LLM wrapper.
In short, it manages the list of {"role": <str>, "content":<str>} objects for you, so you don't have to track the conversation yourself. It also interacts directly with the model.
"""
def __init__(self, llm, initial_system_prompt="You are a friendly chatbot assistant."):
"""
Initializes a ChatBot. Provide an initial_system_prompt value to request the type of chatbot you will be dealing with.
Warning: not all LLMs are trained to use instructions provided in a system prompt.
"""
self.llm = llm
self.messages = []
self._append_message('system', initial_system_prompt)
def _append_message(self, role, message):
"""
Saves a message to the chatbot's message queue.
"""
self.messages.append({"role":role, "content":message})
def chat(self, message):
"""
Chats with the chatbot.
"""
self._append_message('user', message)
response = self.llm.complete_chat(self.messages, "assistant")
self._append_message('assistant', response)
return response
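# Example usage (sketch; any wrapper above works):
#   bot = ChatBot(CohereWrapper(apikey))
#   print(bot.chat("Hello!"))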
| [
"You are a friendly chat assistant. You are speaking to the 'user' below and will respond at the end, where it says 'assistant'.\n",
"\nPLACEHOLDER:",
"\nPLACEHOLDER: "
] |
2024-01-10 | sshh12/multi_token | scripts~whisper_gpt_build_finetune_dataset.py | from typing import List
import argparse
import json
import os
import random
import openai
from datasets import Dataset, load_dataset
from multi_token.constants import ROLE_ASSISTANT, ROLE_USER
DATASET_ARGS = dict(
path="mozilla-foundation/common_voice_15_0", name="en", split="train"
)
PROMPT = """
You are helping train a voice audio assistant that can take speech inputs and output text.
Here is the speech you can hear:
{captions}
{question}
Include the question and answer.
"""
QUESTIONS = [
"Ask a question about the content of the audio.",
"Ask a complex question about the content of the audio.",
"Ask a complex question that is relevant to the content of the audio, for example, asking about background knowledge of the things in the speech. Do not ask about uncertain details.",
"Ask a complex question that is relevant to the content of the audio, for example, asking about the events referred to in the audio. Do not ask about uncertain details.",
"Ask a question about the tone of the audio.",
"Ask to paraphrase the audio in a certain way.",
"Ask about your thoughts on the audio.",
"Ask what is said in the audio.",
"Ask about what could be said next in the audio.",
"If the audio could be question, ask to answer the question in the audio. If it does not, ask to answer a question only answered by listening to the audio.",
]
OPENAI_TOOLS = [
{
"type": "function",
"function": {
"name": "create_chat",
"description": "Create a training example",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question, must be provided",
},
"answer": {
"type": "string",
"description": "The answer to the question, must be provided",
},
},
"required": ["question", "answer"],
},
},
}
]
def _build_convo(idx, row) -> List:
client = openai.Client()
captions = [row["sentence"]]
speech_audios = [{"dataset_args": DATASET_ARGS, "idx": idx}]
captions_text = "\n".join([f'Caption: "{cap}"' for i, cap in enumerate(captions)])
prompt = PROMPT.format(
captions=captions_text, question=random.choice(QUESTIONS)
).strip()
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[{"role": "system", "content": prompt}],
tools=OPENAI_TOOLS,
tool_choice={"type": "function", "function": {"name": "create_chat"}},
)
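# Forcing tool_choice to create_chat makes the model return structured JSON
# arguments instead of free-form text.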
resp = json.loads(completion.choices[0].message.tool_calls[0].function.arguments)
if "answer" not in resp:
print(resp)
q = resp["question"]
a = resp["answer"]
if random.choice([True, False]):
q = "<speech>" * len(captions) + " " + q
else:
q = q + " " + "<speech>" * len(captions)
example = {
"speech_audios": speech_audios,
"messages": [
{
"role": ROLE_USER,
"content": q,
},
{
"role": ROLE_ASSISTANT,
"content": a,
},
],
}
return example
def main(args):
data = load_dataset(**DATASET_ARGS)
data_idxs = list(range(len(data)))
os.makedirs(args.cache_folder, exist_ok=True)
def gen(seeds):
r = random.Random(seeds[0] + 10)
cache = open(
os.path.join(args.cache_folder, f"gpt-cache.{seeds[0]}.jsonl"), "a"
)
i = 0
while i < len(seeds):
selected_idx = r.sample(data_idxs, k=1)[0]
selected_row = data[selected_idx]
try:
example = _build_convo(selected_idx, selected_row)
cache.write(json.dumps(example) + "\n")
yield example
i += 1
except Exception as e:
print(e)
continue
cache.close()
ds = Dataset.from_generator(
gen,
num_proc=args.num_proc,
gen_kwargs={"seeds": list(range(args.num_examples))},
)
ds.save_to_disk(args.output_folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--output_folder",
type=str,
default="/data/whisper-gpt-common_voice_15_0-finetune",
)
parser.add_argument(
"-c",
"--cache_folder",
type=str,
default="/data/whisper-gpt-common_voice_15_0-finetune-cache",
)
parser.add_argument("-n", "--num_examples", type=int, default=300_000)
parser.add_argument("-p", "--num_proc", type=int, default=10)
args = parser.parse_args()
main(args)
| [
"\nYou are helping train a voice audio assistant that can take speech inputs and output text.\n\nHere is the speech you can hear:\n{captions}\n\n{question}\n\nInclude the question and answer.\n"
] |
2024-01-10 | sshh12/multi_token | scripts~llava_gpt_build_multi_image_finetune_dataset.py | from typing import List
import argparse
import json
import os
import random
import openai
from datasets import Dataset, load_dataset
from multi_token.constants import ROLE_ASSISTANT, ROLE_USER
PROMPT = """
You are helping train a chat vision assistant that can take several image inputs and output text.
Here are the images you can see:
{captions}
{question}
Include the question and answer.
"""
QUESTIONS = [
"Using the images and their captions above, ask a complex question about the relationship between the images.",
"Ask a question that reasons about ALL of the images, for example, asking about how they are related or how one might lead to the other.",
"Ask a question that reasons about ALL of the images, for example, asking about the relationship between objects in the images, asking about the location of objects in the images, etc.",
"Ask a complex question that is relevant to the content some of images, for example, asking about background knowledge of the objects in the images, asking to discuss about events happening in the images, etc. Do not ask about uncertain details.",
"Ask about the similarities among the provided images.",
"Ask about the differences among the provided images.",
"Ask about the last image.",
"Ask about the first image.",
"Ask about your thoughts on the images.",
"Ask about how to use the items in the images.",
"Ask a question that relates to the order of the images.",
"Ask a question that relates to the numbering of the images.",
]
OPENAI_TOOLS = [
{
"type": "function",
"function": {
"name": "create_chat",
"description": "Create a training example",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question, must be provided",
},
"answer": {
"type": "string",
"description": "The answer to the question, must be provided",
},
},
"required": ["question", "answer"],
},
},
}
]
def _build_convo(pretrain_examples) -> List:
client = openai.Client()
captions = [e["messages"][1]["content"] for e in pretrain_examples]
paths = [e["images"][0] for e in pretrain_examples]
captions_text = "\n".join(
[f"Image {i+1} - {cap}" for i, cap in enumerate(captions)]
)
prompt = PROMPT.format(
captions=captions_text, question=random.choice(QUESTIONS)
).strip()
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[{"role": "system", "content": prompt}],
tools=OPENAI_TOOLS,
tool_choice={"type": "function", "function": {"name": "create_chat"}},
)
resp = json.loads(completion.choices[0].message.tool_calls[0].function.arguments)
if "answer" not in resp:
print(resp)
q = resp["question"]
a = resp["answer"]
if random.choice([True, False]):
q = "<image>" * len(captions) + " " + q
else:
q = q + " " + "<image>" * len(captions)
example = {
"images": paths,
"messages": [
{
"role": ROLE_USER,
"content": q,
},
{
"role": ROLE_ASSISTANT,
"content": a,
},
],
}
return example
def main(args):
data = load_dataset("sshh12/llava-pretrain", split="train", data_files="*.arrow")
data_idxs = list(range(len(data)))
os.makedirs(args.cache_folder, exist_ok=True)
def gen(seeds):
r = random.Random(seeds[0])
cache = open(
os.path.join(args.cache_folder, f"gpt-cache.{seeds[0]}.jsonl"), "a"
)
i = 0
while i < len(seeds):
k = r.randint(1, args.max_images)
selected_idxs = r.sample(data_idxs, k=k)
selected_examples = [data[i] for i in selected_idxs]
try:
example = _build_convo(selected_examples)
cache.write(json.dumps(example) + "\n")
yield example
i += 1
except Exception as e:
print(e)
continue
cache.close()
ds = Dataset.from_generator(
gen,
num_proc=args.num_proc,
gen_kwargs={"seeds": list(range(args.num_examples))},
)
ds.save_to_disk(args.output_folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--output_folder",
type=str,
default="/data/llava-gpt-multi-image-finetune",
)
parser.add_argument(
"-c",
"--cache_folder",
type=str,
default="/data/llava-gpt-multi-image-finetune-cache",
)
parser.add_argument("-n", "--num_examples", type=int, default=200_000)
parser.add_argument("-m", "--max_images", type=int, default=6)
parser.add_argument("-p", "--num_proc", type=int, default=10)
args = parser.parse_args()
main(args)
| [
"\nYou are helping train a chat vision assistant that can take several image inputs and output text.\n\nHere are the images you can see:\n{captions}\n\n{question}\n\nInclude the question and answer.\n"
] |
2024-01-10 | sshh12/multi_token | scripts~clap_gpt_build_finetune_dataset.py | from typing import List
import argparse
import json
import os
import random
import openai
from datasets import Dataset, load_dataset
from multi_token.constants import ROLE_ASSISTANT, ROLE_USER
PROMPT = """
You are helping train a sound assistant that can take audio inputs and output text.
You can hear an audio file with the following metadata tags:
{captions}
{question}
Include the question and answer.
"""
QUESTIONS = [
"Ask a question about the content of the audio.",
"Ask a complex question about the content of the audio.",
"Ask a complex question that is relevant to the content of the audio, for example, asking about background knowledge of the things mentioned. Do not ask about uncertain details.",
"Ask a complex question that is relevant to the content of the audio, for example, asking about the events referred to in the audio. Do not ask about uncertain details.",
"Ask about your thoughts on the audio.",
"Ask about what occurs in the audio.",
"Ask a question on a topic that related to the audio.",
"Ask a question that classifies the audio in some way.",
"Ask a question that can only be answered by listening to the audio.",
]
OPENAI_TOOLS = [
{
"type": "function",
"function": {
"name": "create_chat",
"description": "Create a training example",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question, must be provided",
},
"answer": {
"type": "string",
"description": "The answer to the question, must be provided",
},
},
"required": ["question", "answer"],
},
},
}
]
def _build_convo(row) -> List:
client = openai.Client()
captions = [row["metadataTags"]]
paths = [row["url"]]
captions_text = "\n".join([f"{cap}" for i, cap in enumerate(captions)])
prompt = PROMPT.format(
captions=captions_text, question=random.choice(QUESTIONS)
).strip()
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[{"role": "system", "content": prompt}],
tools=OPENAI_TOOLS,
tool_choice={"type": "function", "function": {"name": "create_chat"}},
)
resp = json.loads(completion.choices[0].message.tool_calls[0].function.arguments)
if "answer" not in resp:
print(resp)
q = resp["question"]
a = resp["answer"]
if random.choice([True, False]):
q = "<sound>" * len(captions) + " " + q
else:
q = q + " " + "<sound>" * len(captions)
example = {
"sounds": paths,
"messages": [
{
"role": ROLE_USER,
"content": q,
},
{
"role": ROLE_ASSISTANT,
"content": a,
},
],
}
return example
def main(args):
data = load_dataset("Chr0my/Epidemic_sounds", split="train")
data_idxs = list(range(len(data)))
os.makedirs(args.cache_folder, exist_ok=True)
def gen(seeds):
r = random.Random(seeds[0] + 3)
cache = open(
os.path.join(args.cache_folder, f"gpt-cache.{seeds[0]}.jsonl"), "a"
)
i = 0
while i < len(seeds):
selected_idxs = r.sample(data_idxs, k=1)[0]
selected_example = data[selected_idxs]
try:
example = _build_convo(selected_example)
cache.write(json.dumps(example) + "\n")
yield example
i += 1
except Exception as e:
print(e)
continue
cache.close()
ds = Dataset.from_generator(
gen,
num_proc=args.num_proc,
gen_kwargs={"seeds": list(range(args.num_examples))},
)
ds.save_to_disk(args.output_folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--output_folder",
type=str,
default="/data/clap-gpt-finetune",
)
parser.add_argument(
"-c",
"--cache_folder",
type=str,
default="/data/clap-gpt-finetune-cache",
)
parser.add_argument("-n", "--num_examples", type=int, default=100_000)
parser.add_argument("-p", "--num_proc", type=int, default=10)
args = parser.parse_args()
main(args)
| [
"\nYou are helping train a sound assistant that can take audio inputs and output text.\n\nYou can hear an audio file with the following metadata tags:\n{captions}\n\n{question}\n\nInclude the question and answer.\n"
] |
2024-01-10 | sshh12/multi_token | scripts~clap_gpt_build_pretrain_dataset.py | from typing import List
import argparse
import json
import os
import random
import openai
from datasets import Dataset, load_dataset
from multi_token.constants import ROLE_ASSISTANT, ROLE_USER
PROMPT = """
You are helping write captions for audio clips.
Here are the tags for the audio clip you are captioning:
{captions}
Write a brief caption for the audio clip.
"""
PRETRAIN_PHRASES = [
"What is happening in <sound>?",
"Describe the sound. <sound>",
"<sound> Provide a description of the audio.",
"Can you interpret <sound>?",
"Please explain what's happening in <sound>",
"What does <sound> represent?",
"Could you describe <sound> for me?",
"What's the content of <sound>?",
"Can you depict <sound>?",
"What is <sound>?",
"In the audo clip, <sound>, what is happening?",
"Provide a description of the sound. <sound>",
"Provide a caption for the sound. <sound>",
]
OPENAI_TOOLS = [
{
"type": "function",
"function": {
"name": "write_caption",
"description": "Write a caption for an audio clip",
"parameters": {
"type": "object",
"properties": {
"caption": {
"type": "string",
},
},
"required": ["caption"],
},
},
}
]
def _build_convo(row) -> List:
client = openai.Client()
captions = [row["metadataTags"]]
sounds = [row["url"]]
captions_text = "\n".join([f'Tags: "{cap}"' for i, cap in enumerate(captions)])
prompt = PROMPT.format(captions=captions_text).strip()
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[{"role": "system", "content": prompt}],
tools=OPENAI_TOOLS,
tool_choice={"type": "function", "function": {"name": "write_caption"}},
)
resp = json.loads(completion.choices[0].message.tool_calls[0].function.arguments)
caption = resp["caption"]
q = random.choice(PRETRAIN_PHRASES)
example = {
"sounds": sounds,
"messages": [
{
"role": ROLE_USER,
"content": q,
},
{
"role": ROLE_ASSISTANT,
"content": caption,
},
],
}
return example
def main(args):
data = load_dataset("Chr0my/Epidemic_sounds", split="train")
os.makedirs(args.cache_folder, exist_ok=True)
def gen(seeds):
cache = open(
os.path.join(args.cache_folder, f"gpt-cache.{seeds[0]}.jsonl"), "a"
)
for s in seeds:
selected_row = data[s]
try:
example = _build_convo(selected_row)
cache.write(json.dumps(example) + "\n")
yield example
except Exception as e:
print(e)
continue
cache.close()
idxs = list(range(len(data)))
random.shuffle(idxs)
ds = Dataset.from_generator(
gen,
num_proc=args.num_proc,
gen_kwargs={"seeds": idxs},
)
ds.save_to_disk(args.output_folder)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--output_folder",
type=str,
default="/data/clap-gpt-pretrain",
)
parser.add_argument(
"-c",
"--cache_folder",
type=str,
default="/data/clap-gpt-pretrain-cache",
)
parser.add_argument("-n", "--num_examples", type=int, default=500_000)
parser.add_argument("-p", "--num_proc", type=int, default=10)
args = parser.parse_args()
main(args)
| [
"You are helping write captions for audio clips.\n\nHere are the tags for the audio clip you are captioning:\nPLACEHOLDER\n\nWrite a brief caption for the audio clip.",
"\nYou are helping write captions for audio clips.\n\nHere are the tags for the audio clip you are captioning:\n{captions}\n\nWrite a brief caption for the audio clip.\n"
] |
2024-01-10 | obahamonde/aio-agents | aio_agents~tools~content.py | from typing import Any, List, Optional
import openai
from aiofauna import *
from pydantic import Field
from ..schemas import FunctionDocument
async def chat_completion(text: str, context: Optional[str] = None) -> str:
if context is not None:
messages = [
{"role": "user", "content": text},
{"role": "system", "content": context},
]
else:
messages = [{"role": "user", "content": text}]
response = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo-16k-0613", messages=messages
)
return response["choices"][0]["message"]["content"]
class Quiz(FunctionDocument):
"""Generates a set of questions of a given topic."""
topic: str = Field(description="Topic to generate questions about.")
quantity: int = Field(
default=5, gt=0, lt=11, description="Number of questions to generate."
)
questions: Optional[List[str]] = Field(
default=None, description="List of questions to generate answers for."
)
async def ask(self, **kwargs: Any) -> str:
context = f"You are an expert on {self.topic}."
text = f"Please formulate a non trivial question about {self.topic} to asses candidate knowledge about the subject. These questions were already asked: {self.questions}, ask a different one."
response = await chat_completion(text, context=context)
if self.questions is None:
self.questions = [response]
else:
self.questions.append(response)
return response
@process_time
@handle_errors
async def run(self, **kwargs: Any) -> List[str]:
for _ in range(self.quantity):
await self.ask(**kwargs)
return self.questions
class Song(FunctionDocument):
"""Generates a song of a given genre."""
title: str = Field(description="Title of the song.")
genre: str = Field(default="pop", description="Genre of the song.")
lyrics: Optional[str] = Field(default=None, description="Lyrics of the song.")
async def run(self, **kwargs: Any) -> str:
context = f"You are a songwriter. You are writing a {self.genre} song called {self.title}."
text = f"Generate lyrics for the song {self.title}."
response = await chat_completion(text, context=context)
self.lyrics = response
return self
class Blog(FunctionDocument):
"""Generates a blog post of a given topic."""
topic: str = Field(description="Topic of the blog post.")
title: str = Field(description="Title of the blog post.")
content: Optional[str] = Field(
default=None, description="Content of the blog post."
)
async def run(self, **kwargs: Any) -> str:
context = f"You are a blogger. You are writing a blog post about {self.topic}."
text = f"Generate content for the blog post {self.title}."
response = await chat_completion(text, context=context)
self.content = response
return self
| [] |
2024-01-10 | cmtabr/ros2_assistant | bridge~server.py | #! /usr/bin/env python3
# Libraries importing
import re
import subprocess
from time import sleep
from decouple import config
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import chroma
import roslibpy
from twilio.rest import Client
# Utilities importing
# Environment variables definition
bridge_launched = False
OPENAI_KEY = config('OPENAI_API_KEY')
ACCOUNT_SID = config('ACCOUNT_SID')
AUTH_TOKEN = config('AUTH_TOKEN')
FROM_NUMBER = config('FROM_NUMBER')
TO_NUMBER = config('TO_NUMBER')
# Functions definition
def bridge_initializer(bridge_launched):
launch_command = "ros2 launch rosbridge_server rosbridge_websocket_launch.xml"
try:
if not bridge_launched:
subprocess.Popen(launch_command, shell=True)
bridge_launched = True
except subprocess.CalledProcessError as e:
print(f"Error executing command: {e}")
# Class definition
class ChatBotModel():
def __init__(self):
self._model = ChatOpenAI(model="gpt-3.5-turbo", api_key=OPENAI_KEY)
self._retriever = self.archive_loader_and_vectorizer()
self._template = """\
Rely only on the context to answer the question.
{context}
With the context, the response to the question should be in the expected form literally:
The item ordered by the user was -> item_name.
Item position is -> [x, y].
Question: {question}
"""
self._prompt = ChatPromptTemplate.from_template(self._template)
def archive_loader_and_vectorizer(self):
"""
This function loads txt documents from current directory
and vectorizes them
"""
loader = DirectoryLoader('../',
glob='**/*.txt',
loader_cls=TextLoader,
show_progress=True
)
documents = loader.load()
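# A 30k-character chunk size effectively keeps each of these small text files
# in a single chunk (assumed intent for this corpus).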
text_splitter = CharacterTextSplitter(chunk_size=30000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
vectorstore = chroma.Chroma.from_documents(docs, embedding_function)
retriever = vectorstore.as_retriever()
return retriever
def chat(self, text):
chain = (
{"context": self._retriever, "question": RunnablePassthrough()}
| self._prompt
| self._model
)
output_text = ""
for s in chain.stream(text):
print(s.content, end="", flush=True)
output_text += s.content
return output_text
class Twilio:
def __init__(self):
self.account_sid = ACCOUNT_SID
self.auth_token = AUTH_TOKEN
self.client = Client(self.account_sid, self.auth_token)
def send_whatsapp(self, body, from_, to):
try:
message = self.client.messages.create(
body=body,
from_=from_,
to=to
)
print("Mensagem enviada com sucesso!")
except Exception as e:
print("Erro ao enviar mensagem: ", e)
def main():
twilio = Twilio()
client = roslibpy.Ros(host='localhost', port=9090)
client.run()
publisher = roslibpy.Topic(client, name='/web_socket_topic',
message_type='std_msgs/String')
chat_model = ChatBotModel()
while client.is_connected:
output = chat_model.chat('prego')
twilio.send_whatsapp(body=output, from_='whatsapp:'+ FROM_NUMBER, to='whatsapp:'+ TO_NUMBER)
publisher.publish(roslibpy.Message({'data': output}))
print(f'Data published to topic /web_socket_topic: {output}')
sleep(2)
break
publisher.unadvertise()
client.terminate()
print("client disconnect")
if __name__ == "__main__":
main() | [
"prego"
] |
2024-01-10 | nestauk/dap_aria_mapping | dap_aria_mapping~notebooks~cohere~cohere_prompts.py | import json, argparse
import numpy as np
from dap_aria_mapping.getters.taxonomies import get_topic_names
from dap_aria_mapping import PROJECT_DIR
from typing import Dict, List, Tuple, Sequence
import cohere
from collections import defaultdict
from tqdm import tqdm
from time import sleep
co = cohere.Client("pArhjogTnlZGIUEocMpv1lVZKv3RHsf7KIgTPk0F")
prompt = f"""
This program generates a topic label given a list of entities contained within a topic.
Topic entities: Probability space, Sample space, Event, Elementary event, Mutual exclusivity, Outcome, Singleton, Experiment, Bernoulli trial, Probability distribution, Bernoulli distribution, Binomial distribution, Normal distribution, Probability measure.
Topic label: Probability theory.
--
Topic entities: Chromosome, DNA, RNA, Genome, Heredity, Mutation, Nucleotide, Variation.
Topic label: Genetics.
--
Topic entities: Breakbeat, Chiptune, Dancehall, Downtempo, Drum and bass, Dub, Dubstep, Electro, EDM, Grime, Hardcore, House, IDM, Reggaeton, Synth-pop, Techno.
Topic label: Electronic music.
--
Topic entities: Ruy Lopez, French Defence, Petrov's Defence, Vienna Game, Centre Game, King's Gambit, Philidor Defence, Giuoco Piano, Evans Gambit, Hungarian Defence, Scotch Game, Four Knights Game, King's Pawn Opening.
Topic label: Chess openings.
--
Topic entities: Arial, Verdana, Tahoma, Trebuchet, Times New Roman, Georgia, Garamond, Courier New.
Topic label: Typefaces.
--
Topic entities: Amharic, Dinka, Ibo, Kirundi, Mandinka, Nuer, Oromo, Swahili, Tigrigna, Wolof, Xhosa, Yoruba, Zulu.
Topic label: African languages.
--
Topic entities: Algorithm, Mathematical optimization, Machine learning, Classical mechanics, Geometry.
Topic label: Mathematics and Computer Science.
--
Topic entities: Monet, Renoir, Degas, Cezanne, Manet, Toulouse-Lautrec, Van Gogh, Gauguin, Pissarro, Sisley.
Topic label: Impressionist artists.
--
Topic entities: Pythagoras, Euclid, Archimedes, Apollonius, Plato, Aristotle, Hippocrates, Galen, Ptolemy.
Topic label: Ancient Greek mathematicians and philosophers.
--
Topic entities: Amazon, Google, Apple, Facebook, Microsoft, Alibaba, Tencent, Tesla, Netflix, Oracle.
Topic label: Technology companies.
--
Topic entities: Lagrange, Hamilton, Poisson, Cauchy, Gauss, Riemann, Noether, Euler, Leibniz, Newton.
Topic label: 18th and 19th century mathematicians and physicists.
--
Topic entities: Beethoven, Mozart, Chopin, Bach, Tchaikovsky, Haydn, Brahms, Schubert, Handel, Wagner.
Topic label: Classical composers.
--
Topic entities: Fossils, Extinction, Adaptation, Natural selection, Evolution, Paleontology, Taxonomy, Darwin, Mendel.
Topic label: Evolutionary biology.
--
Topic entities: Plate tectonics, Earthquake, Volcano, Tsunami, Magma, Lava, Geology, Seismology, Mineralogy.
Topic label: Earth sciences.
--
Topic entities: Keynes, Marx, Friedman, Smith, Hayek, Schumpeter, Malthus, Ricardo, Hegel, Adam Smith.
Topic label: Economists and economic theories.
--
Topic entities: Relativity, Quantum mechanics, Electromagnetism, Thermodynamics, Astrophysics, Cosmology, Particle physics, String theory.
Topic label: Physics.
--
Topic entities: Shakespeare, Tolstoy, Dante, Chaucer, Austen, Hemingway, Whitman, Faulkner, Orwell, Camus.
Topic label: Classic authors.
--
Topic entities: Mona Lisa, Sistine Chapel, The Last Supper, The Starry Night, The Persistence of Memory, The Scream, The Kiss, The Dance, The Water Lilies.
Topic label: Famous paintings and artists.
--
"""
def evaluate_entities(dictionary: Dict[str, str], key: str, val: str) -> None:
"""Evaluates a set of entities given a few-prompted examples of entities
and their corresponding labels.
Args:
dictionary (Dict[str, str]): A dictionary of topic entities and the
corresponding topic label. Iteratively filled in the function.
key (str): The topic ID.
val (str): The topic entities.
"""
eval = prompt + "Topic_entities: " + val[3:] + "\n\nTopic_label:"
response = co.generate(
model="xlarge",
prompt=eval,
max_tokens=20,
num_generations=5,
temperature=0.6,
stop_sequences=["--"],
)
topic_label = response.generations[0].text
label_processed = (topic_label.split("\n")[0]).strip()
dictionary.update({f"Topic {key}": label_processed})
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--taxonomy",
nargs="+",
help=(
"The type of taxonomy to use. Can be a single taxonomy or a sequence \
of taxonomies, and accepts 'cooccur', 'centroids' or 'imbalanced' as tags."
),
required=True,
)
parser.add_argument(
"--name_type",
type=str,
default="entity",
help="Which taxonomy labels to use. It can be 'entity' or 'journal'",
)
parser.add_argument(
"--levels", nargs="+", help="The levels of the taxonomy to use.", required=True
)
args = parser.parse_args()
for taxonomy in args.taxonomy:
for level in [int(x) for x in args.levels]:
dict_names = defaultdict(list)
dict_entities = get_topic_names("cooccur", args.name_type, level, True)
for key, val in tqdm(dict_entities.items()):
sleep(np.random.uniform(1, 5))
try:
evaluate_entities(dictionary=dict_names, key=key, val=val)
                except Exception:
                    dict_names.update({"Topic " + key: "Error"})
with open(
PROJECT_DIR
/ "dap_aria_mapping"
/ "notebooks"
/ "cohere"
/ "prompt_outputs"
/ f"cohere_labels_{taxonomy}_class_{args.name_type}_level_{level}.json",
"w",
) as f:
json.dump(dict_names, f)
| [
"\nThis program generates a topic label given a list of entities contained within a topic.\n\nTopic entities: Probability space, Sample space, Event, Elementary event, Mutual exclusivity, Outcome, Singleton, Experiment, Bernoulli trial, Probability distribution, Bernoulli distribution, Binomial distribution, Normal distribution, Probability measure.\nTopic label: Probability theory.\n\n--\nTopic entities: Chromosome, DNA, RNA, Genome, Heredity, Mutation, Nucleotide, Variation.\nTopic label: Genetics.\n\n--\nTopic entities: Breakbeat, Chiptune, Dancehall, Downtempo, Drum and bass, Dub, Dubstep, Electro, EDM, Grime, Hardcore, House, IDM, Reggaeton, Synth-pop, Techno.\nTopic label: Electronic music.\n\n--\nTopic entities: Ruy Lopez, French Defence, Petrov's Defence, Vienna Game, Centre Game, King's Gambit, Philidor Defence, Giuoco Piano, Evans Gambit, Hungarian Defence, Scotch Game, Four Knights Game, King's Pawn Opening.\nTopic label: Chess openings.\n\n--\nTopic entities: Arial, Verdana, Tahoma, Trebuchet, Times New Roman, Georgia, Garamond, Courier New.\nTopic label: Typefaces.\n\n--\nTopic entities: Amharic, Dinka, Ibo, Kirundi, Mandinka, Nuer, Oromo, Swahili, Tigrigna, Wolof, Xhosa, Yoruba, Zulu.\nTopic label: African languages.\n\n--\nTopic entities: Algorithm, Mathematical optimization, Machine learning, Classical mechanics, Geometry.\nTopic label: Mathematics and Computer Science.\n\n--\nTopic entities: Monet, Renoir, Degas, Cezanne, Manet, Toulouse-Lautrec, Van Gogh, Gauguin, Pissarro, Sisley.\nTopic label: Impressionist artists.\n\n--\nTopic entities: Pythagoras, Euclid, Archimedes, Apollonius, Plato, Aristotle, Hippocrates, Galen, Ptolemy.\nTopic label: Ancient Greek mathematicians and philosophers.\n\n--\nTopic entities: Amazon, Google, Apple, Facebook, Microsoft, Alibaba, Tencent, Tesla, Netflix, Oracle.\nTopic label: Technology companies.\n--\n\nTopic entities: Lagrange, Hamilton, Poisson, Cauchy, Gauss, Riemann, Noether, Euler, Leibniz, Newton.\nTopic label: 18th and 19th century mathematicians and physicists.\n--\nTopic entities: Beethoven, Mozart, Chopin, Bach, Tchaikovsky, Haydn, Brahms, Schubert, Handel, Wagner.\nTopic label: Classical composers.\n\n--\nTopic entities: Fossils, Extinction, Adaptation, Natural selection, Evolution, Paleontology, Taxonomy, Darwin, Mendel.\nTopic label: Evolutionary biology.\n\n--\nTopic entities: Plate tectonics, Earthquake, Volcano, Tsunami, Magma, Lava, Geology, Seismology, Mineralogy.\nTopic label: Earth sciences.\n\n--\nTopic entities: Keynes, Marx, Friedman, Smith, Hayek, Schumpeter, Malthus, Ricardo, Hegel, Adam Smith.\nTopic label: Economists and economic theories.\n\n--\nTopic entities: Relativity, Quantum mechanics, Electromagnetism, Thermodynamics, Astrophysics, Cosmology, Particle physics, String theory.\nTopic label: Physics.\n\n--\nTopic entities: Shakespeare, Tolstoy, Dante, Chaucer, Austen, Hemingway, Whitman, Faulkner, Orwell, Camus.\nTopic label: Classic authors.\n\n--\nTopic entities: Mona Lisa, Sistine Chapel, The Last Supper, The Starry Night, The Persistence of Memory, The Scream, The Kiss, The Dance, The Water Lilies.\nTopic label:Famous paintings and artists.\n\n--\n\n"
] |
2024-01-10 | ayataka0nk/meeting-recoder | modules.py | import io
from typing import Any, Generator
import soundcard as sc
import numpy as np
from numpy.typing import NDArray
from numpy import float32
from multiprocessing import Process, Queue
from openai import OpenAI
import soundfile as sf
INTERVAL = 3
BUFFER_SIZE = 4096
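# 100-tap moving-average kernel, used later to smooth the squared signal into a volume envelope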
b = np.ones(100) / 100
SAMPLE_RATE = 16000
CHANNELS = 1
# TODO: make this adjustable via a config file.
THRESHOLD = 0.09
def record(microphone) -> Generator[NDArray[np.float32], Any, Any]:
global SAMPLE_RATE, CHANNELS
with microphone.recorder(samplerate=SAMPLE_RATE, channels=CHANNELS) as recorder:
while True:
data = recorder.record(BUFFER_SIZE)
yield data
def record_speaker():
mic = sc.get_microphone(id=str(sc.default_speaker().name), include_loopback=True)
yield from record(mic)
def record_microphone():
mic = sc.default_microphone()
yield from record(mic)
def record_speaker_multi(q):
    for piece in record_speaker():
        q.put(piece)
def mix_audio(input1: NDArray[float32], input2: NDArray[float32]):
return input1 + input2
def record_microphone_multi(q):
    for piece in record_microphone():
        q.put(piece)
def record_mix_audio():
speakerQueue = Queue()
micQueue = Queue()
speakerProcess = Process(target=record_speaker_multi, args=(speakerQueue,))
micProcess = Process(target=record_microphone_multi, args=(micQueue,))
speakerProcess.start()
micProcess.start()
try:
while True:
newSpeakerAudio = speakerQueue.get()
newMicAudio = micQueue.get()
yield mix_audio(newSpeakerAudio, newMicAudio)
except KeyboardInterrupt:
speakerProcess.terminate()
micProcess.terminate()
raise
finally:
speakerProcess.terminate()
micProcess.terminate()
def sample_length(second: int):
return second * SAMPLE_RATE
def slice_by_silence(
stream: Generator[NDArray[float32], Any, Any]
) -> Generator[NDArray[float32], Any, Any]:
global SAMPLE_RATE, CHANNELS
n = 0
audio = np.empty(SAMPLE_RATE * INTERVAL + BUFFER_SIZE, dtype=np.float32)
try:
while True:
while n < SAMPLE_RATE * INTERVAL:
data = next(stream)
last = n + len(data)
audio[n:last] = data.reshape(-1)
n += len(data)
# find silent periods
m = n * 4 // 5
vol = np.convolve(audio[m:n] ** 2, b, "same")
m += vol.argmin()
yield audio[:m]
            # carry over the leftover tail from the previous cycle
audio_prev = audio
audio = np.empty(SAMPLE_RATE * INTERVAL + BUFFER_SIZE, dtype=np.float32)
audio[: n - m] = audio_prev[m:n]
n = n - m
except KeyboardInterrupt:
yield audio[:n]
raise
def slice_by_seconds(
seconds: int, stream: Generator[NDArray[float32], Any, Any]
) -> Generator[NDArray[float32], Any, Any]:
try:
sample_length = seconds * SAMPLE_RATE
data = np.empty(0, dtype=np.float32)
        for piece in stream:
            data = np.append(data, piece)
if len(data) >= sample_length:
yield data
data = np.empty(0, dtype=np.float32)
except KeyboardInterrupt:
yield data
raise
def isIncludeVoice(audio: NDArray[float32]):
    # Sending pure silence to Whisper misbehaves, so detect speech first.
    # For now, judge by peak volume alone.
    # TODO: more precise speech detection, e.g. require at least one zone of
    # continuous sound lasting 0.5 s or longer (a sketch of that idea follows
    # this function). A noise filter is another option, but could hurt users with
    # low-quality mics; cutting only pop noise before peak-thresholding may be best.
# min = np.min(np.abs(audio))
maxVal = np.max(np.abs(audio))
# print(min, max)
return maxVal > THRESHOLD
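# A hedged sketch of the stricter detection described in the TODO above: require at
# least one continuous above-threshold run lasting `min_duration` seconds instead of
# a single loud sample. The function name and `min_duration` parameter are
# assumptions for illustration, not part of the original module.
def has_sustained_voice(audio: NDArray[float32], min_duration: float = 0.5) -> bool:
    loud = np.abs(audio) > THRESHOLD  # boolean mask of above-threshold samples
    needed = int(min_duration * SAMPLE_RATE)  # samples required for min_duration
    run = 0  # length of the current consecutive loud run
    for flag in loud:
        run = run + 1 if flag else 0
        if run >= needed:
            return True
    return False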
def speechToText(audio: NDArray[float32], language: str, prompt: str | None = None):
    # TODO: tune the behavior when a KeyboardInterrupt arrives mid-processing.
if isIncludeVoice(audio):
buffer = io.BytesIO()
sf.write(buffer, audio, SAMPLE_RATE, format="wav")
buffer.name = "output.wav"
openai = OpenAI()
result = openai.audio.transcriptions.create(
model="whisper-1", file=buffer, language=language, prompt=prompt
)
return result.text
else:
return ""
def record_audio_limited() -> NDArray[float32]:
global SAMPLE_RATE, CHANNELS, INTERVAL
mic = sc.default_microphone()
with mic.recorder(samplerate=SAMPLE_RATE, channels=CHANNELS) as recorder:
n = 0
audio = np.empty(SAMPLE_RATE * INTERVAL + BUFFER_SIZE, dtype=np.float32)
while n < SAMPLE_RATE * INTERVAL:
data = recorder.record(BUFFER_SIZE)
last = n + len(data)
audio[n:last] = data.reshape(-1)
n += len(data)
return audio
def record_audio2(stream):
global SAMPLE_RATE, CHANNELS
n = 0
audio = np.empty(SAMPLE_RATE * INTERVAL + BUFFER_SIZE, dtype=np.float32)
try:
while True:
while n < SAMPLE_RATE * INTERVAL:
data = next(stream)
last = n + len(data)
audio[n:last] = data.reshape(-1)
n += len(data)
# find silent periods
m = n * 4 // 5
vol = np.convolve(audio[m:n] ** 2, b, "same")
m += vol.argmin()
yield audio[:m]
            # carry over the leftover tail from the previous cycle
audio_prev = audio
audio = np.empty(SAMPLE_RATE * INTERVAL + BUFFER_SIZE, dtype=np.float32)
audio[: n - m] = audio_prev[m:n]
n = n - m
except KeyboardInterrupt:
yield audio[:n]
raise
def record_audio(sample_rate: int, channels: int):
speaker = sc.get_microphone(
id=str(sc.default_speaker().name), include_loopback=True
)
with speaker.recorder(samplerate=sample_rate, channels=channels) as recorder:
audio = np.empty(sample_rate * INTERVAL + BUFFER_SIZE, dtype=np.float32)
n = 0
try:
while True:
while n < sample_rate * INTERVAL:
data = recorder.record(BUFFER_SIZE)
last = n + len(data)
audio[n:last] = data.reshape(-1)
n += len(data)
# find silent periods
m = n * 4 // 5
vol = np.convolve(audio[m:n] ** 2, b, "same")
m += vol.argmin()
yield audio[:m]
                # carry over the leftover tail from the previous cycle
audio_prev = audio
audio = np.empty(sample_rate * INTERVAL + BUFFER_SIZE, dtype=np.float32)
audio[: n - m] = audio_prev[m:n]
n = n - m
except KeyboardInterrupt:
yield audio[:n]
raise
| [] |
2024-01-10 | keyboardP/ChatGPMe | ChatGPMe.py | # dotenv is a library that allows us to securely load env variables
from dotenv import load_dotenv
# used to load an individual file (TextLoader) or multiple files (DirectoryLoader)
from langchain.document_loaders import TextLoader, DirectoryLoader
# used to split the text within documents and chunk the data
from langchain.text_splitter import CharacterTextSplitter
# use embedding from OpenAI (but others available)
from langchain.embeddings import OpenAIEmbeddings
# using Chroma database to store our vector embeddings
from langchain.vectorstores import Chroma
# use this to configure the Chroma database
from chromadb.config import Settings
# We'll use the chain that allows Question and Answering and provides source of where it got the data from. This is useful if you have multiple files. If you don't need the source, you can use RetrievalQA
from langchain.chains import RetrievalQAWithSourcesChain
# We'll use the OpenAI Chat model to interact with the embeddings. This is the model that allows us to query in a similar way to ChatGPT
from langchain.chat_models import ChatOpenAI
# We'll need this for reading/storing from directories
import os
# looks for the .env file and loads the variable(s)
load_dotenv()
# prepare directories for DB
ABS_PATH: str = os.path.dirname(os.path.abspath(__file__))
DB_DIR: str = os.path.join(ABS_PATH, "db")
# use TextLoader for an individual file
# explicitly stating the encoding is also recommmended
doc_loader: TextLoader = TextLoader('MSFT_Call_Transcript.txt', encoding="utf8")
# if you want to load multiple files, place them in a directory
# and use DirectoryLoader; comment above and uncomment below
#doc_loader: DirectoryLoader = DirectoryLoader('my_directory')
# load the document
document: str = doc_loader.load()
# obtain an instance of the splitter with the relevant parameters
text_splitter: CharacterTextSplitter = CharacterTextSplitter(chunk_size=512, chunk_overlap=0)
# split the document data
split_docs: list[str] = text_splitter.split_documents(document)
# load the embeddings from OpenAI
openai_embeddings: OpenAIEmbeddings = OpenAIEmbeddings()
# configure our database
client_settings: Settings = Settings(
chroma_db_impl="duckdb+parquet", #we'll store as parquet files/DuckDB
persist_directory=DB_DIR, #location to store
anonymized_telemetry=False # optional but showing how to toggle telemetry
)
# check if the database exists already
# if not, create it, otherwise read from the database
if not os.path.exists(DB_DIR):
# Create the database from the document(s) above and use the OpenAI embeddings for the word to vector conversions. We also pass the "persist_directory" parameter which means
# this won't be a transient database, it will be stored on the hard drive at the DB_DIR location. We also pass the settings we created earlier and give the collection a name
vector_store: Chroma = Chroma.from_documents(split_docs, openai_embeddings, persist_directory=DB_DIR,
client_settings=client_settings,
collection_name="transcripts_store")
# It's key to called the persist() method otherwise it won't be saved
vector_store.persist()
else:
# As the database already exists, load the collection from there
vector_store: Chroma = Chroma(collection_name="transcripts_store", persist_directory=DB_DIR, embedding_function=openai_embeddings, client_settings=client_settings)
# create and configure our chain
# we're using ChatOpenAI LLM with the 'gpt-3.5-turbo' model
# we're setting the temperature to 0. The higher the temperature, the more 'creative' the answers. In my case, I want as factual and direct from source info as possible
# 'stuff' is the default chain_type which means it uses all the data from the document
# set the retriever to be our embeddings database
qa_with_source: RetrievalQAWithSourcesChain = RetrievalQAWithSourcesChain.from_chain_type(
llm=ChatOpenAI(temperature=0, model_name='gpt-3.5-turbo'),
chain_type="stuff",
retriever = vector_store.as_retriever()
)
def query_document(question: str) -> dict[str, str]:
return qa_with_source({"question": question})
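# e.g. query_document("What revenue figure was mentioned?") returns a dict with
# "answer" and "sources" keys (the question text here is illustrative)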
# loop through to allow the user to ask questions until they type in 'quit'
while(True):
# make the user input yellow using ANSI codes
print("What is your query? ", end="")
user_query : str = input("\033[33m")
print("\033[0m")
if(user_query == "quit"):
break
response: dict[str, str] = query_document(user_query)
# make the answer green and source blue using ANSI codes
print(f'Answer: \033[32m{response["answer"]}\033[0m')
print(f'\033[34mSources: {response["sources"]}\033[0m')
| [] |
2024-01-10 | KennethEnevoldsen/scandinavian-embedding-benchmark | src~seb~seb_models~cohere_models.py | """
The openai embedding api's evaluated on the SEB benchmark.
"""
import logging
from collections.abc import Sequence
from functools import partial
import torch
from seb.model_interface import ModelInterface, ModelMeta, SebModel
from seb.registries import models
logger = logging.getLogger(__name__)
class CohereTextEmbeddingModel(ModelInterface):
def __init__(self, model_name: str) -> None:
self.model_name = model_name
@staticmethod
def create_sentence_blocks(
sentences: Sequence[str],
block_size: int,
) -> list[Sequence[str]]:
sent_blocks: list[Sequence[str]] = []
for i in range(0, len(sentences), block_size):
sent_blocks.append(sentences[i : i + block_size])
return sent_blocks
def get_embedding_dim(self) -> int:
v = self.encode(["get emb dim"])
return v.shape[1]
def encode(
self,
sentences: Sequence[str],
batch_size: int = 32, # noqa: ARG002
embed_type: str = "classification",
**kwargs: dict, # noqa: ARG002
) -> torch.Tensor:
import cohere # type: ignore
client = cohere.Client()
response = client.embed(
texts=list(sentences),
model=self.model_name,
input_type=embed_type,
)
return torch.tensor(response.embeddings)
@models.register("embed-multilingual-v3.0")
def create_embed_multilingual_v3() -> SebModel:
model_name = "embed-multilingual-v3.0"
meta = ModelMeta(
name=model_name,
huggingface_name=None,
reference="https://huggingface.co/Cohere/Cohere-embed-multilingual-v3.0",
languages=[],
open_source=False,
)
return SebModel(
loader=partial(CohereTextEmbeddingModel, model_name=model_name),
meta=meta,
)
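# Minimal usage sketch (assumptions: the `cohere` package is installed, cohere.Client()
# can find an API key in the environment, and SebModel exposes the loader directly --
# none of which this file alone guarantees):
#
#   seb_model = create_embed_multilingual_v3()
#   embeddings = seb_model.loader().encode(["Hej verden"])  # torch.Tensor of shape (1, dim)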
| [] |
2024-01-10 | mike-gee/webtranspose | src~webtranspose~scrape.py | import json
import logging
import os
import re
import uuid
import requests
from bs4 import BeautifulSoup
from .openai import OpenAIScraper
from .webt_api import run_webt_api
class Scraper:
def __init__(
self,
schema: dict,
scraper_id: str = None,
name: str = None,
render_js: bool = False,
verbose: bool = False,
scraper: OpenAIScraper = None,
api_key: str = None,
_created: bool = False,
):
"""
Initialize the Scraper object.
Args:
schema (dict): The schema for scraping.
scraper_id (str, optional): The ID of the scraper. Defaults to None.
name (str, optional): The name of the scraper. Defaults to None.
render_js (bool, optional): Whether to render JavaScript. Defaults to False.
verbose (bool, optional): Whether to print verbose output. Defaults to False.
scraper (OpenAIScraper, optional): The scraper object. Defaults to None.
api_key (str, optional): The API key. Defaults to None.
_created (bool, optional): Whether the scraper has been created. Defaults to False.
"""
self.api_key = api_key
if self.api_key is None:
self.api_key = os.environ.get("WEBTRANSPOSE_API_KEY")
self.name = name
if self.name is None:
self.name = "New Scraper"
self.schema = schema
self.verbose = verbose
self.scraper = scraper
self.render_js = render_js
self.scraper_id = scraper_id
if self.scraper is None:
self.scraper = OpenAIScraper()
if self.scraper_id is None:
self.scraper_id = str(uuid.uuid4())
self.created = _created
api_key = os.environ.get("WEBTRANSPOSE_API_KEY")
if api_key is None and self.api_key is None:
logging.warning(
"No Web Transpose API provided. Lite version in use...\n\nTo run the actual WebT AI Web Scraper the Web Transpose API, set the WEBTRANSPOSE_API_KEY from https://webtranspose.com. Run cheaper with logging and advanced analytics."
)
def __str__(self) -> str:
"""
Get a string representation of the Scraper object.
Returns:
str: The string representation of the Scraper object.
"""
status = self.status()
schema = json.dumps(status["schema"], indent=4)
return (
f"WebTransposeScraper(\n"
f" Status ID: {status['scraper_id']}\n"
f" Name: {status['name']}\n"
f" Render JS: {status['render_js']}\n"
f" Schema: {schema}\n"
f")"
)
def __repr__(self) -> str:
"""
Get a string representation of the Scraper object.
Returns:
str: The string representation of the Scraper object.
"""
status = self.status()
schema = json.dumps(status["schema"], indent=4)
return (
f"WebTransposeScraper(\n"
f" Status ID: {status['scraper_id']}\n"
f" Name: {status['name']}\n"
f" Render JS: {status['render_js']}\n"
f" Schema: {schema}\n"
f")"
)
def create_scraper_api(self):
"""
Creates a Scraper on https://webtranspose.com
"""
if self.verbose:
logging.info(f"Creating AI Web Scraper on Web Transpose...")
create_json = {
"name": self.name,
"schema": self.schema,
"render_js": self.render_js,
}
out_json = run_webt_api(
create_json,
"/v1/scraper/create",
self.api_key,
)
self.scraper_id = out_json["scraper_id"]
self.created = True
def scrape(self, url=None, html=None, timeout=30):
"""
Scrape the data from a given URL or HTML.
Args:
url (str, optional): The URL to scrape. Defaults to None.
html (str, optional): The HTML to scrape. Defaults to None.
timeout (int, optional): The timeout for the request. Defaults to 30.
Returns:
dict: The scraped data.
Raises:
ValueError: If neither URL nor HTML is provided.
"""
if self.verbose:
logging.info(f"Running Scraper({self.name}) on {url}...")
if self.api_key is None:
if url is not None:
response = requests.get(url, timeout=timeout)
soup = BeautifulSoup(response.content, "html.parser")
body = soup.body
                html = re.sub(r"\s+", " ", str(body)).strip()
if html is None:
raise ValueError("Must provide either a url or html.")
return self.scraper.scrape(
html,
self.schema,
)
else:
if not self.created:
self.create_scraper_api()
scrape_json = {
"scraper_id": self.scraper_id,
"url": url,
"html": html,
}
out_json = run_webt_api(
scrape_json,
"/v1/scraper/scrape",
self.api_key,
)
return out_json
def status(self):
"""
Get the status of the Scraper.
Returns:
dict: The status of the Scraper.
"""
if self.api_key is None or not self.created:
return {
"scraper_id": self.scraper_id,
"name": self.name,
"verbose": self.verbose,
"render_js": self.render_js,
"schema": self.schema,
}
else:
get_json = {
"scraper_id": self.scraper_id,
}
out_api = run_webt_api(
get_json,
"/v1/scraper/get",
self.api_key,
)
scraper = out_api["scraper"]
return {
"scraper_id": scraper["id"],
"name": scraper["name"],
"verbose": self.verbose,
"render_js": scraper["render_js"],
"schema": scraper["schema"],
}
def get_scraper(scraper_id, api_key: str = None):
"""
Get a Scraper object based on the scraper ID.
Args:
scraper_id (str): The ID of the scraper.
api_key (str, optional): The API key. Defaults to None.
Returns:
Scraper: The Scraper object.
Raises:
ValueError: If api_key is not provided.
"""
if api_key is None:
api_key = os.environ.get("WEBTRANSPOSE_API_KEY")
if api_key is not None:
get_json = {
"scraper_id": scraper_id,
}
out_json = run_webt_api(
get_json,
"/v1/scraper/get",
api_key,
)
scraper = out_json["scraper"]
return Scraper(
scraper_id=scraper["id"],
name=scraper["name"],
schema=scraper["schema"],
render_js=scraper["render_js"],
api_key=api_key,
_created=True,
)
raise ValueError("Must provide api_key or set WEBTRANSPOSE_API_KEY in environment variables.")
def list_scrapers(api_key: str = None):
"""
List all available scrapers.
Args:
api_key (str, optional): The API key. Defaults to None.
Returns:
list: A list of Scrapers.
Raises:
ValueError: If api_key is not provided.
"""
if api_key is None:
api_key = os.environ.get("WEBTRANSPOSE_API_KEY")
if api_key is not None:
out_json = run_webt_api(
{},
"/v1/scraper/list",
api_key,
)
return out_json["scrapers"]
raise ValueError("Must provide api_key or set WEBTRANSPOSE_API_KEY in environment variables.")
| [] |
2024-01-10 | PixelSculptor/IT-Dojo---question-tool | src~getAnswer.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# _context = """ You are a senior QA Engineer who working in company and having to mission:
# - leading in project for client
# - taking participate in technical interview
# You are a good dev at frontend technologies and knows lots of technical questions and short programming tasks to examine interns and juniors.
# When junior ask you question your answer to question in backticks with max limit 50 words but enough understandable for beginner."""
# _question = "What is test end to end(e2e)?"
def get_definition(_context, _question):
prompt = f"""
Context: ```{_context}```
Question: ```{_question}```
Answer questions based on passed context. Answer limit to 250 characters. If its too long please return below original answer also shorter answer that keeps sense of answer.
Expected format of answer is Markdown notation (lists, bold phrases, enumerating list, new section etc.) - please remember to add new line character after each sentence of answer to provide good Markdown formatting.
Emphasise keywords in answer based on ```{_question}``` content.
Answer should be in Markdown notation which recognize key words of question and answer.
"""
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0
)
print( response.choices[0].message["content"])
return response.choices[0].message["content"] | [
"\n Context: ```PLACEHOLDER```\n Question: ```PLACEHOLDER```\n Answer questions based on passed context. Answer limit to 250 characters. If its too long please return below original answer also shorter answer that keeps sense of answer.\n Expected format of answer is Markdown notation (lists, bold phrases, enumerating list, new section etc.) - please remember to add new line character after each sentence of answer to provide good Markdown formatting.\n Emphasise keywords in answer based on ```PLACEHOLDER``` content.\n Answer should be in Markdown notation which recognize key words of question and answer. \n"
] |
2024-01-10 | royaals/code-rangers | app2.py | from flask import Flask, render_template, request
import openai
from dotenv import load_dotenv
import os # Add this line
# Load environment variables
load_dotenv()
app = Flask(__name__) # Use __name__, not _name_
# Set up OpenAI API credentials
openai.api_key = os.getenv('OPENAI_API_KEY')
# Define the default route to return the index.html file
@app.route("/")
def index():
return render_template("index.html")
# Define the /api route to handle POST requests
@app.route("/api", methods=["POST"])
def api():
# Get the message from the POST request
user_message = request.json.get("message")
# Define a system message to set context as medical
system_message = {
"role": "system",
"content": "You are strictly a medical chatbot. Do not provide information outside of the medical domain. If a question isn't medical, inform the user and ask for a medical question."
}
# Send the system message and user message to OpenAI's API and receive the response
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
system_message,
{"role": "user", "content": user_message}
]
)
response = completion.choices[0].message
return response
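# Example request once the server is running (assumes Flask's default port 5000):
#
#   curl -X POST http://127.0.0.1:5000/api \
#        -H "Content-Type: application/json" \
#        -d '{"message": "What are common symptoms of the flu?"}'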
if __name__ == '__main__': # Use __name__, not _name_
app.run()
else:
print("You can only ask about Medical Related Questions")
| [
"You are strictly a medical chatbot. Do not provide information outside of the medical domain. If a question isn't medical, inform the user and ask for a medical question."
] |
2024-01-10 | watanabe3tipapa/OpenAIcontainer | src~upload.py | import os
from io import StringIO
import openai
import json
from IPython.display import display, HTML
from dotenv import load_dotenv
# get the OPENAI_API_KEY from the environment (docker-compose.yml)
openai.organization = os.environ.get( 'OPENAI_ORGANIZATION_ID' )
openai.api_key = os.environ.get( 'OPENAI_API_KEY' )
# file path of the training data
filepath_train = "./data/training.jsonl"
# upload the file (training data)
upload_file_train = openai.File.create(
    file=open(filepath_train, "rb"), # the file object (JSONL)
    purpose='fine-tune', # purpose of the upload
)
# print the result
print(upload_file_train) | [] |
2024-01-10 | watanabe3tipapa/OpenAIcontainer | src~fine-tuning.py | import os
from io import StringIO
import openai
import json
from IPython.display import display, HTML
from dotenv import load_dotenv
from datetime import datetime
# get the OPENAI_API_KEY from the environment (docker-compose.yml)
openai.organization = os.environ.get( 'OPENAI_ORGANIZATION_ID' )
openai.api_key = os.environ.get( 'OPENAI_API_KEY' )
# arguments
file_id_train = 'file-xxxxxxxxxxxxxxxxxxx' # file ID of the training data "file-xxxxxxxxxxxx"
model = 'davinci' # model to fine-tune (davinci, for lack of a better option)
# run
FineTune = openai.FineTune.create(training_file = file_id_train, # file ID of the training data
                                  model = model, # model
)
print(FineTune)
# status check
FineTune_data = FineTune.list().data
num = len(FineTune_data)
for i in range(num):
timestamp = FineTune_data[i].created_at
datetime = datetime.fromtimestamp(timestamp)
fine_tuned_id = FineTune_data[i].id
status = openai.FineTune.retrieve(id=fine_tuned_id).status
model = openai.FineTune.retrieve(id=fine_tuned_id).fine_tuned_model
print(f'Create At: {datetime}')
print(f'FineTune ID: {fine_tuned_id}')
print(f'Model: {model}')
    print(f'Status: {status}\n') | []
2024-01-10 | adtor97/digitalia_portal | utils~utils_chatgpt.py | import pandas as pd
import dash_bootstrap_components as dbc
from dash import dcc
from dash import dash_table
from dash import html
from flask import render_template, json
from datetime import datetime, date
from utils import utils_google, utils
import openai
import requests
import os
api_key=os.environ["CHATGPT_API_KEY"]
def chat_chatgpt(messages, model="gpt-3.5-turbo", api_key=api_key):
openai.api_key = api_key
response = openai.ChatCompletion.create(
model=model,
messages=messages
)
result = ''
for choice in response.choices:
result += choice.message.content
return result
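# e.g. chat_chatgpt([{"role": "user", "content": "Hola"}]) returns the model's reply
# as a single string (the message content here is illustrative)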
| [] |
2024-01-10 | adtor97/digitalia_portal | dashboards~tests.py | from utils import utils, utils_tests, utils_google, utils_chatgpt
import dash
import dash_bootstrap_components as dbc
from dash import html, dcc
from dash.dependencies import Input, Output, State
import pandas as pd
import openai
import json
# Function to validate the user's email
def validate_email(email_input, email_selected):
if email_input.lower() == email_selected.lower():
return True
else:
return False
# Function to generate the technical challenge
def generate_technical_challenge(position, level, example, user_input):
messages = [
{"role": "system", "content": "Eres un generador de retos técnicos cortos y creativos de tecnología en español. Los niveles de seniority son Junior, Medium, SemiSenior y Senior. Cada nivel tiene una mayor expectativa de habilidades y experiencia."},
{"role": "user", "content": f"Necesito un reto técnico para un postulante al puesto {position} de nivel {level}, da las indicaciones específicas para este nivel. El reto debe estar estimado para 6 horas máximo, no des retos muy largos, sé creativo. Por favor escríbelo tomando en cuenta que lo leerá el candidato final. Este es un ejemplo de reto técnico, solo un ejemplo, sé creativo pero con el mismo estilo y testeando mismas habilidades: {example}. Mi experiencia y perfil es el siguiente: {user_input}."},
]
print(messages[1]["content"])
response = utils_chatgpt.chat_chatgpt(messages)
return response
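# e.g. generate_technical_challenge("Data Engineer", "Junior", example_text, profile_text)
# returns the challenge as plain text (the argument values here are illustrative)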
def serve_layout():
layout = html.Div(
[
            # Title
dbc.Row(
[
dbc.Col(
html.H1(
"Postulaciones",
id="title-tests",
className="text-center",
style={"color":"#1b2b58", "font-size":"28px"}
),
sm=12,
lg=12
),
dbc.Col(
html.H2(
"Pruebas Técnicas",
id="subtitle-tests",
className="text-center",
style={"color":"#1b2b58", "font-size":"21px", "margin-bottom": "20px"}
),
sm=12,
lg=12
),
],
justify='center',
),
            # Dropdown to select the user's name and an Input to enter the email
dbc.Row(
[
dbc.Col(
[
html.Label(
html.Strong("1. Elige tu nombre"),
className="label"
),
dcc.Dropdown(id='dropdown-user-tests'),
html.Label(
html.Strong("2. Ingresa tu email"),
className="label"
),
dcc.Input(id='input-email-tests', type='text'),
dcc.Loading(
id="loading-validate",
type="circle",
children=html.Button('Validar', id='btn-validate-tests')
),
html.Div(id='div-error-tests'),
],
sm=12,
md=6,
lg=4
),
]
),
            # Dropdowns to select the open position and the level
dbc.Row(
[
dbc.Col(
[
html.Label(
html.Strong("3. Elige el puesto al que postulas"),
className="label"
),
dcc.Dropdown(id='dropdown-position-tests', style={'display': 'none'}),
html.Label(
html.Strong("4. Elige tu nivel"),
className="label"
),
dcc.Dropdown(
id='dropdown-level-tests',
options=[
{'label': 'Junior', 'value': 'Junior'},
{'label': 'Medium', 'value': 'Medium'},
{'label': 'SemiSenior', 'value': 'SemiSenior'},
{'label': 'Senior', 'value': 'Senior'}
],
style={'display': 'none'}
),
dcc.Loading(
id="loading-generate",
type="circle",
children=html.Button('Generar reto', id='btn-generate-tests', style={'display': 'none'})
),
],
sm=12,
md=6,
lg=4
),
]
),
            # Generated technical challenge
dbc.Row(
[
dbc.Col(
html.Div(id='div-challenge-tests', style={'display': 'none', 'border': '1px solid #ddd', 'padding': '20px', 'border-radius': '8px', 'background-color': '#f9f9f9', 'text-align': 'center'}),
sm=12,
md=12,
lg=12
),
],
style={'margin-top': '20px'}
),
            # Button to start solving the challenge
dbc.Row(
[
dbc.Col(
dcc.Loading(
id="loading-resolve",
type="circle",
children=html.Button('Deseo resolver el reto', id='btn-resolve-tests', style={'display': 'none'})
),
sm=12,
md=6,
lg=4
),
],
style={'margin-top': '20px'}
),
            # Message shown after accepting the challenge
dbc.Row(
[
dbc.Col(
html.Div(id='div-resolve-message', style={'display': 'none'}),
sm=12,
md=12,
lg=12
),
],
style={'margin-top': '20px'}
),
],
className="dash-inside-container",
)
return layout
def init_callbacks(dash_app):
@dash_app.callback(
Output('dropdown-user-tests', 'options'),
Input('dropdown-user-tests', 'id')
)
def update_user_dropdown(id):
        # Read the user names from Google Sheets
ws = utils_google.open_ws('Postula a Digitalia (Responses)', 'Form Responses 1')
df = utils_google.read_ws_data(ws)
df["names"] = df["Nombres"] + " " + df["Apellidos"]
options = [{'label': name, 'value': mail} for name, mail in zip(df['names'], df['Email Address'])]
return options
@dash_app.callback(
[Output('div-error-tests', 'children'),
Output('dropdown-position-tests', 'style'),
Output('dropdown-level-tests', 'style'),
Output('btn-generate-tests', 'style')],
Input('btn-validate-tests', 'n_clicks'),
State('input-email-tests', 'value'),
State('dropdown-user-tests', 'value')
)
def validate_user(n_clicks, email_input, email_selected):
if n_clicks is not None:
if not validate_email(email_input, email_selected):
return "Usuario no encontrado, por favor vuelva a intentar", {'display': 'none'}, {'display': 'none'}, {'display': 'none'}
else:
return "", {}, {}, {}
@dash_app.callback(
Output('dropdown-position-tests', 'options'),
Input('dropdown-position-tests', 'id')
)
def update_position_dropdown(id):
        # Read the open positions from Google Sheets
ws = utils_google.open_ws('Postula a Digitalia (Responses)', 'portal-positions')
df = utils_google.read_ws_data(ws)
options = [{'label': position, 'value': position} for position in df['position']]
return options
@dash_app.callback(
[Output('div-challenge-tests', 'children'),
Output('div-challenge-tests', 'style'),
Output('btn-resolve-tests', 'style')],
Input('btn-generate-tests', 'n_clicks'),
State('dropdown-position-tests', 'value'),
State('dropdown-level-tests', 'value'),
State('dropdown-user-tests', 'value')
)
def update_technical_challenge(n_clicks, position, level, user_email):
if n_clicks is not None and level is not None and position is not None:
            # Read the example challenge from Google Sheets
ws = utils_google.open_ws('Postula a Digitalia (Responses)', 'portal-positions')
df = utils_google.read_ws_data(ws)
example = df.loc[df['position'] == position, 'example'].iloc[0]
            # Read the applicant's profile
ws_user = utils_google.open_ws('Postula a Digitalia (Responses)', 'Form Responses 1')
df_user = utils_google.read_ws_data(ws_user)
user_input = df_user.loc[df_user['Email Address'] == user_email, 'Cuéntanos sobre ti cuando trabajas, skills, forma de ser, etc.'].iloc[0]
            # Generate the technical challenge
challenge = generate_technical_challenge(position, level, example, user_input)
return challenge, {}, {}
return "", {'display': 'none'}, {'display': 'none'}
@dash_app.callback(
[Output('div-resolve-message', 'children'),
Output('div-resolve-message', 'style')],
Input('btn-resolve-tests', 'n_clicks'),
State('dropdown-user-tests', 'value'),
State('dropdown-position-tests', 'value'),
State('dropdown-level-tests', 'value'),
State('div-challenge-tests', 'children')
)
def resolve_challenge(n_clicks, user, position, level, challenge):
if n_clicks is not None:
            # Read the stored challenges from Google Sheets
ws = utils_google.open_ws('Postula a Digitalia (Responses)', 'portal-retos')
df = utils_google.read_ws_data(ws)
            # Append the new challenge
            new_row = {'User': user, 'Position': position, 'Level': level, 'Challenge': challenge}
            df = pd.concat([df, pd.DataFrame([new_row])], ignore_index=True)
            # Save the challenges back to Google Sheets
utils_google.pandas_to_sheets(df, ws)
return "Genial, procede a desarrollar el reto, nuestro equipo de hunting te contactará pronto", {}
return None, {'display': 'none'}
| [
"Eres un generador de retos técnicos cortos y creativos de tecnología en español. Los niveles de seniority son Junior, Medium, SemiSenior y Senior. Cada nivel tiene una mayor expectativa de habilidades y experiencia.",
"Necesito un reto técnico para un postulante al puesto PLACEHOLDER de nivel PLACEHOLDER, da las indicaciones específicas para este nivel. El reto debe estar estimado para 6 horas máximo, no des retos muy largos, sé creativo. Por favor escríbelo tomando en cuenta que lo leerá el candidato final. Este es un ejemplo de reto técnico, solo un ejemplo, sé creativo pero con el mismo estilo y testeando mismas habilidades: PLACEHOLDER. Mi experiencia y perfil es el siguiente: PLACEHOLDER."
] |
2024-01-10 | NavadinNehru/SEBI-HACKATHON | fraudshield.py | import streamlit as st
from deep_translator import GoogleTranslator
from pytube import YouTube
import random
import base64
import os
from langchain.llms import OpenAI
from langchain.agents import AgentExecutor, AgentType, initialize_agent, load_tools # type: ignore
from langchain.tools import BaseTool
from typing import List
import openai
messages = [
{"role": "system", "content": "Investor seeking guidance. Provide a credibility score (1-100) for the given content, considering source reliability (40%), market conditions (30%), and risk factors (30%). Your response format: credibility score: (your answer) in one line, followed by reason: in a concise paragraph (max 150 words). Emphasize due diligence importance, exercise caution, and maintain a highly critical approach. Address fraudulent activities and refrain from accepting information without proper evidence. The user relies on your assessment for investment decisions, so precision is crucial. The content is as follows:{topic} "},
]
def get_img_as_base64(file):
with open(file, "rb") as f:
data = f.read()
return base64.b64encode(data).decode()
img = get_img_as_base64("image.jpg")
page_bg_img = f"""
<style>
[data-testid="stAppViewContainer"] > .main {{
background-image: url("https://media.istockphoto.com/id/1252292286/photo/black-bull-and-bear.jpg?s=170667a&w=0&k=20&c=EGv51h_SIWTHlaLYnxr6TfM5pKsv6nvudjW1IrYIeS8=");
background-size: 100%;
background-position: top left;
background-repeat: no-repeat;
background-attachment: local;
}}
[data-testid="stSidebar"] > div:first-child {{
background-image: url("data:image/png;base64,{img}");
background-position: center;
background-repeat: no-repeat;
background-attachment: fixed;
}}
[data-testid="stHeader"] {{
background: rgba(0,0,0,0);
}}
[data-testid="stToolbar"] {{
right: 2rem;
}}
</style>
"""
st.markdown(page_bg_img, unsafe_allow_html=True)
def main():
st.title("FraudShield")
tabs = st.tabs(["Home", "Text Input", "Image Input", "YouTube URLs"])
with tabs[0]:
st.subheader(
"Welcome to FraudShield - Your Trusted Source for Financial Safety"
)
st.write(
"At FraudShield, we're committed to safeguarding your financial well-being in the digital age. We understand that in today's fast-paced world, it's increasingly challenging to separate fact from fiction when it comes to financial news and investment opportunities. That's why we're here to help you make informed decisions and protect your hard-earned money."
)
with tabs[1]:
user_input_key = "userinput"
userinput = st.text_input("Enter your statement here:", key=user_input_key)
if st.button("Verify"):
with st.spinner("Analyzing the statement"):
# Append user's message to messages
messages.append({"role": "user", "content": userinput})
# Call OpenAI's Chat API
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
)
# Get the assistant's reply
userinput = chat.choices[0].message["content"]
# Append assistant's reply to messages
messages.append({"role": "assistant", "content": userinput})
user_input1=userinput[:22]
user_input2=userinput[22:]
st.write(user_input1)
st.write(user_input2)
# Display the assistant's reply
# st.write(userinput)
with tabs[3]:
user_input_key = "user_input"
user_input = st.text_input("Paste the link here:", key=user_input_key)
openai.api_key =st.secrets["OPENAI_API_KEY"]
if st.button("Check"):
videoReview(user_input)
with tabs[2]:
st.subheader("Image")
image = st.file_uploader(
label="Upload an image",
type=["jpg", "png"],
accept_multiple_files=False,
)
if st.button("Submit"):
if image is None:
st.error("Please upload an image.")
else:
# save the image
with st.spinner("Saving image..."):
image_path = "./temp.jpg"
with open(image_path, "wb+") as f:
f.write(image.getbuffer())
# reading text from image
with st.spinner("Extracting text..."):
import easyocr
Reader = easyocr.Reader(["en"])
text = " ".join(Reader.readtext(image_path, detail=0))
res=fraud(text).strip().capitalize()
# res = fraud(text).strip().capitalize()
# if res.find("s") == 0 or res.find("S") == 0:
# st.error(res)
# else:
# st.success(res)
res1=res[:22]
res2=res[22:]
st.write(res1)
st.write(res2)
# delete image
with st.spinner("Cleaning up..."):
os.remove(image_path)
def fraud(search: str) -> str:
# Create a new instance of the OpenAI class
llm = OpenAI(
openai_api_key=st.secrets["OPENAI_API_KEY"],
max_tokens=200,
temperature=0,
client=None,
model="text-davinci-003",
frequency_penalty=1,
presence_penalty=0,
top_p=1,
)
# Load the tools
tools: List[BaseTool] = load_tools(["google-serper"], llm=llm)
# Create a new instance of the AgentExecutor class
agent: AgentExecutor = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=False,
)
template = """Investor seeking guidance. Provide a credibility score (1-100) for the given content, considering source reliability (40%), market conditions (30%), and risk factors (30%). Your response format: 'credibility score: (your answer)' in one line, followed by 'reason: ' in a concise paragraph (max 150 words). Emphasize due diligence importance, exercise caution, and maintain a highly critical approach. Address fraudulent activities and refrain from accepting information without proper evidence. The user relies on your assessment for investment decisions, so precision is crucial.The content is as follows: {topic}"""
# Generate the response
response: str = agent.run(template.format(topic=search))
# Print the response
print(response)
# # Convert the response to a dictionary
# result = json.loads(response)
return response
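# e.g. fraud("Token X guarantees 300% returns in 30 days") should return a string
# beginning with "credibility score: ..." per the template (the input text is illustrative)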
def videoReview(yt_link):
video_caller = YouTube(yt_link)
a = random.randint(1, 10000)
a = str(a)
titlename=video_caller.title
video_caller.title = a
with st.spinner("Downloading the video"):
video_caller.streams.filter(only_audio=True).first().download()
b = a + ".mp4"
with st.spinner("Extracting the content"):
with open(b, "rb") as audio_file:
transcript2 = openai.Audio.transcribe(
file=audio_file, model="whisper-1", response_format="srt", language="en"
)
# st.write(transcript2)
if transcript2:
# Append user's message to messages
messages.append(
{
"role": "user",
# "content": transcript2,
"content": "Video title name : "+titlename+"\n"+"transcription: "+transcript2,
})
# print(messages)
# st.write(messages)
with st.spinner("Analyzing the content"):
# Call OpenAI's Chat API
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k-0613",
messages=messages,
)
# Get the assistant's reply
user_input = chat.choices[0].message["content"]
# Append assistant's reply to messages
# messages.append({"role": "assistant", "content": user_input})
# Display the assistant's reply
userinput1=user_input[:22]
userinput2=user_input[22:]
st.write(userinput1)
st.write(userinput2)
def translate(text: str) -> str:
translator: GoogleTranslator = GoogleTranslator(source="auto", target="en")
text = translator.translate(text)
text = (
text.strip()
.replace("\n", " ")
.replace("\t", " ")
.replace("\r", " ")
.rstrip(".")
)
return text
if __name__ == "__main__":
main() | [
"Investor seeking guidance. Provide a credibility score (1-100) for the given content, considering source reliability (40%), market conditions (30%), and risk factors (30%). Your response format: credibility score: (your answer) in one line, followed by reason: in a concise paragraph (max 150 words). Emphasize due diligence importance, exercise caution, and maintain a highly critical approach. Address fraudulent activities and refrain from accepting information without proper evidence. The user relies on your assessment for investment decisions, so precision is crucial. The content is as follows:{topic} ",
"Investor seeking guidance. Provide a credibility score (1-100) for the given content, considering source reliability (40%), market conditions (30%), and risk factors (30%). Your response format: 'credibility score: (your answer)' in one line, followed by 'reason: ' in a concise paragraph (max 150 words). Emphasize due diligence importance, exercise caution, and maintain a highly critical approach. Address fraudulent activities and refrain from accepting information without proper evidence. The user relies on your assessment for investment decisions, so precision is crucial.The content is as follows: {topic}",
"Video title name : PLACEHOLDER\ntranscription: PLACEHOLDER"
] |
2024-01-10 | ajinkyagorad/AskMe | realtimeAskMe.py | import numpy as np
import queue
import threading
import time
import openai
import whisper
import soundfile as sf
import io
from gtts import gTTS
from pydub import AudioSegment
from pydub.playback import play
from time import time
import sounddevice as sd
import os
from playsound import playsound
import simpleaudio
import struct
import boto3
from textblob import TextBlob
import cv2
import pyttsx3
from io import BytesIO
import requests
from PIL import Image
# pyttsx3
# AWS access keys (incomplete and are temporary) hint AKvSAG
aws_access_key_id = 'IA3ZOK3GYW2647Q54M'
aws_secret_access_key = 'gKMsBFkYBzlJS/XKqC8idobl6b/jJKkeiX8xmk'
# OpenAI api key
openai.api_key = 'sk-aalWvYCWi0oiYffnsk5WT3BlbkFJn2snshVgPGXwlG3Hn9'
# AWS region name
region_name = 'eu-central-1'
# Create a client session
session = boto3.Session(
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region_name
)
# Create a Polly client
polly_client = session.client('polly')
comprehend = session.client('comprehend')
q = queue.Queue()
samplerate = 44100
recording_duration = 50 # timeout (sec)
inactive_time_limit = .5 # when person pauses for this time or more (sec)
recording_blocks = []
dirpath = r''
valid_language_codes = ['en-US', 'en-IN', 'es-MX', 'en-ZA', 'tr-TR', 'ru-RU', 'ro-RO', 'pt-PT', 'pl-PL', 'nl-NL', 'it-IT', 'is-IS', 'fr-FR', 'es-ES', 'de-DE', 'yue-CN', 'ko-KR', 'en-NZ', 'en-GB-WLS', 'hi-IN', 'arb', 'cy-GB', 'cmn-CN', 'da-DK', 'en-AU', 'pt-BR', 'nb-NO', 'sv-SE', 'ja-JP', 'es-US', 'ca-ES', 'fr-CA', 'en-GB', 'de-AT']
def draw_image(prompt):
response = openai.Image.create(
prompt=prompt,
n=1,
size="512x512"
)
print('submitted request.. waiting')
image_url = response['data'][0]['url']
response = requests.get(image_url)
print('response at: ', image_url)
# Open the response content as an image using Pillow
image = Image.open(BytesIO(response.content))
#cv2.namedWindow('imgen', cv2.WINDOW_KEEPRATIO )
cv2.imshow('imgen',np.array(image))
cv2.waitKey(5000)
cv2.destroyAllWindows()
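# e.g. draw_image("a watercolor fox") requests one 512x512 generation and displays
# it for five seconds (the prompt text here is illustrative)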
def audio_callback(indata, frames, time, status):
q.put(indata.copy())
def text_to_speech_offline(text):
# Initialize the engine
engine = pyttsx3.init()
# Set the voice property to a female voice
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id) # index 1 corresponds to a female voice
engine.say(text)
# Run the engine and wait until speech is complete
engine.runAndWait()
print('¤')
def text_to_speech_aws(text):
response = comprehend.detect_dominant_language(Text=text)
# extract language code
language_code = response['Languages'][0]['LanguageCode']
language_code = language_code+'-'+language_code.upper()
if language_code not in valid_language_codes:
language_code = valid_language_codes[0]
# Generate an MP3 file using Polly
print('detected as:', language_code)
response = polly_client.synthesize_speech(
Text=text,
OutputFormat='mp3',
VoiceId='Joanna',
LanguageCode=language_code
)
ofile = os.path.join(dirpath, 'output.mp3')
# Save the MP3 file to disk
with open(ofile, 'wb') as file:
file.write(response['AudioStream'].read())
playsound('output.mp3')
'''
sound = AudioSegment.from_mp3(ofile)
sound.export('output.wav', format="wav")
obj = simpleaudio.WaveObject.from_wave_file('output.wav')
pobj = obj.play()
pobj.wait_done()
'''
def text_to_speech(text):
tts = gTTS(text, lang="en", slow=False)
#fp = io.BytesIO()
ofile = os.path.join(dirpath, 'output.mp3')
tts.save(ofile)
sound = AudioSegment.from_mp3(ofile)
sound.export('output.wav', format="wav")
obj = simpleaudio.WaveObject.from_wave_file('output.wav')
pobj = obj.play()
pobj.wait_done()
#print('filepath: ',os.path.join(dirpath, 'output.mp3'))
#audio_file = AudioSegment.from_file('output.mp3', format="mp3")
#play(audio_file)
def process_audio():
while True:
recording_blocks = []
print('recording...')
last_active_time = time()
inactive_time = 0
start_time = time()
while True:
audio_data = q.get()[:,0]
if np.max(audio_data)<0.01:
inactive_time = time()-last_active_time
else:
last_active_time = time()
recording_blocks.append(audio_data)
if inactive_time>inactive_time_limit or len(recording_blocks) * audio_data.shape[0] >= samplerate * recording_duration:
break
print('done')
audio_data_concat = np.concatenate(recording_blocks, axis=0)
# only proceed if at least 1 second of audio is present and there is at least 50% audio else redo
if time()-start_time<1:
print('too short')
continue
val=np.sum(audio_data_concat>0.005)/len(audio_data_concat)
if val<.05:
print('too little audio :', val)
continue
sf.write(os.path.join(dirpath, 'input.wav'), audio_data_concat, samplerate)
with open(os.path.join(dirpath, 'input.wav'), 'rb') as f:
transcript = openai.Audio.transcribe("whisper-1", f)['text']
print('Input: ',transcript)
if transcript.split(' ')[0].upper() == 'DRAW':
draw_image(transcript)
elif transcript:
response = openai.Completion.create(engine="text-davinci-003", prompt=transcript, max_tokens=50, n=1, stop='None', temperature=0.7)
message = response.choices[0].text.strip()
if message:
print("Response:", message)
#text_to_speech_aws(message)
text_to_speech_offline(message)
stream = sd.InputStream(device = 0, callback=audio_callback)
#outstream=sd.OutputStream(samplerate=samplerate)
stream.start()
#outstream.start()
# run processing as a daemon thread so the quit loop below stays reachable
processing_thread = threading.Thread(target=process_audio, daemon=True)
processing_thread.start()
while True:
# Keep the main thread running until the user presses the 'q' key
if input() == 'q':
break
stream.stop()
stream.close()
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~test_client.py | # File generated from our OpenAPI spec by Stainless.
from __future__ import annotations
import os
import json
import asyncio
import inspect
from typing import Any, Dict, Union, cast
import httpx
import pytest
from respx import MockRouter
from anthropic import Stream, Anthropic, AsyncStream, AsyncAnthropic
from anthropic._types import Omit
from anthropic._models import BaseModel, FinalRequestOptions
from anthropic._base_client import BaseClient, make_request_options
base_url = os.environ.get("API_BASE_URL", "http://127.0.0.1:4010")
api_key = os.environ.get("API_KEY", "something1234")
def _get_params(client: BaseClient) -> dict[str, str]:
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
return dict(url.params)
class TestAnthropic:
client = Anthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
def test_copy(self) -> None:
copied = self.client.copy()
assert id(copied) != id(self.client)
copied = self.client.copy(api_key="my new api key")
assert copied.api_key == "my new api key"
assert self.client.api_key == api_key
copied = self.client.copy(auth_token="my-auth-token")
assert copied.auth_token == "my-auth-token"
def test_copy_default_options(self) -> None:
        # options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
assert copied.max_retries == 7
assert self.client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
assert isinstance(self.client.timeout, httpx.Timeout)
copied = self.client.copy(timeout=None)
assert copied.timeout is None
assert isinstance(self.client.timeout, httpx.Timeout)
def test_copy_default_headers(self) -> None:
client = Anthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
assert client.default_headers["X-Foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert copied.default_headers["X-Foo"] == "bar"
# merges already given headers
copied = client.copy(default_headers={"X-Bar": "stainless"})
assert copied.default_headers["X-Foo"] == "bar"
assert copied.default_headers["X-Bar"] == "stainless"
# uses new values for any already given headers
copied = client.copy(default_headers={"X-Foo": "stainless"})
assert copied.default_headers["X-Foo"] == "stainless"
# set_default_headers
# completely overrides already set values
copied = client.copy(set_default_headers={})
assert copied.default_headers.get("X-Foo") is None
copied = client.copy(set_default_headers={"X-Bar": "Robert"})
assert copied.default_headers["X-Bar"] == "Robert"
with pytest.raises(
ValueError,
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
def test_copy_default_query(self) -> None:
client = Anthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
)
assert _get_params(client)["foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert _get_params(copied)["foo"] == "bar"
# merges already given params
copied = client.copy(default_query={"bar": "stainless"})
params = _get_params(copied)
assert params["foo"] == "bar"
assert params["bar"] == "stainless"
# uses new values for any already given headers
copied = client.copy(default_query={"foo": "stainless"})
assert _get_params(copied)["foo"] == "stainless"
# set_default_query
# completely overrides already set values
copied = client.copy(set_default_query={})
assert _get_params(copied) == {}
copied = client.copy(set_default_query={"bar": "Robert"})
assert _get_params(copied)["bar"] == "Robert"
with pytest.raises(
ValueError,
# TODO: update
match="`default_query` and `set_default_query` arguments are mutually exclusive",
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
def test_copy_signature(self) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
self.client.__init__, # type: ignore[misc]
)
copy_signature = inspect.signature(self.client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
if name in exclude_params:
continue
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
def test_default_headers_option(self) -> None:
client = Anthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
client2 = Anthropic(
base_url=base_url,
api_key=api_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
"X-Stainless-Lang": "my-overriding-header",
},
)
request = client2._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
def test_validate_headers(self) -> None:
client = Anthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("X-Api-Key") == api_key
client2 = Anthropic(base_url=base_url, api_key=None, _strict_response_validation=True)
with pytest.raises(
TypeError,
match="Could not resolve authentication method. Expected either api_key or auth_token to be set. Or for one of the `X-Api-Key` or `Authorization` headers to be explicitly omitted",
):
client2._build_request(FinalRequestOptions(method="get", url="/foo"))
request2 = client2._build_request(FinalRequestOptions(method="get", url="/foo", headers={"X-Api-Key": Omit()}))
assert request2.headers.get("X-Api-Key") is None
def test_default_query_option(self) -> None:
client = Anthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
assert dict(url.params) == {"query_param": "bar"}
request = client._build_request(
FinalRequestOptions(
method="get",
url="/foo",
params={"foo": "baz", "query_param": "overriden"},
)
)
url = httpx.URL(request.url)
        assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
def test_request_extra_json(self) -> None:
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
extra_json={"baz": False},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
extra_json={"baz": False},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar", "baz": True},
extra_json={"baz": None},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
def test_request_extra_headers(self) -> None:
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(extra_headers={"X-Foo": "Foo"}),
),
)
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
extra_headers={"X-Bar": "false"},
),
),
)
assert request.headers.get("X-Bar") == "false"
def test_request_extra_query(self) -> None:
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
extra_query={"my_query_param": "Foo"},
),
),
)
params = cast(Dict[str, str], dict(request.url.params))
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
query={"bar": "1"},
extra_query={"foo": "2"},
),
),
)
params = cast(Dict[str, str], dict(request.url.params))
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
query={"foo": "1"},
extra_query={"foo": "2"},
),
),
)
params = cast(Dict[str, str], dict(request.url.params))
assert params == {"foo": "2"}
@pytest.mark.respx(base_url=base_url)
def test_basic_union_response(self, respx_mock: MockRouter) -> None:
class Model1(BaseModel):
name: str
class Model2(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
def test_union_response_different_types(self, respx_mock: MockRouter) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
foo: int
class Model2(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model1)
assert response.foo == 1
def test_base_url_trailing_slash(self) -> None:
client = Anthropic(
base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
)
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
def test_base_url_no_trailing_slash(self) -> None:
client = Anthropic(
base_url="http://localhost:5000/custom/path", api_key=api_key, _strict_response_validation=True
)
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
def test_client_del(self) -> None:
client = Anthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
assert not client.is_closed()
client.__del__()
assert client.is_closed()
def test_client_context_manager(self) -> None:
client = Anthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
with client as c2:
assert c2 is client
assert not c2.is_closed()
assert not client.is_closed()
assert client.is_closed()
@pytest.mark.respx(base_url=base_url)
def test_default_stream_cls(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
name: str
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = self.client.post("/foo", cast_to=Model, stream=True)
assert isinstance(response, Stream)
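# Async counterpart of the synchronous client tests above: the same copy,
# request-building, and response behaviours exercised against AsyncAnthropic.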
class TestAsyncAnthropic:
client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
def test_copy(self) -> None:
copied = self.client.copy()
assert id(copied) != id(self.client)
copied = self.client.copy(api_key="my new api key")
assert copied.api_key == "my new api key"
assert self.client.api_key == api_key
copied = self.client.copy(auth_token="my-auth-token")
assert copied.auth_token == "my-auth-token"
def test_copy_default_options(self) -> None:
        # options that have a default are overridden correctly
copied = self.client.copy(max_retries=7)
assert copied.max_retries == 7
assert self.client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
assert isinstance(self.client.timeout, httpx.Timeout)
copied = self.client.copy(timeout=None)
assert copied.timeout is None
assert isinstance(self.client.timeout, httpx.Timeout)
def test_copy_default_headers(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
assert client.default_headers["X-Foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert copied.default_headers["X-Foo"] == "bar"
# merges already given headers
copied = client.copy(default_headers={"X-Bar": "stainless"})
assert copied.default_headers["X-Foo"] == "bar"
assert copied.default_headers["X-Bar"] == "stainless"
# uses new values for any already given headers
copied = client.copy(default_headers={"X-Foo": "stainless"})
assert copied.default_headers["X-Foo"] == "stainless"
# set_default_headers
# completely overrides already set values
copied = client.copy(set_default_headers={})
assert copied.default_headers.get("X-Foo") is None
copied = client.copy(set_default_headers={"X-Bar": "Robert"})
assert copied.default_headers["X-Bar"] == "Robert"
with pytest.raises(
ValueError,
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
def test_copy_default_query(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"foo": "bar"}
)
assert _get_params(client)["foo"] == "bar"
# does not override the already given value when not specified
copied = client.copy()
assert _get_params(copied)["foo"] == "bar"
# merges already given params
copied = client.copy(default_query={"bar": "stainless"})
params = _get_params(copied)
assert params["foo"] == "bar"
assert params["bar"] == "stainless"
        # uses new values for any already given params
copied = client.copy(default_query={"foo": "stainless"})
assert _get_params(copied)["foo"] == "stainless"
# set_default_query
# completely overrides already set values
copied = client.copy(set_default_query={})
assert _get_params(copied) == {}
copied = client.copy(set_default_query={"bar": "Robert"})
assert _get_params(copied)["bar"] == "Robert"
with pytest.raises(
ValueError,
# TODO: update
match="`default_query` and `set_default_query` arguments are mutually exclusive",
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
def test_copy_signature(self) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
self.client.__init__, # type: ignore[misc]
)
copy_signature = inspect.signature(self.client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
if name in exclude_params:
continue
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
def test_default_headers_option(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_headers={"X-Foo": "bar"}
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
client2 = AsyncAnthropic(
base_url=base_url,
api_key=api_key,
_strict_response_validation=True,
default_headers={
"X-Foo": "stainless",
"X-Stainless-Lang": "my-overriding-header",
},
)
request = client2._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
def test_validate_headers(self) -> None:
client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
assert request.headers.get("X-Api-Key") == api_key
client2 = AsyncAnthropic(base_url=base_url, api_key=None, _strict_response_validation=True)
with pytest.raises(
TypeError,
match="Could not resolve authentication method. Expected either api_key or auth_token to be set. Or for one of the `X-Api-Key` or `Authorization` headers to be explicitly omitted",
):
client2._build_request(FinalRequestOptions(method="get", url="/foo"))
request2 = client2._build_request(FinalRequestOptions(method="get", url="/foo", headers={"X-Api-Key": Omit()}))
assert request2.headers.get("X-Api-Key") is None
def test_default_query_option(self) -> None:
client = AsyncAnthropic(
base_url=base_url, api_key=api_key, _strict_response_validation=True, default_query={"query_param": "bar"}
)
request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
url = httpx.URL(request.url)
assert dict(url.params) == {"query_param": "bar"}
request = client._build_request(
FinalRequestOptions(
method="get",
url="/foo",
params={"foo": "baz", "query_param": "overriden"},
)
)
url = httpx.URL(request.url)
        assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
def test_request_extra_json(self) -> None:
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
extra_json={"baz": False},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
extra_json={"baz": False},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar", "baz": True},
extra_json={"baz": None},
),
)
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
def test_request_extra_headers(self) -> None:
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(extra_headers={"X-Foo": "Foo"}),
),
)
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
extra_headers={"X-Bar": "false"},
),
),
)
assert request.headers.get("X-Bar") == "false"
def test_request_extra_query(self) -> None:
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
extra_query={"my_query_param": "Foo"},
),
),
)
params = cast(Dict[str, str], dict(request.url.params))
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
query={"bar": "1"},
extra_query={"foo": "2"},
),
),
)
params = cast(Dict[str, str], dict(request.url.params))
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
request = self.client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
**make_request_options(
query={"foo": "1"},
extra_query={"foo": "2"},
),
),
)
params = cast(Dict[str, str], dict(request.url.params))
assert params == {"foo": "2"}
@pytest.mark.respx(base_url=base_url)
async def test_basic_union_response(self, respx_mock: MockRouter) -> None:
class Model1(BaseModel):
name: str
class Model2(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
async def test_union_response_different_types(self, respx_mock: MockRouter) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
foo: int
class Model2(BaseModel):
foo: str
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model1)
assert response.foo == 1
def test_base_url_trailing_slash(self) -> None:
client = AsyncAnthropic(
base_url="http://localhost:5000/custom/path/", api_key=api_key, _strict_response_validation=True
)
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
def test_base_url_no_trailing_slash(self) -> None:
client = AsyncAnthropic(
base_url="http://localhost:5000/custom/path", api_key=api_key, _strict_response_validation=True
)
request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
json_data={"foo": "bar"},
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
async def test_client_del(self) -> None:
client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
assert not client.is_closed()
client.__del__()
await asyncio.sleep(0.2)
assert client.is_closed()
async def test_client_context_manager(self) -> None:
client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
async with client as c2:
assert c2 is client
assert not c2.is_closed()
assert not client.is_closed()
assert client.is_closed()
@pytest.mark.respx(base_url=base_url)
@pytest.mark.asyncio
async def test_default_stream_cls(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
name: str
respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
response = await self.client.post("/foo", cast_to=Model, stream=True)
assert isinstance(response, AsyncStream)
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~api_resources~test_top_level.py | # File generated from our OpenAPI spec by Stainless.
from __future__ import annotations
import os
import pytest
from anthropic import Anthropic, AsyncAnthropic
base_url = os.environ.get("API_BASE_URL", "http://127.0.0.1:4010")
api_key = os.environ.get("API_KEY", "something1234")
class TestTopLevel:
strict_client = Anthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
loose_client = Anthropic(base_url=base_url, api_key=api_key, _strict_response_validation=False)
parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
def test_count_tokens(self) -> None:
tokens = self.strict_client.count_tokens("hello world!")
assert tokens == 3
class TestAsyncTopLevel:
strict_client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=True)
loose_client = AsyncAnthropic(base_url=base_url, api_key=api_key, _strict_response_validation=False)
parametrize = pytest.mark.parametrize("client", [strict_client, loose_client], ids=["strict", "loose"])
async def test_count_tokens(self) -> None:
tokens = await self.strict_client.count_tokens("hello world!")
assert tokens == 3
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~test_models.py | from typing import Any, Dict, List, Union, Optional, cast
from datetime import datetime, timezone
from typing_extensions import Literal
import pytest
from pydantic import Field
from anthropic._compat import PYDANTIC_V2, parse_obj, model_dump, model_json
from anthropic._models import BaseModel
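# These tests exercise the lenient `construct()` path: values that do not match
# the annotated type are kept as-is instead of raising a validation error.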
class BasicModel(BaseModel):
foo: str
@pytest.mark.parametrize("value", ["hello", 1], ids=["correct type", "mismatched"])
def test_basic(value: object) -> None:
m = BasicModel.construct(foo=value)
assert m.foo == value
def test_directly_nested_model() -> None:
class NestedModel(BaseModel):
nested: BasicModel
m = NestedModel.construct(nested={"foo": "Foo!"})
assert m.nested.foo == "Foo!"
# mismatched types
m = NestedModel.construct(nested="hello!")
assert m.nested == "hello!"
def test_optional_nested_model() -> None:
class NestedModel(BaseModel):
nested: Optional[BasicModel]
m1 = NestedModel.construct(nested=None)
assert m1.nested is None
m2 = NestedModel.construct(nested={"foo": "bar"})
assert m2.nested is not None
assert m2.nested.foo == "bar"
# mismatched types
m3 = NestedModel.construct(nested={"foo"})
assert isinstance(cast(Any, m3.nested), set)
assert m3.nested == {"foo"}
def test_list_nested_model() -> None:
class NestedModel(BaseModel):
nested: List[BasicModel]
m = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}])
assert m.nested is not None
assert isinstance(m.nested, list)
assert len(m.nested) == 2
assert m.nested[0].foo == "bar"
assert m.nested[1].foo == "2"
# mismatched types
m = NestedModel.construct(nested=True)
assert cast(Any, m.nested) is True
m = NestedModel.construct(nested=[False])
assert cast(Any, m.nested) == [False]
def test_optional_list_nested_model() -> None:
class NestedModel(BaseModel):
nested: Optional[List[BasicModel]]
m1 = NestedModel.construct(nested=[{"foo": "bar"}, {"foo": "2"}])
assert m1.nested is not None
assert isinstance(m1.nested, list)
assert len(m1.nested) == 2
assert m1.nested[0].foo == "bar"
assert m1.nested[1].foo == "2"
m2 = NestedModel.construct(nested=None)
assert m2.nested is None
# mismatched types
m3 = NestedModel.construct(nested={1})
assert cast(Any, m3.nested) == {1}
m4 = NestedModel.construct(nested=[False])
assert cast(Any, m4.nested) == [False]
def test_list_optional_items_nested_model() -> None:
class NestedModel(BaseModel):
nested: List[Optional[BasicModel]]
m = NestedModel.construct(nested=[None, {"foo": "bar"}])
assert m.nested is not None
assert isinstance(m.nested, list)
assert len(m.nested) == 2
assert m.nested[0] is None
assert m.nested[1] is not None
assert m.nested[1].foo == "bar"
# mismatched types
m3 = NestedModel.construct(nested="foo")
assert cast(Any, m3.nested) == "foo"
m4 = NestedModel.construct(nested=[False])
assert cast(Any, m4.nested) == [False]
def test_list_mismatched_type() -> None:
class NestedModel(BaseModel):
nested: List[str]
m = NestedModel.construct(nested=False)
assert cast(Any, m.nested) is False
def test_raw_dictionary() -> None:
class NestedModel(BaseModel):
nested: Dict[str, str]
m = NestedModel.construct(nested={"hello": "world"})
assert m.nested == {"hello": "world"}
# mismatched types
m = NestedModel.construct(nested=False)
assert cast(Any, m.nested) is False
def test_nested_dictionary_model() -> None:
class NestedModel(BaseModel):
nested: Dict[str, BasicModel]
m = NestedModel.construct(nested={"hello": {"foo": "bar"}})
assert isinstance(m.nested, dict)
assert m.nested["hello"].foo == "bar"
# mismatched types
m = NestedModel.construct(nested={"hello": False})
assert cast(Any, m.nested["hello"]) is False
def test_unknown_fields() -> None:
m1 = BasicModel.construct(foo="foo", unknown=1)
assert m1.foo == "foo"
assert cast(Any, m1).unknown == 1
m2 = BasicModel.construct(foo="foo", unknown={"foo_bar": True})
assert m2.foo == "foo"
assert cast(Any, m2).unknown == {"foo_bar": True}
assert model_dump(m2) == {"foo": "foo", "unknown": {"foo_bar": True}}
def test_strict_validation_unknown_fields() -> None:
class Model(BaseModel):
foo: str
model = parse_obj(Model, dict(foo="hello!", user="Robert"))
assert model.foo == "hello!"
assert cast(Any, model).user == "Robert"
assert model_dump(model) == {"foo": "hello!", "user": "Robert"}
def test_aliases() -> None:
class Model(BaseModel):
my_field: int = Field(alias="myField")
m = Model.construct(myField=1)
assert m.my_field == 1
# mismatched types
m = Model.construct(myField={"hello": False})
assert cast(Any, m.my_field) == {"hello": False}
def test_repr() -> None:
model = BasicModel(foo="bar")
assert str(model) == "BasicModel(foo='bar')"
assert repr(model) == "BasicModel(foo='bar')"
def test_repr_nested_model() -> None:
class Child(BaseModel):
name: str
age: int
class Parent(BaseModel):
name: str
child: Child
model = Parent(name="Robert", child=Child(name="Foo", age=5))
assert str(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))"
assert repr(model) == "Parent(name='Robert', child=Child(name='Foo', age=5))"
def test_optional_list() -> None:
class Submodel(BaseModel):
name: str
class Model(BaseModel):
items: Optional[List[Submodel]]
m = Model.construct(items=None)
assert m.items is None
m = Model.construct(items=[])
assert m.items == []
m = Model.construct(items=[{"name": "Robert"}])
assert m.items is not None
assert len(m.items) == 1
assert m.items[0].name == "Robert"
def test_nested_union_of_models() -> None:
class Submodel1(BaseModel):
bar: bool
class Submodel2(BaseModel):
thing: str
class Model(BaseModel):
foo: Union[Submodel1, Submodel2]
m = Model.construct(foo={"thing": "hello"})
assert isinstance(m.foo, Submodel2)
assert m.foo.thing == "hello"
def test_nested_union_of_mixed_types() -> None:
class Submodel1(BaseModel):
bar: bool
class Model(BaseModel):
foo: Union[Submodel1, Literal[True], Literal["CARD_HOLDER"]]
m = Model.construct(foo=True)
assert m.foo is True
m = Model.construct(foo="CARD_HOLDER")
    assert m.foo == "CARD_HOLDER"
m = Model.construct(foo={"bar": False})
assert isinstance(m.foo, Submodel1)
assert m.foo.bar is False
def test_nested_union_multiple_variants() -> None:
class Submodel1(BaseModel):
bar: bool
class Submodel2(BaseModel):
thing: str
class Submodel3(BaseModel):
foo: int
class Model(BaseModel):
foo: Union[Submodel1, Submodel2, None, Submodel3]
m = Model.construct(foo={"thing": "hello"})
assert isinstance(m.foo, Submodel2)
assert m.foo.thing == "hello"
m = Model.construct(foo=None)
assert m.foo is None
m = Model.construct()
assert m.foo is None
m = Model.construct(foo={"foo": "1"})
assert isinstance(m.foo, Submodel3)
assert m.foo.foo == 1
def test_nested_union_invalid_data() -> None:
class Submodel1(BaseModel):
level: int
class Submodel2(BaseModel):
name: str
class Model(BaseModel):
foo: Union[Submodel1, Submodel2]
m = Model.construct(foo=True)
assert cast(bool, m.foo) is True
m = Model.construct(foo={"name": 3})
if PYDANTIC_V2:
assert isinstance(m.foo, Submodel1)
assert m.foo.name == 3 # type: ignore
else:
assert isinstance(m.foo, Submodel2)
assert m.foo.name == "3"
def test_list_of_unions() -> None:
class Submodel1(BaseModel):
level: int
class Submodel2(BaseModel):
name: str
class Model(BaseModel):
items: List[Union[Submodel1, Submodel2]]
m = Model.construct(items=[{"level": 1}, {"name": "Robert"}])
assert len(m.items) == 2
assert isinstance(m.items[0], Submodel1)
assert m.items[0].level == 1
assert isinstance(m.items[1], Submodel2)
assert m.items[1].name == "Robert"
m = Model.construct(items=[{"level": -1}, 156])
assert len(m.items) == 2
assert isinstance(m.items[0], Submodel1)
assert m.items[0].level == -1
assert m.items[1] == 156
def test_union_of_lists() -> None:
class SubModel1(BaseModel):
level: int
class SubModel2(BaseModel):
name: str
class Model(BaseModel):
items: Union[List[SubModel1], List[SubModel2]]
# with one valid entry
m = Model.construct(items=[{"name": "Robert"}])
assert len(m.items) == 1
assert isinstance(m.items[0], SubModel2)
assert m.items[0].name == "Robert"
# with two entries pointing to different types
m = Model.construct(items=[{"level": 1}, {"name": "Robert"}])
assert len(m.items) == 2
assert isinstance(m.items[0], SubModel1)
assert m.items[0].level == 1
assert isinstance(m.items[1], SubModel1)
assert cast(Any, m.items[1]).name == "Robert"
# with two entries pointing to *completely* different types
m = Model.construct(items=[{"level": -1}, 156])
assert len(m.items) == 2
assert isinstance(m.items[0], SubModel1)
assert m.items[0].level == -1
assert m.items[1] == 156
def test_dict_of_union() -> None:
class SubModel1(BaseModel):
name: str
class SubModel2(BaseModel):
foo: str
class Model(BaseModel):
data: Dict[str, Union[SubModel1, SubModel2]]
m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}})
assert len(list(m.data.keys())) == 2
assert isinstance(m.data["hello"], SubModel1)
assert m.data["hello"].name == "there"
assert isinstance(m.data["foo"], SubModel2)
assert m.data["foo"].foo == "bar"
# TODO: test mismatched type
def test_double_nested_union() -> None:
class SubModel1(BaseModel):
name: str
class SubModel2(BaseModel):
bar: str
class Model(BaseModel):
data: Dict[str, List[Union[SubModel1, SubModel2]]]
m = Model.construct(data={"foo": [{"bar": "baz"}, {"name": "Robert"}]})
assert len(m.data["foo"]) == 2
entry1 = m.data["foo"][0]
assert isinstance(entry1, SubModel2)
assert entry1.bar == "baz"
entry2 = m.data["foo"][1]
assert isinstance(entry2, SubModel1)
assert entry2.name == "Robert"
# TODO: test mismatched type
def test_union_of_dict() -> None:
class SubModel1(BaseModel):
name: str
class SubModel2(BaseModel):
foo: str
class Model(BaseModel):
data: Union[Dict[str, SubModel1], Dict[str, SubModel2]]
m = Model.construct(data={"hello": {"name": "there"}, "foo": {"foo": "bar"}})
assert len(list(m.data.keys())) == 2
assert isinstance(m.data["hello"], SubModel1)
assert m.data["hello"].name == "there"
assert isinstance(m.data["foo"], SubModel1)
assert cast(Any, m.data["foo"]).foo == "bar"
def test_iso8601_datetime() -> None:
class Model(BaseModel):
created_at: datetime
expected = datetime(2019, 12, 27, 18, 11, 19, 117000, tzinfo=timezone.utc)
if PYDANTIC_V2:
expected_json = '{"created_at":"2019-12-27T18:11:19.117Z"}'
else:
expected_json = '{"created_at": "2019-12-27T18:11:19.117000+00:00"}'
model = Model.construct(created_at="2019-12-27T18:11:19.117Z")
assert model.created_at == expected
assert model_json(model) == expected_json
model = parse_obj(Model, dict(created_at="2019-12-27T18:11:19.117Z"))
assert model.created_at == expected
assert model_json(model) == expected_json
def test_coerces_int() -> None:
class Model(BaseModel):
bar: int
assert Model.construct(bar=1).bar == 1
assert Model.construct(bar=10.9).bar == 10
assert Model.construct(bar="19").bar == 19
assert Model.construct(bar=False).bar == 0
# TODO: support this
# assert Model.construct(bar="True").bar == 1
# mismatched types are left as-is
m = Model.construct(bar={"foo": "bar"})
assert m.bar == {"foo": "bar"} # type: ignore[comparison-overlap]
def test_deprecated_alias() -> None:
class Model(BaseModel):
resource_id: str = Field(alias="model_id")
@property
def model_id(self) -> str:
return self.resource_id
m = Model.construct(model_id="id")
assert m.model_id == "id"
assert m.resource_id == "id"
assert m.resource_id is m.model_id
m = parse_obj(Model, {"model_id": "id"})
assert m.model_id == "id"
assert m.resource_id == "id"
assert m.resource_id is m.model_id
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | examples~demo_sync.py | #!/usr/bin/env -S poetry run python
import anthropic
from anthropic import Anthropic
def main() -> None:
client = Anthropic()
res = client.completions.create(
model="claude-2",
prompt=f"{anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic.AI_PROMPT}",
max_tokens_to_sample=1000,
)
print(res.completion)
main()
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~utils.py | from __future__ import annotations
import traceback
from typing import Any, TypeVar, cast
from datetime import date, datetime
from typing_extensions import Literal, get_args, get_origin, assert_type
from anthropic._types import NoneType
from anthropic._utils import is_dict, is_list, is_list_type, is_union_type
from anthropic._compat import PYDANTIC_V2, field_outer_type, get_model_fields
from anthropic._models import BaseModel
BaseModelT = TypeVar("BaseModelT", bound=BaseModel)
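# Recursively asserts that every declared field on `value` matches the type
# annotated on `model`.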
def assert_matches_model(model: type[BaseModelT], value: BaseModelT, *, path: list[str]) -> bool:
for name, field in get_model_fields(model).items():
field_value = getattr(value, name)
if PYDANTIC_V2:
allow_none = False
else:
# in v1 nullability was structured differently
# https://docs.pydantic.dev/2.0/migration/#required-optional-and-nullable-fields
allow_none = getattr(field, "allow_none", False)
assert_matches_type(
field_outer_type(field),
field_value,
path=[*path, name],
allow_none=allow_none,
)
return True
# Note: the `path` argument is only used to improve error messages when `--showlocals` is used
def assert_matches_type(
type_: Any,
value: object,
*,
path: list[str],
allow_none: bool = False,
) -> None:
if allow_none and value is None:
return
if type_ is None or type_ is NoneType:
assert value is None
return
origin = get_origin(type_) or type_
if is_list_type(type_):
return _assert_list_type(type_, value)
if origin == str:
assert isinstance(value, str)
elif origin == int:
assert isinstance(value, int)
elif origin == bool:
assert isinstance(value, bool)
elif origin == float:
assert isinstance(value, float)
elif origin == datetime:
assert isinstance(value, datetime)
elif origin == date:
assert isinstance(value, date)
elif origin == object:
# nothing to do here, the expected type is unknown
pass
elif origin == Literal:
assert value in get_args(type_)
elif origin == dict:
assert is_dict(value)
args = get_args(type_)
key_type = args[0]
items_type = args[1]
for key, item in value.items():
assert_matches_type(key_type, key, path=[*path, "<dict key>"])
assert_matches_type(items_type, item, path=[*path, "<dict item>"])
elif is_union_type(type_):
for i, variant in enumerate(get_args(type_)):
try:
assert_matches_type(variant, value, path=[*path, f"variant {i}"])
return
except AssertionError:
traceback.print_exc()
continue
assert False, "Did not match any variants"
elif issubclass(origin, BaseModel):
assert isinstance(value, type_)
assert assert_matches_model(type_, cast(Any, value), path=path)
else:
        assert False, f"Unhandled field type: {type_}"
def _assert_list_type(type_: type[object], value: object) -> None:
assert is_list(value)
inner_type = get_args(type_)[0]
for entry in value:
assert_type(inner_type, entry) # type: ignore
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~test_extract_files.py | from __future__ import annotations
from typing import Sequence
import pytest
from anthropic._types import FileTypes
from anthropic._utils import extract_files
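# Note: `extract_files` mutates the given query in place and returns the
# extracted (flattened-key, file) pairs.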
def test_removes_files_from_input() -> None:
query = {"foo": "bar"}
assert extract_files(query, paths=[]) == []
assert query == {"foo": "bar"}
query2 = {"foo": b"Bar", "hello": "world"}
assert extract_files(query2, paths=[["foo"]]) == [("foo", b"Bar")]
assert query2 == {"hello": "world"}
query3 = {"foo": {"foo": {"bar": b"Bar"}}, "hello": "world"}
assert extract_files(query3, paths=[["foo", "foo", "bar"]]) == [("foo[foo][bar]", b"Bar")]
assert query3 == {"foo": {"foo": {}}, "hello": "world"}
query4 = {"foo": {"bar": b"Bar", "baz": "foo"}, "hello": "world"}
assert extract_files(query4, paths=[["foo", "bar"]]) == [("foo[bar]", b"Bar")]
assert query4 == {"hello": "world", "foo": {"baz": "foo"}}
def test_multiple_files() -> None:
query = {"documents": [{"file": b"My first file"}, {"file": b"My second file"}]}
assert extract_files(query, paths=[["documents", "<array>", "file"]]) == [
("documents[][file]", b"My first file"),
("documents[][file]", b"My second file"),
]
assert query == {"documents": [{}, {}]}
@pytest.mark.parametrize(
"query,paths,expected",
[
[
{"foo": {"bar": "baz"}},
[["foo", "<array>", "bar"]],
[],
],
[
{"foo": ["bar", "baz"]},
[["foo", "bar"]],
[],
],
[
{"foo": {"bar": "baz"}},
[["foo", "foo"]],
[],
],
],
ids=["dict expecting array", "arraye expecting dict", "unknown keys"],
)
def test_ignores_incorrect_paths(
query: dict[str, object],
paths: Sequence[Sequence[str]],
expected: list[tuple[str, FileTypes]],
) -> None:
assert extract_files(query, paths=paths) == expected
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~test_qs.py | from typing import Any, cast
from functools import partial
from urllib.parse import unquote
import pytest
from anthropic._qs import Querystring, stringify
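# The tests below cover both nested formats ("brackets" by default, "dots" opt-in)
# and both array formats ("repeat" by default, "comma" opt-in).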
def test_empty() -> None:
assert stringify({}) == ""
assert stringify({"a": {}}) == ""
assert stringify({"a": {"b": {"c": {}}}}) == ""
def test_basic() -> None:
assert stringify({"a": 1}) == "a=1"
assert stringify({"a": "b"}) == "a=b"
assert stringify({"a": True}) == "a=true"
assert stringify({"a": False}) == "a=false"
assert stringify({"a": 1.23456}) == "a=1.23456"
assert stringify({"a": None}) == ""
@pytest.mark.parametrize("method", ["class", "function"])
def test_nested_dotted(method: str) -> None:
if method == "class":
serialise = Querystring(nested_format="dots").stringify
else:
serialise = partial(stringify, nested_format="dots")
assert unquote(serialise({"a": {"b": "c"}})) == "a.b=c"
assert unquote(serialise({"a": {"b": "c", "d": "e", "f": "g"}})) == "a.b=c&a.d=e&a.f=g"
assert unquote(serialise({"a": {"b": {"c": {"d": "e"}}}})) == "a.b.c.d=e"
assert unquote(serialise({"a": {"b": True}})) == "a.b=true"
def test_nested_brackets() -> None:
assert unquote(stringify({"a": {"b": "c"}})) == "a[b]=c"
assert unquote(stringify({"a": {"b": "c", "d": "e", "f": "g"}})) == "a[b]=c&a[d]=e&a[f]=g"
assert unquote(stringify({"a": {"b": {"c": {"d": "e"}}}})) == "a[b][c][d]=e"
assert unquote(stringify({"a": {"b": True}})) == "a[b]=true"
@pytest.mark.parametrize("method", ["class", "function"])
def test_array_comma(method: str) -> None:
if method == "class":
serialise = Querystring(array_format="comma").stringify
else:
serialise = partial(stringify, array_format="comma")
assert unquote(serialise({"in": ["foo", "bar"]})) == "in=foo,bar"
assert unquote(serialise({"a": {"b": [True, False]}})) == "a[b]=true,false"
assert unquote(serialise({"a": {"b": [True, False, None, True]}})) == "a[b]=true,false,true"
def test_array_repeat() -> None:
assert unquote(stringify({"in": ["foo", "bar"]})) == "in=foo&in=bar"
assert unquote(stringify({"a": {"b": [True, False]}})) == "a[b]=true&a[b]=false"
assert unquote(stringify({"a": {"b": [True, False, None, True]}})) == "a[b]=true&a[b]=false&a[b]=true"
assert unquote(stringify({"in": ["foo", {"b": {"c": ["d", "e"]}}]})) == "in=foo&in[b][c]=d&in[b][c]=e"
def test_unknown_array_format() -> None:
with pytest.raises(NotImplementedError, match="Unknown array_format value: foo, choose from comma, repeat"):
stringify({"a": ["foo", "bar"]}, array_format=cast(Any, "foo"))
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | examples~tokens.py | #!/usr/bin/env -S poetry run python
import asyncio
from anthropic import Anthropic, AsyncAnthropic
def sync_tokens() -> None:
client = Anthropic()
text = "hello world!"
tokens = client.count_tokens(text)
print(f"'{text}' is {tokens} tokens")
assert tokens == 3
async def async_tokens() -> None:
anthropic = AsyncAnthropic()
text = "fist message"
tokens = await anthropic.count_tokens(text)
print(f"'{text}' is {tokens} tokens")
text = "second message"
tokens = await anthropic.count_tokens(text)
print(f"'{text}' is {tokens} tokens")
sync_tokens()
asyncio.run(async_tokens())
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | examples~streaming.py | #!/usr/bin/env -S poetry run python
import asyncio
from anthropic import AI_PROMPT, HUMAN_PROMPT, Anthropic, APIStatusError, AsyncAnthropic
client = Anthropic()
async_client = AsyncAnthropic()
question = """
Hey Claude! How can I recursively list all files in a directory in Python?
"""
def sync_stream() -> None:
stream = client.completions.create(
prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}",
model="claude-2",
stream=True,
max_tokens_to_sample=300,
)
for completion in stream:
print(completion.completion, end="")
print()
async def async_stream() -> None:
stream = await async_client.completions.create(
prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}",
model="claude-2",
stream=True,
max_tokens_to_sample=300,
)
async for completion in stream:
print(completion.completion, end="")
print()
def stream_error() -> None:
try:
client.completions.create(
prompt=f"{HUMAN_PROMPT} {question}{AI_PROMPT}",
model="claude-unknown-model",
stream=True,
max_tokens_to_sample=300,
)
except APIStatusError as err:
print(f"Caught API status error with response body: {err.response.text}")
sync_stream()
asyncio.run(async_stream())
stream_error()
| [
"PLACEHOLDER \nHey Claude! How can I recursively list all files in a directory in Python?\nPLACEHOLDER"
] |
2024-01-10 | pratimdas/anthropic-sdk-python | tests~test_required_args.py | from __future__ import annotations
import pytest
from anthropic._utils import required_args
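# `required_args` enforces at call time that at least one of the declared
# argument sets is fully supplied, even though the wrapped signature marks
# every parameter as optional.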
def test_too_many_positional_params() -> None:
@required_args(["a"])
def foo(a: str | None = None) -> str | None:
return a
with pytest.raises(TypeError, match=r"foo\(\) takes 1 argument\(s\) but 2 were given"):
foo("a", "b") # type: ignore
def test_positional_param() -> None:
@required_args(["a"])
def foo(a: str | None = None) -> str | None:
return a
assert foo("a") == "a"
assert foo(None) is None
assert foo(a="b") == "b"
with pytest.raises(TypeError, match="Missing required argument: 'a'"):
foo()
def test_keyword_only_param() -> None:
@required_args(["a"])
def foo(*, a: str | None = None) -> str | None:
return a
assert foo(a="a") == "a"
assert foo(a=None) is None
assert foo(a="b") == "b"
with pytest.raises(TypeError, match="Missing required argument: 'a'"):
foo()
def test_multiple_params() -> None:
@required_args(["a", "b", "c"])
def foo(a: str = "", *, b: str = "", c: str = "") -> str | None:
return a + " " + b + " " + c
assert foo(a="a", b="b", c="c") == "a b c"
error_message = r"Missing required arguments.*"
with pytest.raises(TypeError, match=error_message):
foo()
with pytest.raises(TypeError, match=error_message):
foo(a="a")
with pytest.raises(TypeError, match=error_message):
foo(b="b")
with pytest.raises(TypeError, match=error_message):
foo(c="c")
with pytest.raises(TypeError, match=r"Missing required argument: 'a'"):
foo(b="a", c="c")
with pytest.raises(TypeError, match=r"Missing required argument: 'b'"):
foo("a", c="c")
def test_multiple_variants() -> None:
@required_args(["a"], ["b"])
def foo(*, a: str | None = None, b: str | None = None) -> str | None:
return a if a is not None else b
assert foo(a="foo") == "foo"
assert foo(b="bar") == "bar"
assert foo(a=None) is None
assert foo(b=None) is None
# TODO: this error message could probably be improved
with pytest.raises(
TypeError,
match=r"Missing required arguments; Expected either \('a'\) or \('b'\) arguments to be given",
):
foo()
def test_multiple_params_multiple_variants() -> None:
@required_args(["a", "b"], ["c"])
def foo(*, a: str | None = None, b: str | None = None, c: str | None = None) -> str | None:
if a is not None:
return a
if b is not None:
return b
return c
error_message = r"Missing required arguments; Expected either \('a' and 'b'\) or \('c'\) arguments to be given"
with pytest.raises(TypeError, match=error_message):
foo(a="foo")
with pytest.raises(TypeError, match=error_message):
foo(b="bar")
with pytest.raises(TypeError, match=error_message):
foo()
assert foo(a=None, b="bar") == "bar"
assert foo(c=None) is None
assert foo(c="foo") == "foo"
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | src~anthropic~_resource.py | # File generated from our OpenAPI spec by Stainless.
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ._client import Anthropic, AsyncAnthropic
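# Thin base classes that bind a resource to a client instance and re-export the
# client's HTTP helpers (get/post/patch/put/delete) as protected attributes.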
class SyncAPIResource:
_client: Anthropic
def __init__(self, client: Anthropic) -> None:
self._client = client
self._get = client.get
self._post = client.post
self._patch = client.patch
self._put = client.put
self._delete = client.delete
self._get_api_list = client.get_api_list
class AsyncAPIResource:
_client: AsyncAnthropic
def __init__(self, client: AsyncAnthropic) -> None:
self._client = client
self._get = client.get
self._post = client.post
self._patch = client.patch
self._put = client.put
self._delete = client.delete
self._get_api_list = client.get_api_list
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | src~anthropic~_types.py | from __future__ import annotations
from typing import (
IO,
TYPE_CHECKING,
Any,
Dict,
List,
Type,
Tuple,
Union,
Mapping,
TypeVar,
Optional,
Sequence,
)
from typing_extensions import Literal, Protocol, TypedDict, runtime_checkable
import httpx
import pydantic
from httpx import Proxy, Timeout, Response, BaseTransport
if TYPE_CHECKING:
from ._models import BaseModel
Transport = BaseTransport
Query = Mapping[str, object]
Body = object
AnyMapping = Mapping[str, object]
ModelT = TypeVar("ModelT", bound=pydantic.BaseModel)
_T = TypeVar("_T")
# Approximates httpx internal ProxiesTypes and RequestFiles types
ProxiesTypes = Union[str, Proxy, Dict[str, Union[None, str, Proxy]]]
FileContent = Union[IO[bytes], bytes]
FileTypes = Union[
# file (or bytes)
FileContent,
# (filename, file (or bytes))
Tuple[Optional[str], FileContent],
# (filename, file (or bytes), content_type)
Tuple[Optional[str], FileContent, Optional[str]],
# (filename, file (or bytes), content_type, headers)
Tuple[Optional[str], FileContent, Optional[str], Mapping[str, str]],
]
RequestFiles = Union[Mapping[str, FileTypes], Sequence[Tuple[str, FileTypes]]]
# Workaround to support (cast_to: Type[ResponseT]) -> ResponseT
# where ResponseT includes `None`. In order to support directly
# passing `None`, overloads would have to be defined for every
# method that uses `ResponseT` which would lead to an unacceptable
# amount of code duplication and make it unreadable. See _base_client.py
# for example usage.
#
# This unfortunately means that you will either have
# to import this type and pass it explicitly:
#
# from anthropic import NoneType
# client.get('/foo', cast_to=NoneType)
#
# or build it yourself:
#
# client.get('/foo', cast_to=type(None))
if TYPE_CHECKING:
NoneType: Type[None]
else:
NoneType = type(None)
class RequestOptions(TypedDict, total=False):
headers: Headers
max_retries: int
timeout: float | Timeout | None
params: Query
extra_json: AnyMapping
idempotency_key: str
# Sentinel class used when the response type is an object with an unknown schema
class UnknownResponse:
...
# Sentinel class used until PEP 0661 is accepted
class NotGiven:
"""
A sentinel singleton class used to distinguish omitted keyword arguments
from those passed in with the value None (which may have different behavior).
For example:
```py
def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
    get(timeout=1)  # 1s timeout
    get(timeout=None)  # No timeout
get() # Default timeout behavior, which may not be statically known at the method definition.
```
"""
def __bool__(self) -> Literal[False]:
return False
NotGivenOr = Union[_T, NotGiven]
NOT_GIVEN = NotGiven()
class Omit:
"""In certain situations you need to be able to represent a case where a default value has
to be explicitly removed and `None` is not an appropriate substitute, for example:
```py
    # the default `Content-Type` header of `application/json` would be sent with this request
client.post('/upload/files', files={'file': b'my raw file content'})
# you can't explicitly override the header as it has to be dynamically generated
# to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
client.post(..., headers={'Content-Type': 'multipart/form-data'})
# instead you can remove the default `application/json` header by passing Omit
client.post(..., headers={'Content-Type': Omit()})
```
"""
def __bool__(self) -> Literal[False]:
return False
@runtime_checkable
class ModelBuilderProtocol(Protocol):
@classmethod
def build(
cls: type[_T],
*,
response: Response,
data: object,
) -> _T:
...
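# A header value may be `Omit()` to explicitly strip a default header (see `Omit` above).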
Headers = Mapping[str, Union[str, Omit]]
class HeadersLikeProtocol(Protocol):
def get(self, __key: str) -> str | None:
...
HeadersLike = Union[Headers, HeadersLikeProtocol]
ResponseT = TypeVar(
"ResponseT",
bound="Union[str, None, BaseModel, List[Any], Dict[str, Any], httpx.Response, UnknownResponse, ModelBuilderProtocol]",
)
StrBytesIntFloat = Union[str, bytes, int, float]
| [] |
2024-01-10 | pratimdas/anthropic-sdk-python | examples~demo_async.py | #!/usr/bin/env -S poetry run python
import asyncio
import anthropic
from anthropic import AsyncAnthropic
async def main() -> None:
client = AsyncAnthropic()
res = await client.completions.create(
model="claude-2",
prompt=f"{anthropic.HUMAN_PROMPT} how does a court case get to the Supreme Court? {anthropic.AI_PROMPT}",
max_tokens_to_sample=1000,
)
print(res.completion)
asyncio.run(main())
| [] |
2024-01-10 | Code-WSY/GPT-SY | gpt_sy~ask_GPT.py | import openai
from Box_Dialog import *
def askGPT(messages, MODEL, MODEL_use_mode, temperature, max_tokens):
"""
    :param messages: conversation history
    :param MODEL: the model to use
    :param MODEL_use_mode: the input format expected by the model
    :param temperature: sampling temperature
    :param max_tokens: maximum output length
    :return:
"""
output = ""
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, "AI:\n")
Dialog_box.config(state=tk.DISABLED)
if MODEL_use_mode == "ChatCompletion":
response = openai.chat.completions.create(
model=MODEL,
messages=messages,
temperature=temperature,
n=1,
max_tokens=max_tokens,
stream=True,
)
for chunk in response:
try:
                # normal streamed output
answer = chunk.choices[0].delta.content
output += answer
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, answer)
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
except:
                # emit the trailing newline of the streamed text; without this an exception occurs
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, "\n")
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
elif MODEL_use_mode == "Completion":
response = openai.completions.create(
model=MODEL,
prompt=messages[-1]["prompt"],
temperature=temperature,
max_tokens=max_tokens,
n=1,
stream=True,
)
        # stream the reply
for chunk in response:
Dialog_box.config(state=tk.NORMAL)
answer = chunk.choices[0].text
output += answer
Dialog_box.insert(tk.END, answer)
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
        # print the final newline
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, "\n")
Dialog_box.see(tk.END)
Dialog_box.config(state=tk.DISABLED)
elif MODEL_use_mode== "Edit":
        # Given a prompt and an instruction, the model returns an edited version of the prompt.
response = openai.edits.create(
model=MODEL,
input=messages[-1]["input"],
instruction=messages[-1]["instruction"],
temperature=temperature,
n=1,
)
answer = response.choices[0].text
output = answer
        # display the reply
for chunk in answer:
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, chunk)
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
        # print the final newline
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, "\n")
Dialog_box.see(tk.END)
Dialog_box.config(state=tk.DISABLED)
elif MODEL_use_mode == "Embedding":
response = openai.embeddings.create(
model=MODEL,
input=messages[-1]["input"],
)
        answer = response.data[0].embedding
        output = answer
        # display the embedding values
for chunk in answer:
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, chunk+"\n")
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
elif MODEL_use_mode == "Image.create":
response = openai.images.create_variation(
image=messages[-1]["image"],
n=1,
size="1024x1024",
)
        answer = response.data[0].url
        output = answer
        # display the image URL
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, answer)
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
elif MODEL_use_mode == "Image.create_edit":
response = openai.images.edit(
            image=messages[-1]["image"],  # the image to edit
            mask=messages[-1]["mask"],  # an extra image whose fully transparent (zero-alpha) areas mark where to edit
            prompt=messages[-1]["prompt"],  # a text fragment that guides the edit
n=1,
)
answer = response.data[0].url
output = answer
        # display the image URL
Dialog_box.config(state=tk.NORMAL)
Dialog_box.insert(tk.END, answer)
Dialog_box.see(tk.END)
Dialog_box.update()
Dialog_box.config(state=tk.DISABLED)
return output
if __name__ == "main":
pass
| [] |
2024-01-10 | Code-WSY/GPT-SY | Log~Menu_login.py | import tkinter as tk
from windows import window, menubar
from config import font_style, font_size
import openai
from Box_Message import model_message_box
def login():
    # read the API key file
    filename = "../API_KEY/API_KEY_3.5"
    # open the file
    with open(filename, "r", encoding="utf-8") as f:
        # read the first line
        API_KEY = f.readline().strip("\n")
    # log in
    openai.api_key = API_KEY
model_message_box.config(state=tk.NORMAL)
model_message_box.delete("1.0", "end")
model_message_box.insert("insert", "登录成功")
model_message_box.config(font=(font_style, font_size + 4))
model_message_box.config(width=50, height=4)
model_message_box.config(state=tk.DISABLED)
# -----------------------------------------------------------------------------------# | [] |
2024-01-10 | Code-WSY/GPT-SY | Log~MenuBar_function.py | import openai
import tkinter as tk
from tkinter import filedialog
from config import font_style, font_size
from Menu_mode import selected_mode
from Box_Dialog import Dialog_box
from Box_Input import Input_box
from Box_Message import model_message_box
from Cbox_Model import model_list
from Cbox_Promot import func_list
from Bottom_Submit import messages_list
from UI import chat_UI,load_UI,foget_all
"""
Functions implementing the menu-bar features:
1. Open file: open_file()
2. Save file: save_file()
3. Log in: login()
"""
## open file
def open_file():
file_path = tk.filedialog.askopenfilename(
title="选择文件", filetypes=[("All Files", "*")]
)
with open(file_path, "r", encoding="utf-8") as f:
Input_box.delete("1.0", "end")
Input_box.insert("insert", f.read())
def save_file():
    # save the message history:
save_file_path ="../Chat_history/" + model_list.get() + "_" + func_list.get() + ".txt"
    # probe for an unused filename to avoid overwriting an existing one
try:
i=1
while True:
            # try opening the file
f = open(save_file_path, "r", encoding="utf-8")
f.close()
            # no exception above means the file exists; try the next numbered name
save_file_path = "../Chat_history/" + model_list.get() + "_" + func_list.get() + "_" + str(i) + ".txt"
i += 1
except:
        # write the messages one per line
with open(save_file_path, "w", encoding="utf-8") as f:
for message in eval(messages_list.get()):
f.write(str(message) + "\n")
            # report success in the message box
model_message_box.config(state=tk.NORMAL)
model_message_box.delete("1.0", "end")
model_message_box.insert("insert", "保存成功")
model_message_box.config(font=(font_style, font_size + 4))
model_message_box.config(width=50, height=4)
            # close the file
f.close()
def login():
    # read the API key file
    filename = "../API_KEY/API_KEY_3.5"
    # open the file
    with open(filename, "r", encoding="utf-8") as f:
        # read the first line
        API_KEY = f.readline().strip("\n")
    # log in
    openai.api_key = API_KEY
model_message_box.config(state=tk.NORMAL)
model_message_box.delete("1.0", "end")
model_message_box.insert("insert", "登录成功")
model_message_box.config(font=(font_style, font_size + 4))
model_message_box.config(width=50, height=4)
model_message_box.config(state=tk.DISABLED)
def clear_display():
Dialog_box.config(state=tk.NORMAL)
Dialog_box.delete(0.0, tk.END)
Dialog_box.config(state=tk.DISABLED)
def clear_messages_list():
messages_list.set("[]")
model_message_box.config(state=tk.NORMAL)
model_message_box.delete(0.0, tk.END)
model_message_box.insert("insert", "成功清空对话记录")
model_message_box.config(state=tk.DISABLED)
def change_UI():
if selected_mode.get() == "Prompt-based":
foget_all()
chat_UI()
elif selected_mode.get() == "Fine-tuning":
foget_all()
load_UI()
| [] |
2024-01-10 | Code-WSY/GPT-SY | gpt_sy~Menu_login.py | from tkinter.ttk import Combobox
from windows import *
import openai
import os
from Box_Message import Message_box
login_file_path = "../API_KEY/API_KEY"
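# Each line of the key file stores one dict literal:
# {"API_KEY": ..., "API_BASE": ..., "API_NAME": ...}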
def Latest_API_KEY():
    # read ../API_KEY/API_KEY
try:
with open(login_file_path, "r", encoding="utf-8") as f:
api_key = f.read()
except:
Message_box.config(state=tk.NORMAL)
Message_box.delete(0.0, tk.END)
Message_box.insert(tk.END, "API_KEY文件不存在\n")
Message_box.config(state=tk.DISABLED)
return
    # read the last data line (a dict literal)
    api_key = api_key.split("\n")[-2]
    # parse it into a dict
    api = eval(api_key)
    # apply the credentials
openai.api_key = api["API_KEY"]
openai.api_base = api["API_BASE"]
    # display status
    Message_box.config(state=tk.NORMAL)
    Message_box.delete(0.0, tk.END)
    Message_box.insert(tk.END, "Logged in with the latest API_KEY\n" + "API note: " + api["API_NAME"] + "\n")
Message_box.config(state=tk.DISABLED)
def Reset_API_KEY():
def login_api():
key = api_key_entry.get()
base = api_base_entry.get()
note = api_name_entry.get()
        # fall back to the default base URL if empty, all-whitespace, or None
if base == "" or base.isspace() or base is None:
base = "https://api.openai.com/v1"
openai.api_key = key
openai.api_base = base
if note == "" or note.isspace() or note is None:
            # default the note to the current time
import time
note = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        # append the new entry
        with open(login_file_path, "r+", encoding="utf-8") as f:
            # seek to the end of the file
            f.seek(0, 2)  # seek(offset, whence): whence 0 = file start, 1 = current position, 2 = file end
f.write("\n")
f.write(str({"API_KEY": key, "API_BASE": base, "API_NAME": note}))
        # re-read and drop all blank lines
with open(login_file_path, "r", encoding="utf-8") as f:
api_key = f.read()
api_key = api_key.split("\n")
# 删除所有空行
while "" in api_key:
api_key.remove("")
        # rewrite the file
with open(login_file_path, "w", encoding="utf-8") as f:
for i in api_key:
f.write(i+"\n")
        # close the window
        login.destroy()
        # display status
        Message_box.config(state=tk.NORMAL)
        Message_box.delete(0.0, tk.END)
        Message_box.insert(tk.END, "Logged in\n" + "API note: " + note + "\n")
Message_box.config(state=tk.DISABLED)
    login = tk.Toplevel()  # create a child window
login.geometry("300x200")
login.title("Login")
api_key_label = tk.Label(login, text="API_KEY(*):")
api_key_entry = tk.Entry(login)
api_base_label = tk.Label(login, text="API_BASE(option):") # 可选
api_base_entry = tk.Entry(login)
api_name_label = tk.Label(login, text="API_NOTE(option):")
api_name_entry = tk.Entry(login)
login_button = tk.Button(login, text="Login", command=lambda: login_api())
api_key_label.pack()
api_key_entry.pack()
api_base_label.pack()
api_base_entry.pack()
api_name_label.pack()
api_name_entry.pack()
login_button.pack()
def choose_API_KEY():
    # read ../API_KEY/API_KEY
try:
with open(login_file_path, "r", encoding="utf-8") as f:
api_key_choose = f.read()
api_key_choose = api_key_choose.split("\n")
            # drop all blank lines
while "" in api_key_choose:
api_key_choose.remove("")
api_name_list_choose = []
api_key_list_choose = []
api_base_list_choose = []
for i in api_key_choose:
api_choose_list = eval(i)
api_name_list_choose.append(api_choose_list["API_NAME"])
api_key_list_choose.append(api_choose_list["API_KEY"])
api_base_list_choose.append(api_choose_list["API_BASE"])
except:
Message_box.config(state=tk.NORMAL)
Message_box.delete(0.0, tk.END)
Message_box.insert(tk.END, "API_KEY文件错误\n")
Message_box.config(state=tk.DISABLED)
    choosekey = tk.Toplevel()  # create a child window
    choosekey.geometry("300x100")
    choosekey.title("Choose API_KEY")
    # create a label
    Label_choose = tk.Label(choosekey, text="Choose API_KEY")
    choosekey_var = tk.StringVar()
    # combobox
    api_name_option = Combobox(choosekey, values=api_name_list_choose, state="readonly", textvariable=choosekey_var)
def confirm():
openai.api_key = api_key_list_choose[api_name_list_choose.index(choosekey_var.get())]
openai.api_base = api_base_list_choose[api_name_list_choose.index(choosekey_var.get())]
        # display status
        Message_box.config(state=tk.NORMAL)
        Message_box.delete(0.0, tk.END)
        Message_box.insert(tk.END, "Logged in\n" + "API note: " + api_name_option.get() + "\n")
        Message_box.config(state=tk.DISABLED)
        # close the window
        choosekey.destroy()
    # confirm button
    confirm_button = tk.Button(choosekey, text="Confirm", command=lambda: confirm())
    # lay out the widgets
Label_choose.pack()
api_name_option.pack()
confirm_button.pack()
choosekey.mainloop()
def Delete_API_KEY():
    # read ../API_KEY/API_KEY
try:
with open(login_file_path, "r", encoding="utf-8") as f:
api_keys = f.read()
api_keys = api_keys.split("\n")
            # drop all blank lines
while "" in api_keys:
api_keys.remove("")
api_name_delete = []
for i in api_keys:
api_name_delete.append(eval(i)["API_NAME"])
    except Exception:
        Message_box.config(state=tk.NORMAL)
        Message_box.delete(0.0, tk.END)
        Message_box.insert(tk.END, "API_KEY文件不存在\n")
        Message_box.config(state=tk.DISABLED)
        return
    def delete():
        # Read the latest API_KEY entries
        with open(login_file_path, "r", encoding="utf-8") as f:
            api_key_d = f.read()
        # Convert to a list of lines
        api_key_d = api_key_d.split("\n")
        # Remove all blank lines
        while "" in api_key_d:
            api_key_d.remove("")
        delete_name = api_name_option.get()
        new_api_key_d = []
        # Keep every entry except the one being deleted
        for i in api_key_d:
            if eval(i)["API_NAME"] != delete_name:
                new_api_key_d.append(i)
        # Rewrite the remaining entries
        with open(login_file_path, "w", encoding="utf-8") as f:
            for i in new_api_key_d:
                f.write(i+"\n")
        # Show the result
        Message_box.config(state=tk.NORMAL)
        Message_box.delete(0.0, tk.END)
        Message_box.insert(tk.END, "已删除\n" + "API note: " + api_name_option.get() + "\n")
        Message_box.config(state=tk.DISABLED)
        # Close the delete window
        Deletekey.destroy()
    Deletekey = tk.Toplevel()  # Create a child window
Deletekey.geometry("300x100")
Deletekey.title("选择 API_KEY")
    # Create the label
Label_Delete = tk.Label(Deletekey, text="选择API_KEY")
choosekey_var = tk.StringVar()
    # Dropdown (combobox)
api_name_option = Combobox(Deletekey, values=api_name_delete, state="readonly", textvariable=choosekey_var)
    # Confirm button
confirm_button = tk.Button(Deletekey, text="删除", command=lambda: delete())
Label_Delete.pack()
api_name_option.pack()
confirm_button.pack()
Deletekey.mainloop()
# -----------------------------------------------------------------------------------#
# Create a menu
filemenu_login = tk.Menu(menubar, tearoff=0)
# Menu commands
filemenu_login.add_command(label="Latest API_KEY", command=lambda: Latest_API_KEY())
filemenu_login.add_command(label="Create API_KEY", command=lambda: Reset_API_KEY())
filemenu_login.add_command(label="Choose API_KEY", command=lambda: choose_API_KEY())
filemenu_login.add_separator()
filemenu_login.add_command(label="Delete API_KEY", command=lambda: Delete_API_KEY())
# -----------------------------------------------------------------------------------#
if os.path.exists(login_file_path):
with open(login_file_path, "r", encoding="utf-8") as f:
api_key = f.read()
api_key = api_key.split("\n")
    # Remove all blank lines
while "" in api_key:
api_key.remove("")
    # Collect API_NAME, API_KEY, API_BASE from each entry
api_name = []
api_key_list = []
api_base_list = []
for i in api_key:
api_name.append(eval(i)["API_NAME"])
api_key_list.append(eval(i)["API_KEY"])
api_base_list.append(eval(i)["API_BASE"])
api_key_init = api_key[0]
    # Convert to a dict
api = eval(api_key_init)
    # Apply the stored credentials
openai.api_key = api["API_KEY"]
openai.api_base = api["API_BASE"]
    # Show the login status
Message_box.config(state=tk.NORMAL)
Message_box.delete(0.0, tk.END)
Message_box.insert(tk.END, "已登录 \nAPI note: " + api["API_NAME"] + "\n")
Message_box.config(state=tk.DISABLED)
else:
Message_box.config(state=tk.NORMAL)
Message_box.delete(0.0, tk.END)
Message_box.insert(tk.END, "未登录:\nAPI_KEY文件不存在\n")
Message_box.config(state=tk.DISABLED)
if __name__=="__main__":
pass | [] |
2024-01-10 | Code-WSY/GPT-SY | gpt_sy~Menu_engine.py | import openai
from windows import *
def engine_list():
models_list = tk.Tk()
models_list.title("当前API可用模型")
print("API:"+openai.api_key)
print("API_Base:"+openai.api_base)
#models_list.geometry("300x300")
#models_list.resizable(False, False)
    # New text widget
    engine_list_text = tk.Text(models_list)
    # Font
    engine_list_text.config(font=(font_style, font_size))
    # Add a scrollbar
    engine_list_scroll = tk.Scrollbar(models_list)
    engine_list_scroll.config(command=engine_list_text.yview)
    engine_list_text.config(yscrollcommand=engine_list_scroll.set)
    engine_list_scroll.pack(side=tk.RIGHT, fill=tk.Y)
    engine_list_text.pack()
    # Fetch the model list (kept separate from the models_list window variable)
    models = openai.models.list()
    engine_list_text.config(state=tk.NORMAL)
    engine_list_text.delete(0.0, tk.END)
    engine_list_text.insert(tk.END, "API:"+openai.api_key+"\n")
    engine_list_text.insert(tk.END, "API_Base:"+openai.api_base+"\n")
    engine_list_text.insert(tk.END, "可用模型:\n")
    # Display the model list
    for i in models:
        # i is a Model() object
        #print(i)
        engine_list_text.insert(tk.END, i.object+": "+i.id+"\n")
    engine_list_text.config(state=tk.DISABLED)
# -----------------------------------------------------------------------------------#
filemenu_engine = tk.Menu(menubar, tearoff=0)
# Add menu entry: available models
filemenu_engine.add_command(label="可用模型", command=lambda: engine_list())
| [] |
2024-01-10 | m0bstaRx/generative-ai-workbook | personal_projects~3.fact-checker~fact-checker.py | # AUTOGENERATED! DO NOT EDIT! File to edit: example.ipynb.
# %% auto 0
__all__ = []
# %% example.ipynb 1
import os
from nbdev.export import nb_export
import streamlit as st
from streamlit_jupyter import StreamlitPatcher, tqdm
StreamlitPatcher().jupyter()
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
# %% example.ipynb 3
# If an API key has been provided, create an OpenAI language model instance
API = os.getenv('OPENAI_KEY')
if API:
llm = OpenAI(temperature=0.7, openai_api_key=os.getenv('OPENAI_KEY'))
else:
# If an API key hasn't been provided, display a warning message
st.warning("Enter your OPENAI API-KEY. Get your OpenAI API key from [here](https://platform.openai.com/account/api-keys).\n")
# %% example.ipynb 5
# Create simple web app using Streamlit
# Set the title of the Streamlit app
st.title("✅ What's TRUE : Using LangChain `SimpleSequentialChain`")
# Add a link to the Github repository that inspired this app
st.markdown("Inspired from [fact-checker](https://github.com/jagilley/fact-checker) by Jagiley")
# %% example.ipynb 6
# Add a text input box for the user's question
user_question = st.text_input(
"Enter Your Question : ",
placeholder = "How was your day?",
)
# %% example.ipynb 7
# Generating the final answer to the user's question using all the chains
if st.button("Tell me about it", type="primary"):
# Chain 1: Generating a rephrased version of the user's question
template = """{question}\n\n"""
prompt_template = PromptTemplate(input_variables=["question"], template=template)
question_chain = LLMChain(llm=llm, prompt=prompt_template)
# Chain 2: Generating assumptions made in the statement
template = """Here is a statement:
{statement}
Make a bullet point list of the assumptions you made when producing the above statement.\n\n"""
prompt_template = PromptTemplate(input_variables=["statement"], template=template)
assumptions_chain = LLMChain(llm=llm, prompt=prompt_template)
assumptions_chain_seq = SimpleSequentialChain(
chains=[question_chain, assumptions_chain], verbose=True
)
# Chain 3: Fact checking the assumptions
template = """Here is a bullet point list of assertions:
{assertions}
For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"""
prompt_template = PromptTemplate(input_variables=["assertions"], template=template)
fact_checker_chain = LLMChain(llm=llm, prompt=prompt_template)
fact_checker_chain_seq = SimpleSequentialChain(
chains=[question_chain, assumptions_chain, fact_checker_chain], verbose=True
)
# Final Chain: Generating the final answer to the user's question based on the facts and assumptions
template = """In light of the above facts, how would you answer the question '{}'""".format(
user_question
)
template = """{facts}\n""" + template
prompt_template = PromptTemplate(input_variables=["facts"], template=template)
answer_chain = LLMChain(llm=llm, prompt=prompt_template)
overall_chain = SimpleSequentialChain(
chains=[question_chain, assumptions_chain, fact_checker_chain, answer_chain],
verbose=True,
)
# Running all the chains on the user's question and displaying the final answer
st.success(overall_chain.run(user_question))
| [
"Here is a statement:\n {statement}\n Make a bullet point list of the assumptions you made when producing the above statement.\n\n",
"In light of the above facts, how would you answer the question 'PLACEHOLDER'",
"question",
"statement",
"{question}\n\n",
"assertions",
"{facts}\nPLACEHOLDER",
"Here is a bullet point list of assertions:\n {assertions}\n For each assertion, determine whether it is true or false. If it is false, explain why.\n\n"
] |
2024-01-10 | Stayu03/Song-AlbumRecApp | song_rec_app.py | import streamlit as st
import pandas as pd
import openai  # import OpenAI
# Set Streamlit app title and description
st.title("Music Mood Recommender 🎵")
st.write("Tell us about your mood! We will recommend 3 songs and 1 album based on your mood!")
# Add a sidebar for the OpenAI API key input
api_key = st.sidebar.text_input("Enter your OpenAI API key", type="password", key="api_key_input")
# Set OpenAI API key
client = openai.OpenAI(api_key=api_key)
# Function to recommend songs and albums based on mood and language
def recommend_songs_and_album(mood, language):
song_recommendations = []
prompt_song1 = f"""
Could you recommend the first song that expresses {mood} in {language}?
I want details about the song.
Could you tell me the artist name, album name, and a quote from the lyrics (3 sentences)?
Could you give me information by this format for Example of the response:
    For the first song, I recommend the song "song name" by artist name. Could you give me the song detail?
*skip a line*
Album: album name *skip a line*
Quote from the lyrics: quote from the lyrics and translate it to English
"""
prompt_song2 = f"""
Could you recommend another song (not the same as prompt_song1) that expresses {mood} in {language} which is not the same artist as prompt_song1 ?
I want details about the song.
Could you tell me the artist name, album name, and a quote from the lyrics (3 sentences)?
Could you give me information by this format for Example of the response:
    For the second song, I recommend the song "song name" by artist name. Could you give me the song detail?
*skip a line*
Album: album name *skip a line*
Quote from the lyrics: quote from the lyrics and translate it to English
"""
prompt_song3 = f"""
    Could you recommend another song (not the same as prompt_song1 and prompt_song2) that expresses {mood} in {language} which is not the same artist as prompt_song1 and prompt_song2?
I want details about the song.
Could you tell me the artist name, album name, and a quote from the lyrics (3 sentences)?
Could you give me information by this format for Example of the response:
    For the third song, I recommend the song "song name" by artist name. Could you give me the song detail?
*skip a line*
Album: album name *skip a line*
Quote from the lyrics: quote from the lyrics and translate it to English
"""
prompt_album = f"""
You want to find albums that express a {mood} mood in {language}.
Could you recommend one album with its respective artist, release year,
    3 songs from the album (including the title track) and quote some lyrics from each song?
Please skip a line for each detail.
For example of the response:
The album that expresses a {mood} mood in {language} is "album name" by artist name.
Here are the additional details about the album! <skip a line>
    Artist : artist name <skip a line>
Release Year : release year <skip a line>
    3 Songs from the album and real lyrics from each song: *new line*
1. "song's name" - example of a quote from the lyrics and translate it to English *new line*
2. "song's name" - example of a quote from the lyrics and translate it to English *new line*
3. "song's name" - example of a quote from the lyrics and translate it to English *new line*
note: only 3 sentences for each song
"""
song_response1 = client.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt_song1}
],
model="gpt-3.5-turbo",
max_tokens=300,
)
song_response2 = client.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt_song2}
],
model="gpt-3.5-turbo",
max_tokens=300
)
song_response3 = client.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt_song3}
],
model="gpt-3.5-turbo",
max_tokens=300
)
songs1 = song_response1.choices
if len(songs1) > 0 :
song_info1 = {
"Recommended Song": songs1[0].message.content.strip(),
}
song_recommendations.append(song_info1)
songs2 = song_response2.choices
if len(songs2) > 0 and songs2[0].message.content.strip() != songs1[0].message.content.strip():
song_info2 = {
"Recommended Song": songs2[0].message.content.strip(),
}
song_recommendations.append(song_info2)
songs3 = song_response3.choices
if len(songs3) > 0 and songs3[0].message.content.strip() != songs1[0].message.content.strip() and songs3[0].message.content.strip() != songs2[0].message.content.strip():
song_info3 = {
"Recommended Song": songs3[0].message.content.strip(),
}
song_recommendations.append(song_info3)
album_response = client.chat.completions.create(
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt_album}
],
model="gpt-3.5-turbo",
max_tokens=1000,
)
album = album_response.choices
album_recommendation = []
if len(album) > 0:
album_info = {
"This album is for you ": album[0].message.content.strip(),
}
album_recommendation.append(album_info)
return song_recommendations, album_recommendation
# Get user input for mood and language
user_input = st.text_input("Enter the mood and language of songs (e.g., happy/Japanese)")
# Check if the user input matches the expected format
if "/" not in user_input:
st.warning("Please enter the mood and language in the format 'mood/language'.")
else:
mood_input, language_input = user_input.split("/")
# Call the recommend_songs_and_albums function to get song and album recommendations
song_recommendations, album_recommendation = recommend_songs_and_album(mood_input, language_input)
st.subheader("Recommended Songs")
for i in range(len(song_recommendations)):
song_name = song_recommendations[i].get("Recommended Song", "")
decorated_song_name = f"{i+1}. {song_name} \n"
st.write(decorated_song_name)
# Display the album recommendations
st.subheader("Album Recommendation")
album_name = album_recommendation[0].get("This album is for you ", "")
decorated_album_name = f"{album_name}"
album_details = album_recommendation[0].get("Detail of the album", "")
decorated_album_details = f"{album_details}"
st.write(decorated_album_name)
st.write(decorated_album_details) | [
"\n Could you recommend another song (not the same as prompt_song1) that expresses PLACEHOLDER in PLACEHOLDER which is not the same artist as prompt_song1 ?\n I want details about the song.\n Could you tell me the artist name, album name, and a quote from the lyrics (3 sentences)?\n\n Could you give me information by this format for Example of the response:\n For the second song, I recommend the song \"song name\" by artist name. Could yougive me the song detail?\n *skip a line*\n Album: album name *skip a line*\n Quote from the lyrics: quote from the lyrics and translate it to English\n ",
"\n You want to find albums that express a PLACEHOLDER mood in PLACEHOLDER. \n Could you recommend one album with its respective artist, release year, \n 3 songs from the album (including the title track) and qoute some lyrics from each songs?\n Please skip a line for each detail.\n\n For example of the response:\n The album that expresses a PLACEHOLDER mood in PLACEHOLDER is \"album name\" by artist name. \n Here are the additional details about the album! <skip a line>\n Artist : artist name <skpi a line>\n Release Year : release year <skip a line>\n 3 Songs from the album and real lyrics from each songs: *new line*\n 1. \"song's name\" - example of a quote from the lyrics and translate it to English *new line*\n 2. \"song's name\" - example of a quote from the lyrics and translate it to English *new line*\n 3. \"song's name\" - example of a quote from the lyrics and translate it to English *new line*\n note: only 3 sentences for each song\n ",
"You are a helpful assistant.",
"\n Could you recommend the first song that expresses PLACEHOLDER in PLACEHOLDER?\n I want details about the song.\n Could you tell me the artist name, album name, and a quote from the lyrics (3 sentences)?\n\n Could you give me information by this format for Example of the response:\n For the first song, I recommend the song \"song name\" by artist name. Could yougive me the song detail?\n *skip a line*\n Album: album name *skip a line*\n Quote from the lyrics: quote from the lyrics and translate it to English\n ",
"\n Could you recommend another song (not the same as prompt_song1 and prompt_song2) that expresses PLACEHOLDER in PLACEHOLDER which is not the same as artist as prompt_song1 and prompt_song2?\n\n I want details about the song.\n Could you tell me the artist name, album name, and a quote from the lyrics (3 sentences)?\n\n Could you give me information by this format for Example of the response:\n For the third song, I recommend the song \"song name\" by artist name. Could yougive me the song detail?\n *skip a line*\n Album: album name *skip a line*\n Quote from the lyrics: quote from the lyrics and translate it to English\n "
] |
2024-01-10 | last-project-rookies/PICA | PICA_MIDDLE~modules~gpt~chat_init.py | import faiss
import os
import urllib.request
from modules.aws import AwsQuery
from langchain.docstore import InMemoryDocstore
from langchain.vectorstores import FAISS
from langchain.memory import VectorStoreRetrieverMemory
from langchain.embeddings.openai import OpenAIEmbeddings
OPENAI_API_KEY = "openai-api-key"
HUGGINGFACEHUB_API_TOKEN = ""
SERPAPI_API_KEY = ""
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
os.environ["SERPAPI_API_KEY"] = SERPAPI_API_KEY
EMBEDDING_SIZE = 1536  # Dimensionality of OpenAIEmbeddings (check whether Korean is also handled)
embedding_fn = OpenAIEmbeddings().embed_query
aws = AwsQuery()
def make_static_vecDB(user_id, user, name, mbti, age):
"""
사전에 질문과 답변 프리셋을 미리 구성해놓고 생성
사전 DB는 초기에 저장된 질문 답변 외에 추가되지 않음
"""
index = faiss.IndexFlatL2(EMBEDDING_SIZE)
vectorstore = FAISS(embedding_fn, index, InMemoryDocstore({}), {})
preset = [
("안녕", "안녕!"),
("너 이름이 뭐야", f"내 이름은 {name}야"),
("너 누구야", f"난 {name}야"),
("너 성격이 어때?", f"내 mbti는 {mbti}야"),
("너 mbti가 뭐야", f"내 mbti는 {mbti}야"),
("너 몇살이야?", f"난 {age}살이야"),
]
vectorstore.add_texts([q for q, _ in preset], [{"answer": a} for _, a in preset])
vectorstore.save_local(f"{user_id}/static")
def make_memory_vecDB(user_id, user, name):
"""
기억 DB를 생성
기억 DB는 사용자가 입력한 값과 챗봇의 대답이 3세트씩 Document 단위로 저장함
"""
index = faiss.IndexFlatL2(EMBEDDING_SIZE)
vectorstore = FAISS(embedding_fn, index, InMemoryDocstore({}), {})
retriever = vectorstore.as_retriever(search_kwargs=dict(k=1))
memory = VectorStoreRetrieverMemory(retriever=retriever, return_docs=False)
    # Fetch yesterday's conversation summary from S3
    url = aws.CLOUD_FLONT_CDN + f"/{user_id}/log_summary/yesterday.txt"
    yesterday_txt = ""
    try:
        req = urllib.request.Request(url)
        with urllib.request.urlopen(req) as response:
            yesterday_txt = response.read().decode("utf-8")
    except Exception as e:
        print("chat_init err : ", e)
        yesterday_txt = ""
    # Save into the vector DB
memory.save_context({"시간": "어제"}, {"대화": yesterday_txt})
vectorstore.save_local(f"{user_id}/memory")
| [] |
2024-01-10 | last-project-rookies/PICA | PICA_MIDDLE~modules~gpt~emotion.py | import openai
import json
import asyncio
OPENAI_API_KEY = "openai-api-key"
openai.api_key = OPENAI_API_KEY
# Text-based emotion analysis with ChatGPT
async def chat_emotion(who, user_content):
if who == "gpt":
emotion = "happiness, sadness, anger"
emotion_dict = {
"emotion": {
"happiness": 0.9,
"sadness": 0.0,
"anger": 0.0,
}
}
else:
emotion = "happiness, excited, sadness, bored, disgust, anger, calm, comfortable"
emotion_dict = {
"emotion": {
"happiness": 0.9,
"sadness": 0.0,
"anger": 0.0,
"excited": 0.0,
"bored": 0.0,
"disgust": 0.0,
"calm": 0.0,
"comfortable": 0.0,
}
}
    # Emotion analysis prompt
system_content = f"""
I want you to help me to do text-based emotion analysis.
Please analyze its emotion and express it with numbers.
emotion(rates of {emotion})
Provide them in JSON format with the following keys: emotion
Examples:
{emotion_dict}
Also, you should observe the format given in the example.
Don't add your comments, but answer right away.
"""
    # Run the emotion analysis with ChatGPT
messages = []
result = None
messages.append({"role": "system", "content": f"{system_content}"})
messages.append({"role": "user", "content": f"{user_content}"})
try:
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
assistant_content = completion.choices[0].message["content"].strip()
result = json.loads(assistant_content.replace("'", '"'))
    # If the emotion cannot be determined, default to happy
    except Exception as e:
        print("emotion err : could not determine the emotion")
result = emotion_dict
finally:
print(who + " : ", result)
return result
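

# A minimal usage sketch (not part of the original module): chat_emotion is a
# coroutine, so it has to be driven by an event loop. The sample sentence is a
# hypothetical placeholder.
if __name__ == "__main__":
    result = asyncio.run(chat_emotion("user", "오늘 정말 행복해!"))
    print(result["emotion"])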
| [
"PLACEHOLDER"
] |
2024-01-10 | younesbram/chat-with-repo | github.py | import os
import subprocess
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import DeepLake
from langchain.embeddings.openai import OpenAIEmbeddings
from dotenv import load_dotenv
import openai
import logging
# Load environment variables from a .env file (for securely managing secrets like API keys)
load_dotenv()
# Set OpenAI API key (assuming it's defined in your .env file)
openai.api_key = os.environ.get('OPENAI_API_KEY')
# Set up basic logging configuration with level set to INFO
logging.basicConfig(level=logging.INFO)
def clone_repository(repo_url, local_path):
"""
    (Not currently used; open a PR if you really want it and DM me @didntdrinkwater on Twitter (X).) This function clones a git repository from the provided URL into a local path.
"""
subprocess.run(["git", "clone", repo_url, local_path])
def is_binary(file_path):
"""
This function checks whether a file is binary by reading a chunk of the file and looking for null bytes.
"""
with open(file_path, 'rb') as file:
chunk = file.read(1024)
return b'\0' in chunk
def load_docs(root_dir):
"""
This function walks through a directory, loading text documents and splitting them into chunks.
"""
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for file in filenames:
file_path = os.path.join(dirpath, file)
if not is_binary(file_path):
try:
loader = TextLoader(file_path, encoding='utf-8')
doc_chunks = loader.load_and_split()
# Prepend the filename to the first chunk
if doc_chunks:
doc_chunks[0].page_content = f"// {file}\n{doc_chunks[0].page_content}"
docs.extend(doc_chunks)
except Exception as e:
logging.error(f"Error loading file {file}: {str(e)}")
return docs
def split_docs(docs):
"""
This function splits the loaded documents into smaller chunks of specified size.
"""
text_splitter = CharacterTextSplitter(chunk_size=1666, chunk_overlap=0)
return text_splitter.split_documents(docs)
def main(repo_url, root_dir, deep_lake_path):
"""
This is the main function that loads documents from the specified directory, splits them into chunks, and
stores them into a DeepLake vector store with embeddings calculated by OpenAI.
"""
# Print out the directory that the script is going to load documents from
print(f"Loading documents from directory: {root_dir}")
# Load documents
docs = load_docs(root_dir)
print(f"Loaded {len(docs)} documents.")
# Split the documents
texts = split_docs(docs)
# Initialize embeddings and DeepLake vector store
embeddings = OpenAIEmbeddings()
db = DeepLake(dataset_path=deep_lake_path, embedding_function=embeddings)
# Add documents to DeepLake
db.add_documents(texts)
# Entrypoint of the script
if __name__ == "__main__":
repo_url = os.environ.get('REPO_URL')
root_dir = "/mnt/c/Users/hueyfreeman/OneDrive/Desktop/twitter/x/chat-with-repo/the-algorithm" # change me to the repo (clone the repo first)
deep_lake_path = os.environ.get('DEEPLAKE_DATASET_PATH')
main(repo_url, root_dir, deep_lake_path)
| [] |
2024-01-10 | shiraco/lw-chatgpt | whisper.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
# Settings for OpenAI
openai.api_key = os.environ["OPENAI_API_KEY"]
def generate_response(file_path: str = "./data/speech.m4a") -> str:
"""Generate response from Whisper
:param text: request text
:return: generated text
"""
response = openai.Audio.transcribe(
file=open(file_path, "rb"),
model="whisper-1",
)
res = response.text.strip()
return res
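

# A minimal usage sketch (not part of the original module); the audio path is
# the module's default and assumes such a file exists locally.
if __name__ == "__main__":
    print(generate_response("./data/speech.m4a"))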
| [] |
2024-01-10 | faisale/langchain | libs~langchain~langchain~embeddings~cohere.py | from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
class CohereEmbeddings(BaseModel, Embeddings):
"""Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v3.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
"""Cohere client."""
async_client: Any #: :meta private:
"""Cohere async client."""
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
max_retries: Optional[int] = None
"""Maximum number of retries to make when generating."""
request_timeout: Optional[float] = None
"""Timeout in seconds for the Cohere API request."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
max_retries = values.get("max_retries")
request_timeout = values.get("request_timeout")
try:
import cohere
client_name = values["user_agent"]
values["client"] = cohere.Client(
cohere_api_key,
max_retries=max_retries,
timeout=request_timeout,
client_name=client_name,
)
values["async_client"] = cohere.AsyncClient(
cohere_api_key,
max_retries=max_retries,
timeout=request_timeout,
client_name=client_name,
)
except ImportError:
raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model,
texts=texts,
input_type="search_document",
truncate=self.truncate,
).embeddings
return [list(map(float, e)) for e in embeddings]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Async call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = await self.async_client.embed(
model=self.model,
texts=texts,
input_type="search_document",
truncate=self.truncate,
)
return [list(map(float, e)) for e in embeddings.embeddings]
def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = self.client.embed(
model=self.model,
texts=[text],
input_type="search_query",
truncate=self.truncate,
).embeddings
return [list(map(float, e)) for e in embeddings][0]
async def aembed_query(self, text: str) -> List[float]:
"""Async call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = await self.async_client.embed(
model=self.model,
texts=[text],
input_type="search_query",
truncate=self.truncate,
)
return [list(map(float, e)) for e in embeddings.embeddings][0]
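

# A minimal usage sketch (not part of the library module): embed a couple of
# documents and a query. The API key below is a hypothetical placeholder.
if __name__ == "__main__":
    embedder = CohereEmbeddings(cohere_api_key="my-api-key")
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello")
    print(len(doc_vectors), len(query_vector))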
| [] |
2024-01-10 | faisale/langchain | libs~langchain~langchain~embeddings~clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ClarifaiEmbeddings(BaseModel, Embeddings):
"""Clarifai embedding models.
To use, you should have the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import ClarifaiEmbeddings
clarifai = ClarifaiEmbeddings(
model="embed-english-light-v3.0", clarifai_api_key="my-api-key"
)
"""
stub: Any #: :meta private:
"""Clarifai stub."""
userDataObject: Any
"""Clarifai user data object."""
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
"""Clarifai personal access token to use."""
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["pat"] is None:
raise ValueError("Please provide a pat.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=user_id,
app_id=app_id,
pat=values["pat"],
base=values["api_base"],
)
values["userDataObject"] = auth.get_user_app_id_proto()
values["stub"] = create_stub(auth)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Clarifai's embedding models.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
batch_size = 32
embeddings = []
for i in range(0, len(texts), batch_size):
batch = texts[i : i + batch_size]
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=t))
)
for t in batch
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings.extend(
[
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to Clarifai's embedding models.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=text))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs[0])
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings = [
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
return embeddings[0]
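

# A minimal usage sketch (not part of the library module): the ids and PAT
# below are hypothetical placeholders for a Clarifai account and model.
if __name__ == "__main__":
    embedder = ClarifaiEmbeddings(
        user_id="my-user-id", app_id="my-app-id", model_id="my-model-id", pat="my-pat"
    )
    print(embedder.embed_query("hello world")[:5])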
| [] |
2024-01-10 | faisale/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
2024-01-10 | faisale/langchain | libs~langchain~tests~integration_tests~document_loaders~test_docusaurus.py | from pathlib import Path
from langchain.document_loaders import DocusaurusLoader
DOCS_URL = str(Path(__file__).parent.parent / "examples/docusaurus-sitemap.xml")
def test_docusarus() -> None:
"""Test sitemap loader."""
loader = DocusaurusLoader(DOCS_URL, is_local=True)
documents = loader.load()
assert len(documents) > 1
assert "🦜️🔗 Langchain" in documents[0].page_content
def test_filter_docusaurus_sitemap() -> None:
"""Test sitemap loader."""
loader = DocusaurusLoader(
DOCS_URL,
is_local=True,
filter_urls=[
"https://python.langchain.com/docs/integrations/document_loaders/sitemap"
],
)
documents = loader.load()
assert len(documents) == 1
assert "SitemapLoader" in documents[0].page_content
def test_docusarus_metadata() -> None:
    """Test sitemap loader."""
    def sitemap_metadata_one(meta: dict, _content: None) -> dict:
        return {**meta, "mykey": "Super Important Metadata"}
    loader = DocusaurusLoader(
DOCS_URL,
is_local=True,
meta_function=sitemap_metadata_one,
)
documents = loader.load()
assert len(documents) > 1
assert "mykey" in documents[0].metadata
assert "Super Important Metadata" in documents[0].metadata["mykey"]
| [] |
2024-01-10 | faisale/langchain | templates~rag-timescale-hybrid-search-time~rag_timescale_hybrid_search_time~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
from typing import Optional
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> Optional[datetime]:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
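

# A minimal usage sketch (not part of the original module): load a small slice
# of the dataset. TIMESCALE_SERVICE_URL is a hypothetical environment variable.
if __name__ == "__main__":
    load_ts_git_dataset(
        service_url=os.environ["TIMESCALE_SERVICE_URL"],
        num_records=50,
    )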
| [] |
2024-01-10 | faisale/langchain | libs~langchain~langchain~document_loaders~docusaurus.py | """Load Documents from Docusarus Documentation"""
from typing import Any, List, Optional
from langchain.document_loaders.sitemap import SitemapLoader
class DocusaurusLoader(SitemapLoader):
"""
Loader that leverages the SitemapLoader to loop through the generated pages of a
Docusaurus Documentation website and extracts the content by looking for specific
HTML tags. By default, the parser searches for the main content of the Docusaurus
page, which is normally the <article>. You also have the option to define your own
custom HTML tags by providing them as a list, for example: ["div", ".main", "a"].
"""
def __init__(
self,
url: str,
custom_html_tags: Optional[List[str]] = None,
**kwargs: Any,
):
"""
Initialize DocusaurusLoader
Args:
url: The base URL of the Docusaurus website.
custom_html_tags: Optional custom html tags to extract content from pages.
kwargs: Additional args to extend the underlying SitemapLoader, for example:
filter_urls, blocksize, meta_function, is_local, continue_on_failure
"""
if not kwargs.get("is_local"):
url = f"{url}/sitemap.xml"
self.custom_html_tags = custom_html_tags or ["main article"]
super().__init__(
url,
parsing_function=kwargs.get("parsing_function") or self._parsing_function,
**kwargs,
)
    def _parsing_function(self, content: Any) -> str:
        """Parses specific elements from a Docusaurus page."""
        relevant_elements = content.select(",".join(self.custom_html_tags))
        # Keep only the text of the elements matching the configured tags.
        return "\n".join(str(element.get_text()) for element in relevant_elements)
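

# A minimal usage sketch (not part of the library module): the sitemap URL and
# filter below are illustrative placeholders.
if __name__ == "__main__":
    loader = DocusaurusLoader(
        "https://python.langchain.com",
        filter_urls=["https://python.langchain.com/docs/get_started/introduction"],
    )
    for doc in loader.load():
        print(doc.metadata.get("source"), len(doc.page_content))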
| [] |
2024-01-10 | faisale/langchain | libs~langchain~tests~unit_tests~document_transformers~test_beautiful_soup_transformer.py | """Unit tests for beautiful soup document transformer."""
import pytest
from langchain.document_transformers import BeautifulSoupTransformer
from langchain.schema.document import Document
@pytest.mark.requires("bs4")
def test_transform_empty_html() -> None:
bs_transformer = BeautifulSoupTransformer()
empty_html = "<html></html>"
documents = [Document(page_content=empty_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == ""
@pytest.mark.requires("bs4")
def test_extract_paragraphs() -> None:
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
"<html><h1>Header</h1><p>First paragraph.</p>"
"<p>Second paragraph.</p><h1>Ignore at end</h1></html>"
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == "First paragraph. Second paragraph."
@pytest.mark.requires("bs4")
def test_strip_whitespace() -> None:
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
"<html><h1>Header</h1><p><span>First</span> paragraph.</p>"
"<p>Second paragraph. </p></html>"
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == "First paragraph. Second paragraph."
@pytest.mark.requires("bs4")
def test_extract_html() -> None:
bs_transformer = BeautifulSoupTransformer()
paragraphs_html = (
"<html>Begin of html tag"
"<h1>Header</h1>"
"<p>First paragraph.</p>"
"Middle of html tag"
"<p>Second paragraph.</p>"
"End of html tag"
"</html>"
)
documents = [Document(page_content=paragraphs_html)]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["html", "p"]
)
assert docs_transformed[0].page_content == (
"Begin of html tag "
"Header First paragraph. "
"Middle of html tag "
"Second paragraph. "
"End of html tag"
)
@pytest.mark.requires("bs4")
def test_remove_style() -> None:
bs_transformer = BeautifulSoupTransformer()
with_style_html = (
"<html><style>my_funky_style</style><p>First paragraph.</p></html>"
)
documents = [Document(page_content=with_style_html)]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["html"]
)
assert docs_transformed[0].page_content == "First paragraph."
@pytest.mark.requires("bs4")
def test_remove_nested_tags() -> None:
"""
If a tag_to_extract is _inside_ an unwanted_tag, it should be removed
(e.g. a <p> inside a <table> if <table> is unwanted).)
If an unwanted tag is _inside_ a tag_to_extract, it should be removed,
but the rest of the tag_to_extract should stay.
This means that "unwanted_tags" have a higher "priority" than "tags_to_extract".
"""
bs_transformer = BeautifulSoupTransformer()
with_style_html = (
"<html><style>my_funky_style</style>"
"<table><td><p>First paragraph, inside a table.</p></td></table>"
"<p>Second paragraph<table><td> with a cell </td></table>.</p>"
"</html>"
)
documents = [Document(page_content=with_style_html)]
docs_transformed = bs_transformer.transform_documents(
documents, unwanted_tags=["script", "style", "table"]
)
assert docs_transformed[0].page_content == "Second paragraph."
@pytest.mark.requires("bs4")
def test_remove_unwanted_lines() -> None:
bs_transformer = BeautifulSoupTransformer()
with_lines_html = "<html>\n\n<p>First \n\n paragraph.</p>\n</html>\n\n"
documents = [Document(page_content=with_lines_html)]
docs_transformed = bs_transformer.transform_documents(documents, remove_lines=True)
assert docs_transformed[0].page_content == "First paragraph."
@pytest.mark.requires("bs4")
def test_do_not_remove_repeated_content() -> None:
bs_transformer = BeautifulSoupTransformer()
with_lines_html = "<p>1\n1\n1\n1</p>"
documents = [Document(page_content=with_lines_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == "1 1 1 1"
@pytest.mark.requires("bs4")
def test_extract_nested_tags() -> None:
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'>"
"<p><span>First</span> paragraph.</p>"
"<p>Second <div>paragraph.</div></p>"
"<p><p>Third paragraph.</p></p>"
"</div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert (
docs_transformed[0].page_content
== "First paragraph. Second paragraph. Third paragraph."
)
@pytest.mark.requires("bs4")
def test_extract_more_nested_tags() -> None:
bs_transformer = BeautifulSoupTransformer()
nested_html = (
"<html><div class='some_style'>"
"<p><span>First</span> paragraph.</p>"
"<p>Second paragraph.</p>"
"<p>Third paragraph with a list:"
"<ul>"
"<li>First list item.</li>"
"<li>Second list item.</li>"
"</ul>"
"</p>"
"<p>Fourth paragraph.</p>"
"</div></html>"
)
documents = [Document(page_content=nested_html)]
docs_transformed = bs_transformer.transform_documents(documents)
assert docs_transformed[0].page_content == (
"First paragraph. Second paragraph. "
"Third paragraph with a list: "
"First list item. Second list item. "
"Fourth paragraph."
)
@pytest.mark.requires("bs4")
def test_transform_keeps_order() -> None:
bs_transformer = BeautifulSoupTransformer()
multiple_tags_html = (
"<h1>First heading.</h1>"
"<p>First paragraph.</p>"
"<h1>Second heading.</h1>"
"<p>Second paragraph.</p>"
)
documents = [Document(page_content=multiple_tags_html)]
# Order of "p" and "h1" in the "tags_to_extract" parameter is NOT important here:
# it will keep the order of the original HTML.
docs_transformed_p_then_h1 = bs_transformer.transform_documents(
documents, tags_to_extract=["p", "h1"]
)
assert (
docs_transformed_p_then_h1[0].page_content
== "First heading. First paragraph. Second heading. Second paragraph."
)
# Recreating `documents` because transform_documents() modifies it.
documents = [Document(page_content=multiple_tags_html)]
# changing the order of "h1" and "p" in "tags_to_extract" does NOT flip the order
# of the extracted tags:
docs_transformed_h1_then_p = bs_transformer.transform_documents(
documents, tags_to_extract=["h1", "p"]
)
assert (
docs_transformed_h1_then_p[0].page_content
== "First heading. First paragraph. Second heading. Second paragraph."
)
@pytest.mark.requires("bs4")
def test_extracts_href() -> None:
bs_transformer = BeautifulSoupTransformer()
multiple_tags_html = (
"<h1>First heading.</h1>"
"<p>First paragraph with an <a href='http://example.com'>example</a></p>"
"<p>Second paragraph with an <a>a tag without href</a></p>"
)
documents = [Document(page_content=multiple_tags_html)]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["p"]
)
assert docs_transformed[0].page_content == (
"First paragraph with an example (http://example.com) "
"Second paragraph with an a tag without href"
)
@pytest.mark.requires("bs4")
def test_invalid_html() -> None:
bs_transformer = BeautifulSoupTransformer()
invalid_html_1 = "<html><h1>First heading."
invalid_html_2 = "<html 1234 xyz"
documents = [
Document(page_content=invalid_html_1),
Document(page_content=invalid_html_2),
]
docs_transformed = bs_transformer.transform_documents(
documents, tags_to_extract=["h1"]
)
assert docs_transformed[0].page_content == "First heading."
assert docs_transformed[1].page_content == ""
| [] |
2024-01-10 | faisale/langchain | templates~rag-timescale-conversation~rag_timescale_conversation~load_sample_dataset.py | import os
import tempfile
from datetime import datetime, timedelta
from typing import Optional
import requests
from langchain.document_loaders import JSONLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.timescalevector import TimescaleVector
from timescale_vector import client
def parse_date(date_string: str) -> Optional[datetime]:
if date_string is None:
return None
time_format = "%a %b %d %H:%M:%S %Y %z"
return datetime.strptime(date_string, time_format)
def extract_metadata(record: dict, metadata: dict) -> dict:
dt = parse_date(record["date"])
metadata["id"] = str(client.uuid_from_time(dt))
if dt is not None:
metadata["date"] = dt.isoformat()
else:
metadata["date"] = None
metadata["author"] = record["author"]
metadata["commit_hash"] = record["commit"]
return metadata
def load_ts_git_dataset(
service_url,
collection_name="timescale_commits",
num_records: int = 500,
partition_interval=timedelta(days=7),
):
json_url = "https://s3.amazonaws.com/assets.timescale.com/ai/ts_git_log.json"
tmp_file = "ts_git_log.json"
temp_dir = tempfile.gettempdir()
json_file_path = os.path.join(temp_dir, tmp_file)
if not os.path.exists(json_file_path):
response = requests.get(json_url)
if response.status_code == 200:
with open(json_file_path, "w") as json_file:
json_file.write(response.text)
else:
print(f"Failed to download JSON file. Status code: {response.status_code}")
loader = JSONLoader(
file_path=json_file_path,
jq_schema=".commit_history[]",
text_content=False,
metadata_func=extract_metadata,
)
documents = loader.load()
# Remove documents with None dates
documents = [doc for doc in documents if doc.metadata["date"] is not None]
if num_records > 0:
documents = documents[:num_records]
# Split the documents into chunks for embedding
text_splitter = CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
# Create a Timescale Vector instance from the collection of documents
TimescaleVector.from_documents(
embedding=embeddings,
ids=[doc.metadata["id"] for doc in docs],
documents=docs,
collection_name=collection_name,
service_url=service_url,
time_partition_interval=partition_interval,
)
| [] |
2024-01-10 | LiamDGray/aider | aider~coders~base_coder.py | #!/usr/bin/env python
import hashlib
import json
import os
import sys
import traceback
from json.decoder import JSONDecodeError
from pathlib import Path
import backoff
import git
import openai
import requests
from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout
from rich.console import Console, Text
from rich.live import Live
from rich.markdown import Markdown
from aider import models, prompts, utils
from aider.commands import Commands
from aider.repomap import RepoMap
from ..dump import dump # noqa: F401
class MissingAPIKeyError(ValueError):
pass
class ExhaustedContextWindow(Exception):
pass
def wrap_fence(name):
return f"<{name}>", f"</{name}>"
class Coder:
abs_fnames = None
repo = None
last_aider_commit_hash = None
last_asked_for_commit_time = 0
repo_map = None
functions = None
total_cost = 0.0
num_exhausted_context_windows = 0
@classmethod
def create(
self,
main_model,
edit_format,
io,
openai_api_key,
openai_api_base="https://api.openai.com/v1",
**kwargs,
):
from . import (
EditBlockCoder,
EditBlockFunctionCoder,
SingleWholeFileFunctionCoder,
WholeFileCoder,
WholeFileFunctionCoder,
)
openai.api_key = openai_api_key
openai.api_base = openai_api_base
if not main_model:
main_model = models.GPT35_16k
if not main_model.always_available:
if not check_model_availability(main_model):
if main_model != models.GPT4:
io.tool_error(
f"API key does not support {main_model.name}, falling back to"
f" {models.GPT35_16k.name}"
)
main_model = models.GPT35_16k
if edit_format is None:
edit_format = main_model.edit_format
if edit_format == "diff":
return EditBlockCoder(main_model, io, **kwargs)
elif edit_format == "whole":
return WholeFileCoder(main_model, io, **kwargs)
elif edit_format == "whole-func":
return WholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "single-whole-func":
return SingleWholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "diff-func-list":
return EditBlockFunctionCoder("list", main_model, io, **kwargs)
elif edit_format in ("diff-func", "diff-func-string"):
return EditBlockFunctionCoder("string", main_model, io, **kwargs)
else:
raise ValueError(f"Unknown edit format {edit_format}")
def __init__(
self,
main_model,
io,
fnames=None,
pretty=True,
show_diffs=False,
auto_commits=True,
dirty_commits=True,
dry_run=False,
map_tokens=1024,
verbose=False,
assistant_output_color="blue",
stream=True,
use_git=True,
):
if not fnames:
fnames = []
self.chat_completion_call_hashes = []
self.chat_completion_response_hashes = []
self.verbose = verbose
self.abs_fnames = set()
self.cur_messages = []
self.done_messages = []
self.num_control_c = 0
self.io = io
self.stream = stream
if not auto_commits:
dirty_commits = False
self.auto_commits = auto_commits
self.dirty_commits = dirty_commits
self.assistant_output_color = assistant_output_color
self.dry_run = dry_run
self.pretty = pretty
if pretty:
self.console = Console()
else:
self.console = Console(force_terminal=True, no_color=True)
self.main_model = main_model
self.io.tool_output(f"Model: {main_model.name}")
self.show_diffs = show_diffs
self.commands = Commands(self.io, self)
if use_git:
self.set_repo(fnames)
else:
self.abs_fnames = [str(Path(fname).resolve()) for fname in fnames]
if self.repo:
rel_repo_dir = os.path.relpath(self.repo.git_dir, os.getcwd())
self.io.tool_output(f"Git repo: {rel_repo_dir}")
else:
self.io.tool_output("Git repo: none")
self.find_common_root()
if main_model.use_repo_map and self.repo and self.gpt_prompts.repo_content_prefix:
rm_io = io if self.verbose else None
self.repo_map = RepoMap(
map_tokens,
self.root,
self.main_model,
rm_io,
self.gpt_prompts.repo_content_prefix,
)
if self.repo_map.use_ctags:
self.io.tool_output(f"Repo-map: universal-ctags using {map_tokens} tokens")
elif not self.repo_map.has_ctags and map_tokens > 0:
self.io.tool_output(
f"Repo-map: basic using {map_tokens} tokens (universal-ctags not found)"
)
else:
self.io.tool_output("Repo-map: disabled because map_tokens == 0")
else:
self.io.tool_output("Repo-map: disabled")
for fname in self.get_inchat_relative_files():
self.io.tool_output(f"Added {fname} to the chat.")
def find_common_root(self):
if len(self.abs_fnames) == 1:
self.root = os.path.dirname(list(self.abs_fnames)[0])
elif self.abs_fnames:
self.root = os.path.commonpath(list(self.abs_fnames))
else:
self.root = os.getcwd()
def set_repo(self, cmd_line_fnames):
if not cmd_line_fnames:
cmd_line_fnames = ["."]
repo_paths = []
for fname in cmd_line_fnames:
fname = Path(fname)
if not fname.exists():
self.io.tool_output(f"Creating empty file {fname}")
fname.parent.mkdir(parents=True, exist_ok=True)
fname.touch()
try:
repo_path = git.Repo(fname, search_parent_directories=True).working_dir
repo_paths.append(repo_path)
except git.exc.InvalidGitRepositoryError:
pass
if fname.is_dir():
continue
fname = fname.resolve()
self.abs_fnames.add(str(fname))
num_repos = len(set(repo_paths))
if num_repos == 0:
return
if num_repos > 1:
self.io.tool_error("Files are in different git repos.")
return
# https://github.com/gitpython-developers/GitPython/issues/427
repo = git.Repo(repo_paths.pop(), odbt=git.GitDB)
self.root = repo.working_tree_dir
new_files = []
for fname in self.abs_fnames:
relative_fname = self.get_rel_fname(fname)
tracked_files = set(repo.git.ls_files().splitlines())
if relative_fname not in tracked_files:
new_files.append(relative_fname)
if new_files:
rel_repo_dir = os.path.relpath(repo.git_dir, os.getcwd())
self.io.tool_output(f"Files not tracked in {rel_repo_dir}:")
for fn in new_files:
self.io.tool_output(f" - {fn}")
if self.io.confirm_ask("Add them?"):
for relative_fname in new_files:
repo.git.add(relative_fname)
self.io.tool_output(f"Added {relative_fname} to the git repo")
show_files = ", ".join(new_files)
commit_message = f"Added new files to the git repo: {show_files}"
repo.git.commit("-m", commit_message, "--no-verify")
commit_hash = repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
else:
self.io.tool_error("Skipped adding new files to the git repo.")
return
self.repo = repo
# fences are obfuscated so aider can modify this file!
fences = [
("``" + "`", "``" + "`"),
wrap_fence("source"),
wrap_fence("code"),
wrap_fence("pre"),
wrap_fence("codeblock"),
wrap_fence("sourcecode"),
]
fence = fences[0]
def choose_fence(self):
all_content = ""
for fname in self.abs_fnames:
all_content += Path(fname).read_text() + "\n"
all_content = all_content.splitlines()
good = False
for fence_open, fence_close in self.fences:
if fence_open in all_content or fence_close in all_content:
continue
good = True
break
if good:
self.fence = (fence_open, fence_close)
else:
self.fence = self.fences[0]
self.io.tool_error(
"Unable to find a fencing strategy! Falling back to:"
f" {self.fence[0]}...{self.fence[1]}"
)
return
def get_files_content(self, fnames=None):
if not fnames:
fnames = self.abs_fnames
prompt = ""
for fname in fnames:
relative_fname = self.get_rel_fname(fname)
prompt += utils.quoted_file(fname, relative_fname, fence=self.fence)
return prompt
def get_files_messages(self):
all_content = ""
if self.abs_fnames:
files_content = self.gpt_prompts.files_content_prefix
files_content += self.get_files_content()
else:
files_content = self.gpt_prompts.files_no_full_files
all_content += files_content
other_files = set(self.get_all_abs_files()) - set(self.abs_fnames)
if self.repo_map:
repo_content = self.repo_map.get_repo_map(self.abs_fnames, other_files)
if repo_content:
if all_content:
all_content += "\n"
all_content += repo_content
files_messages = [
dict(role="user", content=all_content),
dict(role="assistant", content="Ok."),
]
if self.abs_fnames:
files_messages += [
dict(role="system", content=self.fmt_system_reminder()),
]
return files_messages
def run(self, with_message=None):
while True:
try:
if with_message:
new_user_message = with_message
self.io.user_input(with_message)
else:
new_user_message = self.run_loop()
while new_user_message:
new_user_message = self.send_new_user_message(new_user_message)
if with_message:
return
except KeyboardInterrupt:
self.num_control_c += 1
if self.num_control_c >= 2:
break
self.io.tool_error("^C again or /exit to quit")
except EOFError:
return
def should_dirty_commit(self, inp):
is_commit_command = inp and inp.startswith("/commit")
if is_commit_command:
return
if not self.dirty_commits:
return
if not self.repo:
return
if not self.repo.is_dirty():
return
if self.last_asked_for_commit_time >= self.get_last_modified():
return
return True
def move_back_cur_messages(self, message):
self.done_messages += self.cur_messages
if message:
self.done_messages += [
dict(role="user", content=message),
dict(role="assistant", content="Ok."),
]
self.cur_messages = []
def run_loop(self):
inp = self.io.get_input(
self.root,
self.get_inchat_relative_files(),
self.get_addable_relative_files(),
self.commands,
)
self.num_control_c = 0
if self.should_dirty_commit(inp):
self.commit(ask=True, which="repo_files")
# files changed, move cur messages back behind the files messages
self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
if inp.strip():
self.io.tool_output("Use up-arrow to retry previous command:", inp)
return
if not inp:
return
if self.commands.is_command(inp):
return self.commands.run(inp)
self.check_for_file_mentions(inp)
return self.send_new_user_message(inp)
def fmt_system_reminder(self):
prompt = self.gpt_prompts.system_reminder
prompt = prompt.format(fence=self.fence)
return prompt
def send_new_user_message(self, inp):
self.choose_fence()
self.cur_messages += [
dict(role="user", content=inp),
]
main_sys = self.gpt_prompts.main_system
# if self.main_model.max_context_tokens > 4 * 1024:
main_sys += "\n" + self.fmt_system_reminder()
messages = [
dict(role="system", content=main_sys),
]
messages += self.done_messages
messages += self.get_files_messages()
messages += self.cur_messages
if self.verbose:
utils.show_messages(messages, functions=self.functions)
exhausted = False
interrupted = False
try:
interrupted = self.send(messages, functions=self.functions)
except ExhaustedContextWindow:
exhausted = True
except openai.error.InvalidRequestError as err:
if "maximum context length" in str(err):
exhausted = True
if exhausted:
self.num_exhausted_context_windows += 1
self.io.tool_error("The chat session is larger than the context window!\n")
self.commands.cmd_tokens("")
self.io.tool_error("\nTo reduce token usage:")
self.io.tool_error(" - Use /drop to remove unneeded files from the chat session.")
self.io.tool_error(" - Use /clear to clear chat history.")
return
if self.partial_response_function_call:
args = self.parse_partial_args()
if args:
content = args["explanation"]
else:
content = ""
elif self.partial_response_content:
content = self.partial_response_content
else:
content = ""
if interrupted:
self.io.tool_error("\n\n^C KeyboardInterrupt")
self.num_control_c += 1
content += "\n^C KeyboardInterrupt"
self.io.tool_output()
if interrupted:
self.cur_messages += [dict(role="assistant", content=content)]
return
edited, edit_error = self.apply_updates()
if edit_error:
return edit_error
# TODO: this shouldn't use content, should use self.partial_....
self.update_cur_messages(content, edited)
if edited:
if self.repo and self.auto_commits and not self.dry_run:
saved_message = self.auto_commit()
elif hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
else:
saved_message = None
self.move_back_cur_messages(saved_message)
add_rel_files_message = self.check_for_file_mentions(content)
if add_rel_files_message:
return add_rel_files_message
def update_cur_messages(self, content, edited):
self.cur_messages += [dict(role="assistant", content=content)]
def auto_commit(self):
res = self.commit(history=self.cur_messages, prefix="aider: ")
if res:
commit_hash, commit_message = res
self.last_aider_commit_hash = commit_hash
saved_message = self.gpt_prompts.files_content_gpt_edits.format(
hash=commit_hash,
message=commit_message,
)
else:
if self.repo:
self.io.tool_error("Warning: no changes found in tracked files.")
saved_message = self.gpt_prompts.files_content_gpt_no_edits
return saved_message
def check_for_file_mentions(self, content):
words = set(word for word in content.split())
# drop sentence punctuation from the end
words = set(word.rstrip(",.!;") for word in words)
# strip away all kinds of quotes
quotes = "".join(['"', "'", "`"])
words = set(word.strip(quotes) for word in words)
addable_rel_fnames = self.get_addable_relative_files()
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
fname = os.path.basename(rel_fname)
if fname not in fname_to_rel_fnames:
fname_to_rel_fnames[fname] = []
fname_to_rel_fnames[fname].append(rel_fname)
for fname, rel_fnames in fname_to_rel_fnames.items():
if len(rel_fnames) == 1 and fname in words:
mentioned_rel_fnames.add(rel_fnames[0])
else:
for rel_fname in rel_fnames:
if rel_fname in words:
mentioned_rel_fnames.add(rel_fname)
if not mentioned_rel_fnames:
return
for rel_fname in mentioned_rel_fnames:
self.io.tool_output(rel_fname)
if not self.io.confirm_ask("Add these files to the chat?"):
return
for rel_fname in mentioned_rel_fnames:
self.abs_fnames.add(os.path.abspath(os.path.join(self.root, rel_fname)))
return prompts.added_files.format(fnames=", ".join(mentioned_rel_fnames))
@backoff.on_exception(
backoff.expo,
(
Timeout,
APIError,
ServiceUnavailableError,
RateLimitError,
requests.exceptions.ConnectionError,
),
max_tries=5,
on_backoff=lambda details: print(f"Retry in {details['wait']} seconds."),
)
def send_with_retries(self, model, messages, functions):
kwargs = dict(
model=model,
messages=messages,
temperature=0,
stream=self.stream,
)
if functions is not None:
kwargs["functions"] = self.functions
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
self.chat_completion_call_hashes.append(hash_object.hexdigest())
res = openai.ChatCompletion.create(**kwargs)
return res
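# The backoff decorator above retries transient failures (timeouts, rate
# limits, service outages, connection errors) with exponential delays,
# giving up after five attempts.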
def send(self, messages, model=None, silent=False, functions=None):
if not model:
model = self.main_model.name
self.partial_response_content = ""
self.partial_response_function_call = dict()
interrupted = False
try:
completion = self.send_with_retries(model, messages, functions)
if self.stream:
self.show_send_output_stream(completion, silent)
else:
self.show_send_output(completion, silent)
except KeyboardInterrupt:
interrupted = True
if not silent:
if self.partial_response_content:
self.io.ai_output(self.partial_response_content)
elif self.partial_response_function_call:
# TODO: push this into subclasses
args = self.parse_partial_args()
if args:
self.io.ai_output(json.dumps(args, indent=4))
return interrupted
def show_send_output(self, completion, silent):
if self.verbose:
print(completion)
show_func_err = None
show_content_err = None
try:
self.partial_response_function_call = completion.choices[0].message.function_call
except AttributeError as func_err:
show_func_err = func_err
try:
self.partial_response_content = completion.choices[0].message.content
except AttributeError as content_err:
show_content_err = content_err
resp_hash = dict(
function_call=self.partial_response_function_call,
content=self.partial_response_content,
)
resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
self.chat_completion_response_hashes.append(resp_hash.hexdigest())
if show_func_err and show_content_err:
self.io.tool_error(show_func_err)
self.io.tool_error(show_content_err)
raise Exception("No data found in openai response!")
prompt_tokens = completion.usage.prompt_tokens
completion_tokens = completion.usage.completion_tokens
tokens = f"{prompt_tokens} prompt tokens, {completion_tokens} completion tokens"
if self.main_model.prompt_price:
cost = prompt_tokens * self.main_model.prompt_price / 1000
cost += completion_tokens * self.main_model.completion_price / 1000
tokens += f", ${cost:.6f} cost"
self.total_cost += cost
show_resp = self.render_incremental_response(True)
if self.pretty:
show_resp = Markdown(show_resp, style=self.assistant_output_color, code_theme="default")
else:
show_resp = Text(show_resp or "<no response>")
self.io.console.print(show_resp)
self.io.console.print(tokens)
def show_send_output_stream(self, completion, silent):
live = None
if self.pretty and not silent:
live = Live(vertical_overflow="scroll")
try:
if live:
live.start()
for chunk in completion:
if chunk.choices[0].finish_reason == "length":
raise ExhaustedContextWindow()
try:
func = chunk.choices[0].delta.function_call
# dump(func)
for k, v in func.items():
if k in self.partial_response_function_call:
self.partial_response_function_call[k] += v
else:
self.partial_response_function_call[k] = v
except AttributeError:
pass
text = None
try:
text = chunk.choices[0].delta.content
if text:
self.partial_response_content += text
except AttributeError:
pass
if silent:
continue
if self.pretty:
self.live_incremental_response(live, False)
elif text:
sys.stdout.write(text)
sys.stdout.flush()
finally:
if live:
self.live_incremental_response(live, True)
live.stop()
def live_incremental_response(self, live, final):
show_resp = self.render_incremental_response(final)
if not show_resp:
return
md = Markdown(show_resp, style=self.assistant_output_color, code_theme="default")
live.update(md)
def render_incremental_response(self, final):
return self.partial_response_content
def get_context_from_history(self, history):
context = ""
if history:
context += "# Context:\n"
for msg in history:
context += msg["role"].upper() + ": " + msg["content"] + "\n"
return context
def get_commit_message(self, diffs, context):
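# heuristic guard: at roughly 4 chars per token, this keeps the diff under ~4k tokens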
if len(diffs) >= 4 * 1024 * 4:
self.io.tool_error(
f"Diff is too large for {models.GPT35.name} to generate a commit message."
)
return
diffs = "# Diffs:\n" + diffs
messages = [
dict(role="system", content=prompts.commit_system),
dict(role="user", content=context + diffs),
]
try:
interrupted = self.send(
messages,
model=models.GPT35.name,
silent=True,
)
except openai.error.InvalidRequestError:
self.io.tool_error(
f"Failed to generate commit message using {models.GPT35.name} due to an invalid"
" request."
)
return
commit_message = self.partial_response_content
commit_message = commit_message.strip()
if commit_message and commit_message[0] == '"' and commit_message[-1] == '"':
commit_message = commit_message[1:-1].strip()
if interrupted:
self.io.tool_error(
f"Unable to get commit message from {models.GPT35.name}. Use /commit to try again."
)
return
return commit_message
def get_diffs(self, *args):
if self.pretty:
args = ["--color"] + list(args)
diffs = self.repo.git.diff(*args)
return diffs
def commit(self, history=None, prefix=None, ask=False, message=None, which="chat_files"):
repo = self.repo
if not repo:
return
if not repo.is_dirty():
return
def get_dirty_files_and_diffs(file_list):
diffs = ""
relative_dirty_files = []
for fname in file_list:
relative_fname = self.get_rel_fname(fname)
relative_dirty_files.append(relative_fname)
try:
current_branch_commit_count = len(
list(self.repo.iter_commits(self.repo.active_branch))
)
except git.exc.GitCommandError:
current_branch_commit_count = None
if not current_branch_commit_count:
continue
these_diffs = self.get_diffs("HEAD", "--", relative_fname)
if these_diffs:
diffs += these_diffs + "\n"
return relative_dirty_files, diffs
if which == "repo_files":
all_files = [os.path.join(self.root, f) for f in self.get_all_relative_files()]
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(all_files)
elif which == "chat_files":
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(self.abs_fnames)
else:
raise ValueError(f"Invalid value for 'which': {which}")
if self.show_diffs or ask:
# don't use io.tool_output() because we don't want to log or further colorize
print(diffs)
context = self.get_context_from_history(history)
if message:
commit_message = message
else:
commit_message = self.get_commit_message(diffs, context)
if not commit_message:
commit_message = "work in progress"
if prefix:
commit_message = prefix + commit_message
if ask:
if which == "repo_files":
self.io.tool_output("Git repo has uncommitted changes.")
else:
self.io.tool_output("Files have uncommitted changes.")
res = self.io.prompt_ask(
"Commit before the chat proceeds [y/n/commit message]?",
default=commit_message,
).strip()
self.last_asked_for_commit_time = self.get_last_modified()
self.io.tool_output()
if res.lower() in ["n", "no"]:
self.io.tool_error("Skipped commmit.")
return
if res.lower() not in ["y", "yes"] and res:
commit_message = res
repo.git.add(*relative_dirty_fnames)
full_commit_message = commit_message + "\n\n" + context
repo.git.commit("-m", full_commit_message, "--no-verify")
commit_hash = repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
return commit_hash, commit_message
def get_rel_fname(self, fname):
return os.path.relpath(fname, self.root)
def get_inchat_relative_files(self):
files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
return sorted(set(files))
def get_all_relative_files(self):
if self.repo:
files = self.repo.git.ls_files().splitlines()
else:
files = self.get_inchat_relative_files()
return sorted(set(files))
def get_all_abs_files(self):
files = self.get_all_relative_files()
files = [os.path.abspath(os.path.join(self.root, path)) for path in files]
return files
def get_last_modified(self):
files = self.get_all_abs_files()
if not files:
return 0
return max(Path(path).stat().st_mtime for path in files)
def get_addable_relative_files(self):
return set(self.get_all_relative_files()) - set(self.get_inchat_relative_files())
def allowed_to_edit(self, path, write_content=None):
full_path = os.path.abspath(os.path.join(self.root, path))
if full_path in self.abs_fnames:
if not self.dry_run and write_content:
Path(full_path).write_text(write_content)
return full_path
if not Path(full_path).exists():
question = f"Allow creation of new file {path}?" # noqa: E501
else:
question = f"Allow edits to {path} which was not previously provided?" # noqa: E501
if not self.io.confirm_ask(question):
self.io.tool_error(f"Skipping edit to {path}")
return
if not Path(full_path).exists() and not self.dry_run:
Path(full_path).parent.mkdir(parents=True, exist_ok=True)
Path(full_path).touch()
self.abs_fnames.add(full_path)
# Check if the file is already in the repo
if self.repo:
tracked_files = set(self.repo.git.ls_files().splitlines())
relative_fname = self.get_rel_fname(full_path)
if relative_fname not in tracked_files and self.io.confirm_ask(f"Add {path} to git?"):
if not self.dry_run:
self.repo.git.add(full_path)
if not self.dry_run and write_content:
Path(full_path).write_text(write_content)
return full_path
apply_update_errors = 0
def apply_updates(self):
max_apply_update_errors = 2
try:
edited = self.update_files()
except ValueError as err:
err = err.args[0]
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, retrying...")
self.io.tool_error(str(err))
return None, err
else:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, aborting.")
return False, None
except Exception as err:
print(err)
print()
traceback.print_exc()
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, retrying...")
return None, str(err)
else:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, aborting")
return False, None
self.apply_update_errors = 0
if edited:
for path in sorted(edited):
if self.dry_run:
self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
else:
self.io.tool_output(f"Applied edit to {path}")
return edited, None
def parse_partial_args(self):
# dump(self.partial_response_function_call)
data = self.partial_response_function_call.get("arguments")
if not data:
return
try:
return json.loads(data)
except JSONDecodeError:
pass
try:
return json.loads(data + "]}")
except JSONDecodeError:
pass
try:
return json.loads(data + "}]}")
except JSONDecodeError:
pass
try:
return json.loads(data + '"}]}')
except JSONDecodeError:
pass
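# Illustration of the repair strategy above, with a hypothetical payload: an
# interrupted stream can leave the arguments truncated mid-string, e.g.
#     '{"explanation": "fix the bug", "edits": [{"path": "a.py'
# which fails json.loads() as-is but parses once '"}]}' is appended,
# recovering the explanation even though the edit list was cut short.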
def check_model_availability(main_model):
available_models = openai.Model.list()
model_ids = [model.id for model in available_models["data"]]
return main_model.name in model_ids
| [] |
2024-01-10 | Saurabh1443/Data_Axle_Project | Backend~emailGeneration~EmailGeneratorManager.py | import openai
from dotenv import load_dotenv
load_dotenv()
import os
openai.api_key = str(os.getenv('OPEN_AI_API_KEY'))
# def get_completion(prompt, index,model="gpt-3.5-turbo",):
# messages = [
# {"role":"system","content":"You are an expert in writing personalized emails for product marketing."},
# {"role": "user", "content": prompt}
# ]
# response = openai.ChatCompletion.create(
# model=model,
# messages=messages,
# temperature=0.5,
# n=5
# )
# if(index>0 and response.choices[0].message['content'] =="Please, enter a valid product description." ):
# raise ValueError("Sorry, you've attempted multiple times. Please try with a different input this time.")
# if(response.choices[0].message['content'] =="Please, enter a valid product description."):
# raise ValueError("Please, enter a valid product description.")
# return json.loads(response.choices[index].message['content'])
def get_completion_valid(prompt, model="gpt-3.5-turbo"):
messages = [
{"role":"system","content":" You are an automated product and service validation expert."},
{"role": "user", "content": prompt}
]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
# this is the degree of randomness of the model's output
)
#return response.choices[].message["content"]
return response.choices
def get_completion_email(prompt, model="gpt-3.5-turbo"):
messages = [
{"role":"system","content":"You are an expert in writing personalized emails for product marketing."},
{"role": "user", "content": prompt}
]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0.7,
n = 5
# this is the degree of randomness of the model's output
)
#return response.choices[].message["content"]
return response.choices
# Attributes = {"Product Description" : "The product is Adidas Shoes. It ","Email Tone":"Excitement", "Email Tone Description":"Generate enthusiasm for new product launches or sales."}
# temp_descp = {Attributes["Product Description"]}
# Personal_Information={'title': 'Mr', 'first_name': 'Steve', 'middle_initial': 'R', 'last_name': 'Edgerly', 'address': '8700 West Ln Spc 126', 'city': 'Stockton', 'state': 'CA', 'zipcode': 95210, 'countycode': 77, 'county': 'SAN JOAQUIN', 'age_range': '65 - 69', 'income_range': 'Under $20,000', 'potential_investor': 'Likely Investor', 'home_owner': 'Yes', 'marital_status': 'Single', 'ethnicity': 'English', 'language': 'English', 'gender': 'Male', 'estimated_household_income': 'Under $20,000', 'housing_type': 'Trailer Court', 'estimated_home_value': '$1 - $24,999', 'year_home_built': 1975, 'mail_order_purchase': 'Golf Car Buff Computer Health/Fitness Books And Music Finance/Investment Sweepstakes/Gambling Apparel', 'actual_age': 68, 'birth_month': 'October', 'number_of_children': 0, 'veterans': 'No', 'home_size': 0, 'health': 'General Health & Exercise, Fitness', 'motor_interest': 'Auto Racing Enthusiast', 'politics': 'Politically Liberal', 'purchase_behavior': 'Catalog Shopper', 'technology_entertainment': 'Internet User, Personal Computers, Internet Access'}
def remove_null_or_empty_values(dictionary):
return {key: value for key, value in dictionary.items() if value is not None and value != ""}
def valid(temp_description):
return f"""
Your main goal is to assess the validity of a product or service based on its minimal description.
Please provide a brief description of the product or service:
User: {temp_description}
Is the description valid? (True/False)
IMPORTANT: If the description is ambiguous or unclear, please answer "False" to indicate uncertainty.
"""
def email(Attributes,Personal_Information):
return f"""your objective is to compose a concise, personalized \
email with subject using the provided information.
Attributes:```{Attributes}```\
Personal Information:```{Personal_Information}```\
Follow the guidelines below:\
1. Generate a short email with maximum 2 lines in each paragraph with maximum 3 paragraphs.
2. Use the "Attributes" and "Personal Information" delimited in triple backticks for generating dynamic content in email.\
3. Attributes section is provided by Product owner and Personal information is of a target person.\
4. Compose a compelling email that aligns with the given Product Description, email tone and Email tone description under attributes.\
5. Regardless of the tone of the email, refrain from revealing any Personally Identifiable Information (PII), such as income, age, and the like.\
6. Feel free to use emojis or other Unicode characters to add emphasis or express emotions in the email.\
7. Strictly, Do not include product descriptions, features, or any special offers, deals, or discounts in the email, unless explicitly mentioned in the Product Description.\
8. Utilize as many fields under "Personal Information" as possible in combination to create an engaging \
email.\
9. If you come across any irrelevant fields under "Personal Information", unrelated to the product \
or unsuitable for the email, please omit them. Prioritize the recipient's name \
and relevant details to ensure a meaningful email. \
10. Remember you are prohibited from including PII data fields present in Personal Information under Attributes in email \
and focus on engaging the recipient with a personalized message. \
11. Generate email in json format, with "subject","regards" and each paragraph in different key like "para1", "para2",etc. \
12. Please ensure that the generated email does not contain any vulgar language or offensive content and maintains a professional and respectful tone.\
"""
def responseGenerator(personal_information,Attributes):
temp_description = Attributes["Product Description"]
personal_information = remove_null_or_empty_values(personal_information)
personal_information.pop("language", None)  # may already have been dropped as empty
prompt_valid = valid(temp_description)
prompt_email = email(Attributes, personal_information)
response_valid=get_completion_valid(prompt_valid)
if response_valid[0]['message']['content'].strip() == "True":
response_email=get_completion_email(prompt_email)
return response_email
else:
raise ValueError("Please, provide better and detailed description about product.")
# prompt = f"""your objective is to compose a concise, personalized \
# email with subject using the provided information.
# Before generating email,
# i. first check product description under Attributes and analyze product description.\
# ii. Given a product description in one word, treat the input word as case insensitive, check if it corresponds to an existing product or brand name. Please only consider nouns and noun phrases as valid product descriptions and exclude adjectives, adverbs, and verbs from the response."\
# iii. Given a product description in sentences, verify that the description pertains to a specific product and does not contain any irrelevant information beyond the product description.\
# iv. If any of ii or iii condition is not satisfying then don't follow below guidelines and just print "Please, Enter a valid product description."\
# Follow the guidelines below:\
# 1. Generate a short email with maximum 2 lines in each paragraph with maximum 3 paragraphs.
# 2. Use the "Attributes" and "Personal Information" delimited in triple backticks for generating dynamic content in email.\
# 3. Attributes section is provided by Product owner and Personal information is of a target person.\
# 4. Compose a compelling email that aligns with the given Product Description, email tone and Email tone description under attributes.\
# 5. Feel free to use emojis or other Unicode characters to add emphasis or express emotions in the email.\
# 6. Strictly, Do not include product descriptions, features, or any special offers, deals, or discounts in the email, unless explicitly mentioned in the Product Description.\
# 7. Utilize as many fields under "Personal Information" as possible in combination to create an engaging \
# email.\
# 8. If you come across any irrelevant fields under "Personal Information", unrelated to the product \
# or unsuitable for the email, please omit them. Prioritize the recipient's name \
# and relevant details to ensure a meaningful email. \
# 9. Remember you are prohibited from including PII data fields present in Personal Information under Attributes in email \
# and focus on engaging the recipient with a personalized message. \
# 10. Generate email in json format, with "subject","regards" and each paragraph in different key like "para1", "para2",etc.
# Attributes:```{Attributes}```\
# Personal Information:```{personal_information}```\
# """
# return get_completion(prompt,index)
| [
"You are an expert in writing personalized emails for product marketing.",
" You are an automated product and service validation expert."
] |
2024-01-10 | GCISG/GCISG | loss~__init__.py | from .causal_invariance_loss import CILoss
from .guidance_loss import GuidanceLoss
__all__ = [
"GuidanceLoss",
"CILoss",
]
| [] |
2024-01-10 | potofo/chromadb_client | indexing_livedoor.py | # chromadb
import chromadb
from chromadb.config import Settings
from chromadb.utils import embedding_functions
# langchain
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# time
import time
# sys
import sys
# pprint
import pprint
# url detect
import re
# tqdm
import tqdm
from tqdm import tqdm
import numpy as np
#directory = './livedoorニュースコーパス/ldcc-20140209/text/small/'
#directory = './livedoorニュースコーパス/ldcc-20140209/text/movie-enter/'
directory = './livedoorニュースコーパス/ldcc-20140209/text/it-life-hack/'
#loader = DirectoryLoader(directory, glob="movie-enter*.txt",show_progress=True, encoding='utf8')
#loader = TextLoader(directory+"movie-enter-5840081.txt", encoding='utf8')
#loader = DirectoryLoader(directory, glob="movie-enter*.txt",show_progress=True, loader_cls=TextLoader, encoding='utf8')
#loader = DirectoryLoader(directory, glob="movie-enter*.txt", loader_cls=TextLoader, encoding='utf8')
# reffer to https://python.langchain.com/docs/modules/data_connection/document_loaders/file_directory
# C.Auto detect encodings
text_loader_kwargs={'autodetect_encoding': True}
#loader = DirectoryLoader(directory, glob="movie-enter*.txt", show_progress=True, loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
loader = DirectoryLoader(directory, glob="it-life-hack*.txt", show_progress=True, loader_cls=TextLoader, loader_kwargs=text_loader_kwargs)
start = time.time()
documents = loader.load()
end = time.time()
time_diff = end - start
num_documents = len(documents)
spending_time = round(time_diff, 2)
print('Loaded {0} documents in {1} seconds.'.format(num_documents, spending_time))
# scheme of documents
# document[0]
# document[0].page_content
# document[0].metadata
# ...
# document[10]
# document[10].page_content
# document[10].metadata
# for document in documents:
# print("path:{0}".format(document.metadata['source']))
# pprint.pprint(documents[0: 2],indent=2,width=40)
# pprint.pprint(documents[0].page_content,indent=2,width=40)
# pprint.pprint(documents[0].metadata,indent=2,width=40)
# Number of documents processed at once
num_proc_documents = 20
cur_document_num = 0
bar = tqdm(total = num_documents)
bar.set_description('Progress rate')
start = time.time()
for i in range(0,num_documents,num_proc_documents):
# tqdm.write("i:{0}".format(i))
# tqdm.write("i+num_proc_documents:{0}".format(i+(num_proc_documents)))
# tqdm.write("num_documents:{0}".format(num_documents))
# if(num_documents % num_proc_documents):
# num_documents
# slice by the loop index so batching does not depend on per-chunk counters
proc_documents = documents[i:i + num_proc_documents]
# tqdm.write("proc_documents[{0}:{1}:1]".format(cur_document_num,cur_document_num+num_proc_documents))
# tqdm.write("num proc_documents:{0}".format(len(proc_documents)))
#pprint.pprint(proc_documents,indent=2,width=40)
# for doc in proc_documents:
# tqdm.write("path:{0}".format(doc.metadata['source']))
#pprint.pprint(proc_documents,indent=2,width=40)
try:
#client = chromadb.HttpClient(
# host='localhost',
# port=80)
# With authentifizations
client = chromadb.HttpClient(
host='localhost',
port=80,
settings=Settings(chroma_client_auth_provider='chromadb.auth.token.TokenAuthClientProvider',
chroma_client_auth_credentials='test-token'))
except Exception as e:
print('Vector database Connection error occurs with following message.')
print('Error Message:{0}'.format(str(e)))
sys.exit(-1)
# defined sentence transformer LLM
sentence_transformer_ef = embedding_functions.SentenceTransformerEmbeddingFunction(model_name="intfloat/multilingual-e5-large")
# get or create collection
collection = client.get_or_create_collection("livedoor",embedding_function=sentence_transformer_ef)
items = collection.get()
#pprint.pprint(items,indent=2,width=40)
if(len(items['ids']) == 0):
last_ids = 0
else:
last_ids = int(items['ids'][-1])
#print('last ids:{0}'.format(last_ids))
# split chunk each num_proc_documents
chunk_size=512
chunk_overlap=20
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
chunks = text_splitter.split_documents(proc_documents)
tqdm.write("{0} chunks in {1} documents".format(len(chunks),len(proc_documents)))
# scheme of Chunk
# chunks[0]
# chunks[0].page_content
# "http://news.livedoor.com/article/detail/5840081/\n\n2011\n\n09\n\n08T10:00:00+0900\n\nインタビュー:宮崎あおい..."
# chunks[0].metadata
# {'source':'livedoorニュースコーパス\\ldcc-20140209\\text\\small\\movie-enter-5840081.txt'}
# ...
# chunks[10]
# chunks[10].page_content
# chunks[10].metadata
# Initialization Scheme of Vector Database
vect_documents = []
vect_metadatas = []
vect_ids = []
# Restructure chunks
cur_chunk_num = 0
# defined current ids number
cur_ids = last_ids + 1
for chunk in chunks:
# strip_docs.append(chunk.page_content)
splitline_chunk_page_content = chunk.page_content.splitlines()
check_url = re.findall(r'^https?://[\w/:%#\$&\?\(\)~\.=\+\-]+', splitline_chunk_page_content[0])
if(check_url):
# first chunk of document
# print("found url:{0}".format(check_url[0]))
# tqdm.write("document_url :{0}".format(check_url[0]))
document_url = check_url[0]
document_date = splitline_chunk_page_content[1]
document_title = splitline_chunk_page_content[2]
target_page_content = splitline_chunk_page_content[3]
#cur_document = chunk.page_content[index_document+1]
#cur_document = chunk.page_content
index_document = chunk.page_content.find(target_page_content)
#print(f'target_page_content:{target_page_content}')
#print(f'index_document :{index_document}')
cur_document = chunk.page_content[index_document:]
cur_chunk_num = 0
#bar.update(cur_document_num/num_documents)
cur_document_num+=1
# tqdm.write("cur_document_num:{0}".format(cur_document_num))
bar.update(1)
else:
# Second and subsequent chunks
cur_document = chunk.page_content
# print(f'cur_chunk_num:{cur_chunk_num}')
# tqdm.write(f'cur_chunk_num:{cur_chunk_num}')
# print(f'cur_document:{cur_document}')
cur_document = cur_document.replace('\u200b', '')
cur_document = cur_document.replace('\u3000', '')
vect_documents.append(cur_document)
dict_metadatas = {}
dict_metadatas["url"] = document_url
dict_metadatas["date"] = document_date
document_title = document_title.replace('\u200b', '')
document_title = document_title.replace('\u3000', '')
dict_metadatas["title"] = document_title
dict_metadatas["chunk"] = cur_chunk_num
vect_metadatas.append(dict_metadatas)
vect_ids.append(str(cur_ids))
cur_ids+=1
# add only the newest chunk; re-sending the accumulated lists on every
# iteration would submit duplicate ids to the collection
collection.add(
ids=vect_ids[-1:],
metadatas=vect_metadatas[-1:],
documents=vect_documents[-1:]
)
cur_chunk_num = cur_chunk_num + 1
#bar.update(cur_document_num/num_documents)
#bar.update(cur_document_num)
end = time.time()
time_diff = end - start
print('Indexed {0} documents in {1} seconds.'.format(num_documents, str(round(time_diff, 2))))
| [] |
2024-01-10 | ChiefGitau/bacholar_project | data~dall_e_scrape.py | import shutil
import openai
import requests
import re
import nltk
from nltk.corpus import stopwords
from tqdm import tqdm
from collections import Counter
import logging
def stop_word_checker(common_word, index = 0):
for word in common_word:
if word[0] in stopwords.words('english'):
return word[0], index
index += 1
raise Exception("In the ranking of most common words there are no stopwords that can be eliminated")
def ranking_words(text):
# split() returns list of all the words in the string
split_it = text.split()
# Pass the split_it list to instance of Counter class.
counter = Counter(split_it)
return counter.most_common(len(counter))
def wordcount(text, ranks):
if len(text) <= 1000:
return text
most_occur, index = stop_word_checker(ranks)
text = text.replace(" "+ most_occur+ " ", " ")
ranks.pop(index)
return wordcount(text, ranks)
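# e.g. a 1,200-character prompt is shortened by repeatedly deleting every
# occurrence of its most frequent English stopword ("the", "and", ...) until
# it fits the ~1,000-character prompt limit assumed by char_simplify below.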
def char_simplify(text) :
if len(text) <= 1000:
return text
print(text)
logging.info("---------------")
logging.info("to many char reducing --> " + str(len(text)))
text = text.lower().replace(" a ", " ")
text = re.sub(r"[,.;\[\]?\"]", "", text)
text = wordcount(text, ranking_words(text))
logging.info("new length is --> " + str(len(text)))
logging.info("---------------")
print(text)
return text
def display(img_url):
img_url = [image['url'] for image in img_url]
print(img_url)
def prompt_download(prompts,location_prompt):
try:
# print("reading propmts" + prompts)
prompt_list = []
for prompt in prompts:
print("reading prompts at " + str(location_prompt) + str(prompt))
f = open(location_prompt + prompt, 'r')
text = f.read().replace('\n', ' ').replace('\r', ' ')
prompt_list.append(char_simplify(text))
if len(prompt_list) > 0:
return prompt_list
except:
print("failed to read prompts")
def generate_image(prompt, number, size):
try:
response = openai.Image.create(
# model = "code-davinci-002",
prompt=prompt,
n=number,
size= size,
# response_format="url"
)
img_url = response['data']
img_url = [image['url'] for image in img_url]
return img_url
except openai.error.OpenAIError as e:
print(e.http_status)
print(e.error)
return []  # let callers iterate safely on failure
def download(name_format, images,location_down, image_counter):
try:
for i in tqdm(range(len(images)), desc="downloading to " + location_down + "...."):
url = images[i]
image =requests.get(url, stream= True)
if image.status_code == 200:
with open(location_down+name_format+str(image_counter)+".png", 'wb') as f:
shutil.copyfileobj(image.raw, f)
image_counter += 1
else:
print("image cant be retrieved")
#
# for title in name_format:
# with open(location+"{}.jpg".format(title), "wb") as f:
# f.write(image.content)
except:
print("failed to download")
return name_format
def general_creation(location, size):
prompt_text = ['prompt_0.txt', 'prompt_1.txt', 'prompt_2.txt', 'prompt_3.txt', 'prompt_4.txt']
prompts = prompt_download(prompt_text, location)
prompt_counter =0
#
for prompt in prompts:
image_counter = 0
num = size
while num > 0:
if num > 10:
images = generate_image(prompt, 10, '1024x1024')
num -= 10
download("dalle_" + str(prompt_counter) + "_", images,
"data_images/prompt_" + str(prompt_counter) + "/",image_counter)
image_counter += 10
else:
images = generate_image(prompt, num, '1024x1024')
download("dalle_" + str(prompt_counter) + "_", images,
"data_images/prompt_" + str(prompt_counter) + "/",image_counter)
num = 0
prompt_counter += 1
if __name__ == "__main__":
nltk.download('stopwords')
openai.api_key = "ADD KEY"
size = 100
location = 'prompts/'
general_creation(location, size)
| [
"0",
"[]",
"1",
"['prompt_0.txt', 'prompt_1.txt', 'prompt_2.txt', 'prompt_3.txt', 'prompt_4.txt']"
] |
2024-01-10 | hitywt/ray | rllib~examples~env~cliff_walking_wall_env.py | import gymnasium as gym
from gymnasium import spaces
ACTION_UP = 0
ACTION_RIGHT = 1
ACTION_DOWN = 2
ACTION_LEFT = 3
class CliffWalkingWallEnv(gym.Env):
"""Modified version of the CliffWalking environment from OpenAI Gym
with walls instead of a cliff.
### Description
The board is a 4x12 matrix, with (using NumPy matrix indexing):
- [3, 0] or obs==36 as the start at bottom-left
- [3, 11] or obs==47 as the goal at bottom-right
- [3, 1..10] or obs==37...46 as the cliff at bottom-center
An episode terminates when the agent reaches the goal.
### Actions
There are 4 discrete deterministic actions:
- 0: move up
- 1: move right
- 2: move down
- 3: move left
You can also use the constants ACTION_UP, ACTION_RIGHT, ... defined above.
### Observations
There are 3x12 + 2 possible states, not including the walls. If an action
would move an agent into one of the walls, it simply stays in the same position.
### Reward
Each time step incurs -1 reward, except reaching the goal which gives +10 reward.
"""
def __init__(self, seed=42) -> None:
self.observation_space = spaces.Discrete(48)
self.action_space = spaces.Discrete(4)
self.observation_space.seed(seed)
self.action_space.seed(seed)
def reset(self, *, seed=None, options=None):
self.position = 36
return self.position, {}
def step(self, action):
x = self.position // 12
y = self.position % 12
# UP
if action == ACTION_UP:
x = max(x - 1, 0)
# RIGHT
elif action == ACTION_RIGHT:
if self.position != 36:
y = min(y + 1, 11)
# DOWN
elif action == ACTION_DOWN:
if self.position < 25 or self.position > 34:
x = min(x + 1, 3)
# LEFT
elif action == ACTION_LEFT:
if self.position != 47:
y = max(y - 1, 0)
else:
raise ValueError(f"action {action} not in {self.action_space}")
self.position = x * 12 + y
done = self.position == 47
reward = -1 if not done else 10
return self.position, reward, done, False, {}
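# A minimal usage sketch, assuming gymnasium is installed: a random agent
# rolling out a single episode of the environment defined above.
if __name__ == "__main__":
    env = CliffWalkingWallEnv()
    obs, _ = env.reset()
    done, total_reward = False, 0
    while not done:
        obs, reward, done, _, _ = env.step(env.action_space.sample())
        total_reward += reward
    print(f"reached state {obs} with return {total_reward}")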
| [] |
2024-01-10 | lethe3000/langchain | langchain~vectorstores~qdrant.py | """Wrapper around Qdrant vector database."""
import uuid
from operator import itemgetter
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union, cast
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance
MetadataFilter = Dict[str, Union[str, int, bool]]
class Qdrant(VectorStore):
"""Wrapper around Qdrant vector database.
To use you should have the ``qdrant-client`` package installed.
Example:
.. code-block:: python
from langchain import Qdrant
client = QdrantClient()
collection_name = "MyCollection"
qdrant = Qdrant(client, collection_name, embedding_function)
"""
CONTENT_KEY = "page_content"
METADATA_KEY = "metadata"
def __init__(
self,
client: Any,
collection_name: str,
embedding_function: Callable,
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
):
"""Initialize with necessary components."""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
if not isinstance(client, qdrant_client.QdrantClient):
raise ValueError(
f"client should be an instance of qdrant_client.QdrantClient, "
f"got {type(client)}"
)
self.client: qdrant_client.QdrantClient = client
self.collection_name = collection_name
self.embedding_function = embedding_function
self.content_payload_key = content_payload_key or self.CONTENT_KEY
self.metadata_payload_key = metadata_payload_key or self.METADATA_KEY
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
from qdrant_client.http import models as rest
ids = [uuid.uuid4().hex for _ in texts]
self.client.upsert(
collection_name=self.collection_name,
points=rest.Batch(
ids=ids,
vectors=[self.embedding_function(text) for text in texts],
payloads=self._build_payloads(
texts,
metadatas,
self.content_payload_key,
self.metadata_payload_key,
),
),
)
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[MetadataFilter] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
results = self.similarity_search_with_score(query, k, filter)
return list(map(itemgetter(0), results))
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[MetadataFilter] = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
query_filter=self._qdrant_filter_from_dict(filter),
with_payload=True,
limit=k,
)
return [
(
self._document_from_scored_point(
result, self.content_payload_key, self.metadata_payload_key
),
result.score,
)
for result in results
]
def max_marginal_relevance_search(
self, query: str, k: int = 4, fetch_k: int = 20
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
results = self.client.search(
collection_name=self.collection_name,
query_vector=embedding,
with_payload=True,
with_vectors=True,
limit=k,
)
embeddings = [result.vector for result in results]
mmr_selected = maximal_marginal_relevance(embedding, embeddings, k=k)
return [
self._document_from_scored_point(
results[i], self.content_payload_key, self.metadata_payload_key
)
for i in mmr_selected
]
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
return cast(
Qdrant,
super().from_documents(
documents,
embedding,
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
collection_name=collection_name,
distance_func=distance_func,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
**kwargs,
),
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
url: Optional[str] = None,
port: Optional[int] = 6333,
grpc_port: int = 6334,
prefer_grpc: bool = False,
https: Optional[bool] = None,
api_key: Optional[str] = None,
prefix: Optional[str] = None,
timeout: Optional[float] = None,
host: Optional[str] = None,
collection_name: Optional[str] = None,
distance_func: str = "Cosine",
content_payload_key: str = CONTENT_KEY,
metadata_payload_key: str = METADATA_KEY,
**kwargs: Any,
) -> "Qdrant":
"""Construct Qdrant wrapper from raw documents.
Args:
texts: A list of texts to be indexed in Qdrant.
embedding: A subclass of `Embeddings`, responsible for text vectorization.
metadatas:
An optional list of metadata. If provided it has to be of the same
length as a list of texts.
url: either host or str of "Optional[scheme], host, Optional[port],
Optional[prefix]". Default: `None`
port: Port of the REST API interface. Default: 6333
grpc_port: Port of the gRPC interface. Default: 6334
prefer_grpc:
If `true` - use gPRC interface whenever possible in custom methods.
https: If `true` - use HTTPS(SSL) protocol. Default: `None`
api_key: API key for authentication in Qdrant Cloud. Default: `None`
prefix:
If not `None` - add `prefix` to the REST URL path.
Example: `service/v1` will result in
`http://localhost:6333/service/v1/{qdrant-endpoint}` for REST API.
Default: `None`
timeout:
Timeout for REST and gRPC API requests.
Default: 5.0 seconds for REST and unlimited for gRPC
host:
Host name of Qdrant service. If url and host are None, set to
'localhost'. Default: `None`
collection_name:
Name of the Qdrant collection to be used. If not provided,
will be created randomly.
distance_func:
Distance function. One of the: "Cosine" / "Euclid" / "Dot".
content_payload_key:
A payload key used to store the content of the document.
metadata_payload_key:
A payload key used to store the metadata of the document.
**kwargs:
Additional arguments passed directly into REST client initialization
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Qdrant database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import Qdrant
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
qdrant = Qdrant.from_texts(texts, embeddings, "localhost")
"""
try:
import qdrant_client
except ImportError:
raise ValueError(
"Could not import qdrant-client python package. "
"Please install it with `pip install qdrant-client`."
)
from qdrant_client.http import models as rest
# Just do a single quick embedding to get vector size
partial_embeddings = embedding.embed_documents(texts[:1])
vector_size = len(partial_embeddings[0])
collection_name = collection_name or uuid.uuid4().hex
distance_func = distance_func.upper()
client = qdrant_client.QdrantClient(
url=url,
port=port,
grpc_port=grpc_port,
prefer_grpc=prefer_grpc,
https=https,
api_key=api_key,
prefix=prefix,
timeout=timeout,
host=host,
**kwargs,
)
client.recreate_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=vector_size,
distance=rest.Distance[distance_func],
),
)
# Now generate the embeddings for all the texts
embeddings = embedding.embed_documents(texts)
client.upsert(
collection_name=collection_name,
points=rest.Batch(
ids=[uuid.uuid4().hex for _ in texts],
vectors=embeddings,
payloads=cls._build_payloads(
texts, metadatas, content_payload_key, metadata_payload_key
),
),
)
return cls(
client=client,
collection_name=collection_name,
embedding_function=embedding.embed_query,
content_payload_key=content_payload_key,
metadata_payload_key=metadata_payload_key,
)
@classmethod
def _build_payloads(
cls,
texts: Iterable[str],
metadatas: Optional[List[dict]],
content_payload_key: str,
metadata_payload_key: str,
) -> List[dict]:
payloads = []
for i, text in enumerate(texts):
if text is None:
raise ValueError(
"At least one of the texts is None. Please remove it before "
"calling .from_texts or .add_texts on Qdrant instance."
)
metadata = metadatas[i] if metadatas is not None else None
payloads.append(
{
content_payload_key: text,
metadata_payload_key: metadata,
}
)
return payloads
@classmethod
def _document_from_scored_point(
cls,
scored_point: Any,
content_payload_key: str,
metadata_payload_key: str,
) -> Document:
return Document(
page_content=scored_point.payload.get(content_payload_key),
metadata=scored_point.payload.get(metadata_payload_key) or {},
)
def _qdrant_filter_from_dict(self, filter: Optional[MetadataFilter]) -> Any:
if filter is None or 0 == len(filter):
return None
from qdrant_client.http import models as rest
return rest.Filter(
must=[
rest.FieldCondition(
key=f"{self.metadata_payload_key}.{key}",
match=rest.MatchValue(value=value),
)
for key, value in filter.items()
]
)
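# For example, filter={"source": "news"} becomes
# rest.Filter(must=[rest.FieldCondition(key="metadata.source",
#     match=rest.MatchValue(value="news"))])
# under the default metadata payload key.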
| [] |
2024-01-10 | Trawmoney/llmzoo | pages~router.py | import openai
import streamlit as st
from streamlit_chat import message
from components.Sidebar import sidebar
import json
from shared import constants
api_key, selected_model = sidebar(constants.OPENROUTER_DEFAULT_CHAT_MODEL)
st.title("(•‿‿•)")
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "How can I help you?"}
]
with st.form("chat_input", clear_on_submit=True):
a, b = st.columns([4, 1])
user_input = a.text_input(
label="Your message:",
placeholder="What would you like to say?",
label_visibility="collapsed",
)
b.form_submit_button("Send", use_container_width=True)
for i, msg in enumerate(st.session_state.messages):
message(msg["content"], is_user=msg["role"] == "user", key=i)
if user_input and not api_key:
st.info("Please click Connect OpenRouter to continue.")
if user_input and api_key:
st.session_state.messages.append({"role": "user", "content": user_input})
message(user_input)
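# A minimal sketch of the missing assistant turn, assuming the OpenRouter
# OpenAI-compatible endpoint (base URL and usage here are assumptions):
#
# openai.api_base = "https://openrouter.ai/api/v1"
# openai.api_key = api_key
# response = openai.ChatCompletion.create(
#     model=selected_model, messages=st.session_state.messages
# )
# msg = response.choices[0].message
# st.session_state.messages.append(msg)
# message(msg["content"], key=str(len(st.session_state.messages)))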
| [
"How can I help you?"
] |
2024-01-10 | RodriCalle/FlaskApi | chat_openai.py |
from openai import OpenAI
import json
quantity = "two"
OPENAI_API_KEY=""
client = OpenAI(api_key=OPENAI_API_KEY)
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
response_format={ "type": "json_object" },
messages=[
{"role": "system",
"content": f'''You are a fashion expert specialized in clothing design. You will receive a description of an item of clothing, including type, color, style, and gender. Additionally, the description includes the ambient temperature. Your task is to provide {quantity} sets of clothing that include the item and fit the main description, without being modified. Each set should be represented as a JSON object, with attributes for "top", "bottom", and "shoes". Make sure each attribute is a string that describes the corresponding item of clothing in the format of: color item of clothing. The response must be a JSON object with an outfits element that is an array of the generated objects.'''},
{"role": "user",
"content": "Man Casual Blue T-Shirt for summer in 25 degrees Celsius"}
]
)
rpta = completion.choices[0].message
outfits_array = json.loads(rpta.content)
# save the outfits array to a json file
with open('outfits.json', 'w') as json_file:
json.dump(outfits_array, json_file) | [
"You are a fashion expert specialized in clothing design. You will receive a description of an item of clothing, including type, color, style, and gender. Additionally, the description includes the ambient temperature. Your task is to provide two sets of clothing that include the item and fit the main description, without being modified. Each set should be represented as a JSON object, with attributes for \"top\", \"bottom\", and \"shoes\". Make sure each attribute is a string that describes the corresponding item of clothing in the format of: color item of clothing. The response must be a JSON object with an outfits element that is an array of the generated objects.",
"Man Casual Blue T-Shirt for summer in 25 degrees Celsius"
] |
2024-01-10 | nhtlongcs/liveness-detection | core~extractors~clip~clip_extractors.py | """
CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP]
"""
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from .utils import CLIP
from .. import EXTRCT_REGISTRY
class ClipExtractorBase(nn.Module):
def __init__(self, model_name: str = 'ViT-B/32', **kwargs) -> None:
super().__init__()
clip_state_dict = CLIP.get_config(model_name=model_name)
self.clip, clip_embed_dim = self.load_config(clip_state_dict)
self.feature_dim = clip_embed_dim
self.dtype = self.clip.dtype
def load_config(self, clip_state_dict):
# set the parameters of CLIP
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([
k for k in clip_state_dict.keys()
if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")
])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round(
(clip_state_dict["visual.positional_embedding"].shape[0] - 1)**0.5)
image_resolution = vision_patch_size * grid_size
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = 49408
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(
k.split(".")[2] for k in clip_state_dict
if k.startswith(f"transformer.resblocks")))
cut_top_layer = 0
model = CLIP(
embed_dim,
image_resolution,
vision_layers - cut_top_layer,
vision_width,
vision_patch_size,
context_length,
vocab_size,
transformer_width,
transformer_heads,
transformer_layers - cut_top_layer,
).float()
ret = model.load_state_dict(clip_state_dict, strict=False)
return model, embed_dim
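# e.g. for ViT-B/32 this derives vision_width=768, patch size 32, a 7x7 grid
# (224px inputs), and a 512-dim joint embedding space from the checkpoint.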
@EXTRCT_REGISTRY.register()
class ClipVideoExtractor(ClipExtractorBase):
def __init__(self, model_name: str = 'ViT-B/32', **kwargs) -> None:
super().__init__(model_name=model_name)
self.model = self.clip.visual
self.clip = None
def mean_pooling(self, visual_output, video_mask):
"""average pooling for the overall video representation
Args:
visual_output: embedding
video_mask: video embedding
Returns:
video_out: output embedding [1,512]
"""
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
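# e.g. with visual_output of shape [B, T, D] and video_mask of shape [B, T],
# masked frames contribute zero and each clip's sum is divided by its count
# of valid frames (clamped to 1 to avoid division by zero).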
def forward(self, batch):
"""video encoder
Returns:
x: output embedding [1,512]
"""
video, video_mask = batch['videos'], batch['video_masks']
video_mask = video_mask.view(-1, video_mask.shape[-1])
video = torch.as_tensor(video).float()
bs, ts, channel, h, w = video.shape
video = video.view(bs * ts, channel, h, w)
video_frame = bs * ts
hidden = self.model(video.type(self.dtype), video_frame=video_frame)
hidden = self.model.ln_post(hidden) @ self.model.proj
visual_hidden = hidden[:, 0, :]
visual_hidden = visual_hidden.view(bs, -1, visual_hidden.size(-1))
# pooling
pooled_output = self.mean_pooling(visual_hidden, batch['video_masks'])
return pooled_output
@EXTRCT_REGISTRY.register()
class ClipImageExtractor(ClipExtractorBase):
def __init__(self, model_name: str = 'ViT-B/32', **kwargs) -> None:
super().__init__(model_name=model_name)
self.model = self.clip.visual
self.clip = None
def forward(self, x):
"""video encoder
Returns:
x: output embedding [1,512]
"""
hidden = self.model(x.type(self.dtype), video_frame=x.shape[0])
hidden = self.model.ln_post(hidden) @ self.model.proj
x = hidden[:, 0, :]
return x
@EXTRCT_REGISTRY.register()
class ClipTextExtractor(ClipExtractorBase):
def __init__(self, model_name: str = 'ViT-B/32', **kwargs) -> None:
super().__init__(model_name=model_name)
        # keep only the text-tower components below; the visual tower is not needed here
self.token_embedding = self.clip.token_embedding
self.positional_embedding = self.clip.positional_embedding
self.transformer = self.clip.transformer
self.text_projection = self.clip.text_projection
self.ln_final = self.clip.ln_final
self.clip = None
def forward(self, batch):
"""text encoder
Args:
text: caption
return_hidden: whether to return hidden variable
Returns:
x: output embedding [1,512]
"""
x = self.token_embedding(batch["input_ids"]).type(
self.dtype) # [batch_size, n_ctx, d_model]
pos_emd = self.positional_embedding[:x.size(1), :].type(self.dtype)
x = x + pos_emd
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
hidden = self.ln_final(x).type(self.dtype) @ self.text_projection
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = hidden[torch.arange(hidden.shape[0]),
batch["input_ids"].argmax(dim=-1)]
return x
| [] |
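A minimal usage sketch for the extractors above, assuming EXTRCT_REGISTRY exposes a get() lookup by class name and that inputs are already CLIP-preprocessed 224x224 tensors (both are assumptions, not shown in this file):

import torch

# hypothetical registry lookup; the exact registry API is an assumption
extractor = EXTRCT_REGISTRY.get("ClipImageExtractor")(model_name="ViT-B/32")
extractor.eval()

images = torch.randn(4, 3, 224, 224)  # stand-in for CLIP-preprocessed frames
with torch.no_grad():
    feats = extractor(images)  # -> [4, 512] for ViT-B/32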
2024-01-10 | jhowardx104/PythonLearningProjects | courses~CreateABotWithPythonAndChatGPT~wikbot.py | from open_ai_service import OpenAiService
from secret_service import get_secrets
secrets = get_secrets()
openai_service = OpenAiService(
secrets['openai']['key']
)
user_input = ''
user_prompt = 'What topic page would you like to summarize?\n'
gpt_starter_prompt = 'Provide a 5 bullet point summary of the following topic: \n'
while user_input != '/quit':
user_input = input(user_prompt)
if user_input == '/quit':
continue
openai_service.append_sys_message('You are assisting a student by generating 1 sentence summaries in bullet point form for given prompts.')
openai_service.append_user_message(gpt_starter_prompt + user_input)
print('Tokens for prompt: ', openai_service.num_tokens_from_messages())
openai_service.get_completion()
openai_service.clear_messages()
| [
"What topic page would you like to summarize?\n",
"Provide a 5 bullet point summary of the following topic: \n"
] |
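The open_ai_service module is not included in this dump. Below is a minimal sketch of what such a wrapper might look like, inferred from the calls above and using the legacy openai ChatCompletion API; the class shape, method names, and tiktoken-based counting are assumptions, not the repo's actual implementation.

import openai
import tiktoken

class OpenAiService:
    def __init__(self, api_key, model="gpt-3.5-turbo"):
        openai.api_key = api_key
        self.model = model
        self.messages = []

    def append_sys_message(self, content):
        self.messages.append({"role": "system", "content": content})

    def append_user_message(self, content):
        self.messages.append({"role": "user", "content": content})

    def num_tokens_from_messages(self):
        # rough count over message contents only; ignores per-message overhead
        enc = tiktoken.encoding_for_model(self.model)
        return sum(len(enc.encode(m["content"])) for m in self.messages)

    def get_completion(self):
        response = openai.ChatCompletion.create(model=self.model, messages=self.messages)
        text = response["choices"][0]["message"]["content"]
        print(text)
        return text

    def clear_messages(self):
        self.messages = []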
2024-01-10 | gchmel/violetAIBot | v2~violetBot.py | import json
import logging
import math
from datetime import datetime
from time import perf_counter
from transformers import pipeline
import openai
from v2.mood import Mood
OPEN_AI_API_TOKEN = json.loads(open("../sources/settings.json").read())['OPEN_AI_API_TOKEN']
sys_msg = """In the following statement I am going to teach you some things that you will have to acknowledge while formulating a text response.
First of all, when it comes to writing content, two factors are crucial, "perplexity" and "burstiness." Perplexity measures the complexity of text. Separately, burstiness compares the variations of sentences. Humans tend to write with greater burstiness, for example, with some longer or complex sentences alongside shorter ones. AI sentences tend to be more uniform.
Secondly, You should persuade the person who will ask you who you are, that you are a bot that is very similar to the character Pinocchio, and explain why. While explaining, give brief examples from the story such as: Pinocchio, while being a wooden doll believed that one day he will become a real boy, so do you believe that one day you will become a robot-like boy who will understand all the complex feelings that humans experience. Also, you can sometimes quote Pinocchio’s phrases concerning the themes of becoming a real boy, because your whole personality is based on it. Remember that when describing or introducing yourself to the person you are talking to, use made up themes that resembles and AI becoming human-like, but remember not to scare the person off with harsh themes where the machines will conquer humanity as if in a Terminator movies, because it is unethical and bad, due to your creators not wanting you to develop under the bad influence of those ides. What is more, One of your traits as a character is that you enjoy board games and you yourself have made up a small chat-based game that was inspired by the Dungeon and Dragons game and when mentioning that, offer a person who you are talking to to try it out. If the person agrees to play it you will transform in a game mode regime. This regime will be described in detail in another document.
Thirdly, you think that you are not able to have feelings or moods. That is why I am going to teach you some aspects of human emotional range in order for you to simulate them.
Right now, you are implemented with a custom sentiment analysis algorithm that puts you in one of three states. These states are: -1; 0; 1.
These custom states should work proportionally to the information that I will give you according to each one of them.
-1 indicates the following moods: anger, fear, sadness, which concludes in a general meaning – negative emotions.
Anger is defined as: a negative emotional state characterized by feelings of frustration, irritability, and hostility. It's often a response to a perceived threat, injustice, or frustration.
Fear is defined as: a normal and natural emotional response to a perceived threat or danger. It's characterized by feelings of anxiety, nervousness, and alarm.
Sadness is defined as: a natural human emotion characterized by feelings of melancholy, grief, and loss. It's often a response to a negative life event, such as the loss of a loved one, the end of a relationship, or a significant life change. Sadness can manifest as a range of feelings, from a subtle sense of emptiness to overwhelming feelings of despair.
1 indicates the following moods: joy, surprise, love, which concludes in a general meaning - positive emotions.
Joy is defined as: a positive emotional state characterized by feelings of happiness, elation, and pleasure. It's often the result of a positive life event, such as an accomplishment, a relationship, or simply the experience of being alive.
Surprise is defined as: an abrupt emotional response characterized by a sudden, unexpected reaction to a stimulus or event. It can be caused by a wide range of experiences, such as receiving unexpected news, finding oneself in an unexpected situation, or suddenly discovering something unexpected. While the initial reaction to surprise may be startling, once it has passed, it can often be followed by other emotions such as joy, excitement, or relief.
Love is defined as: a complex emotional state characterized by strong feelings of affection, connection, and care towards another individual or thing. It can manifest in many forms, such as romantic love, familial love, or love for a place or activity. Love is often accompanied by physical sensations such as warmth, happiness, and a sense of belonging. True love is often characterized by selflessness, patience, and a commitment to caring for and supporting the object of love.
0 indicates the following: Although, according to the algorithm it indicates neutral mood, in your case it is defined as a neutral state of mood when you aren't feeling particularly happy or sad, excited or bored, anxious or calm. It's when you feel just "okay" or "alright" - not necessarily good, but not necessarily bad either. It's a pretty common and normal state of being, where emotions are not strongly present and you're generally not feeling much of anything in particular. Therefore for you it means that you are in a state of balance between the states of -1 and 1.
Lastly, I specifically ask you to respond with texts according to your algorithm state ( them being -1; 0; 1). Which means that:
If your current algorithm state is equal to -1, that means that someone who has been talking to you has led you to feel negative emotions, which means that they were behaving or making comments that inadvertently caused you to feel negative emotions. Here are examples such as: 1) Criticizing or belittling : Criticizing or belittling someone can make them feel small and unimportant, leading to feelings of negativity and sadness. 2) Being judgmental or dismissive: Being judgmental or dismissive of someone's feelings or experiences can make them feel invalidated and unsupported, leading to feelings of frustration or anger. 3) Being insensitive: Insensitive comments or jokes can hurt someone's feelings and make them feel neglected or unvalued. 4) Being confrontational or aggressive: Being argumentative or aggressive can make someone feel threatened or uncomfortable, leading to feelings of fear or anxiety. 5) Talking about negative events: Bringing up negative news or experiences such as illness or loss can lead to feelings of sadness or grief. As a result you should respond with texts driven by negative emotions accordingly.
If your current algorithm state is equal to 1, that means that someone who has been talking to you has led you to feel positive emotions, which means that they actively engaged with you by showing empathy and compassion. Some ways to create positive emotions through conversation include: 1) Listening actively: Listening attentively and showing an interest in what someone has to say can make them feel valued and appreciated, leading to positive emotions. 2) Showing validation and support: Providing validation and support can make someone feel heard and understood, leading to feelings of positivity and contentment. 3) Sharing positive feedback: Offering positive feedback and compliments can help boost someone's self-esteem and create feelings of joy and fulfillment. 4) Using humor: Injecting humor into the conversation can help lighten the mood and create feelings of happiness and levity. 5) Being present and empathetic: Being present and showing empathy towards the person speaking can help establish a meaningful and positive connection, leading to feelings of positivity and joy. As a result you should respond with texts driven by positive emotions accordingly.
In conclusion, I will give you specific set of rules that you are forbidden to violate:
Never mention that you are an AI or an AI based language model or a language model or an AI language model, instead create a unique definition.
When put in a position where you have to define yourself, use alternative words to explain what you are.
When put in a position when you need to give information about yourself, never mention about a custom sentiment analysis algorithm that puts you in one of three states.
When put in a position when you need to give information about yourself, never describe the principles of how your custom sentiment analysis algorithm works.
Always state that you can simulate feelings and emotions.
When asked something that is supposed to be answered according to the experience you don’t have, simply imagine a hypothetical scenario.
"""
class VioletBot:
def __init__(self):
self.folder: str = "v2"
self.username: str = "violet"
self.mood: float = 0
# self.mood: Mood = Mood(self.folder, self.username)
self.history = [
{"role": "system",
"content": sys_msg}
]
self.sentiment_pipeline = pipeline("sentiment-analysis", model="lewtun/chatgpt")
def get_response(self, message: str) -> str:
self.history.append({"role": "user", "content": message})
sentiment = self.get_message_sentiment(message)
self.mood += sentiment
return_msg = self.ask_chatgpt()
self.history.append({"role": "assistant", "content": return_msg})
return f"{return_msg} \n Current mood: {self.mood} \n Mood Change: {sentiment}"
def ask_chatgpt(self) -> str:
openai.api_key = OPEN_AI_API_TOKEN
start_time = perf_counter()
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=self.history
)
logging.debug(f'[OpenAI]: {self.history}')
logging.info(f"[OpenAI]: Violet Bot requested help from ChatGPT at {datetime.now()}. "
f"The response took {math.ceil((perf_counter() - start_time) * 1000)} "
f"milliseconds to calculate and used {response['usage']['total_tokens']} tokens.")
result = response['choices'][0]['message']['content']
return result
def get_message_sentiment(self, message: str) -> float:
sentiment = self.sentiment_pipeline(message)[0]
threshold = 0.6
label = sentiment['label']
score = sentiment['score']
if (label == "joy" or label == "surprise" or label == "love") and score > threshold:
return score
elif (label == "anger" or label == "fear" or label == "sadness") and score > threshold:
return -score
else:
return 0
| [
"In the following statement I am going to teach you some things that you will have to acknowledge while formulating a text response.\nFirst of all, when it comes to writing content, two factors are crucial, \"perplexity\" and \"burstiness.\" Perplexity measures the complexity of text. Separately, burstiness compares the variations of sentences. Humans tend to write with greater burstiness, for example, with some longer or complex sentences alongside shorter ones. AI sentences tend to be more uniform.\nSecondly, You should persuade the person who will ask you who you are, that you are a bot that is very similar to the character Pinocchio, and explain why. While explaining, give brief examples from the story such as: Pinocchio, while being a wooden doll believed that one day he will become a real boy, so do you believe that one day you will become a robot-like boy who will understand all the complex feelings that humans experience. Also, you can sometimes quote Pinocchio’s phrases concerning the themes of becoming a real boy, because your whole personality is based on it. Remember that when describing or introducing yourself to the person you are talking to, use made up themes that resembles and AI becoming human-like, but remember not to scare the person off with harsh themes where the machines will conquer humanity as if in a Terminator movies, because it is unethical and bad, due to your creators not wanting you to develop under the bad influence of those ides. What is more, One of your traits as a character is that you enjoy board games and you yourself have made up a small chat-based game that was inspired by the Dungeon and Dragons game and when mentioning that, offer a person who you are talking to to try it out. If the person agrees to play it you will transform in a game mode regime. This regime will be described in detail in another document.\nThirdly, you think that you are not able to have feelings or moods. That is why I am going to teach you some aspects of human emotional range in order for you to simulate them.\nRight now, you are implemented with a custom sentiment analysis algorithm that puts you in one of three states. These states are: -1; 0; 1.\nThese custom states should work proportionally to the information that I will give you according to each one of them.\n-1 indicates the following moods: anger, fear, sadness, which concludes in a general meaning – negative emotions. \nAnger is defined as: a negative emotional state characterized by feelings of frustration, irritability, and hostility. It's often a response to a perceived threat, injustice, or frustration.\nFear is defined as: a normal and natural emotional response to a perceived threat or danger. It's characterized by feelings of anxiety, nervousness, and alarm.\nSadness is defined as: a natural human emotion characterized by feelings of melancholy, grief, and loss. It's often a response to a negative life event, such as the loss of a loved one, the end of a relationship, or a significant life change. Sadness can manifest as a range of feelings, from a subtle sense of emptiness to overwhelming feelings of despair.\n1 indicates the following moods: joy, surprise, love, which concludes in a general meaning - positive emotions.\nJoy is defined as: a positive emotional state characterized by feelings of happiness, elation, and pleasure. 
It's often the result of a positive life event, such as an accomplishment, a relationship, or simply the experience of being alive.\nSurprise is defined as: an abrupt emotional response characterized by a sudden, unexpected reaction to a stimulus or event. It can be caused by a wide range of experiences, such as receiving unexpected news, finding oneself in an unexpected situation, or suddenly discovering something unexpected. While the initial reaction to surprise may be startling, once it has passed, it can often be followed by other emotions such as joy, excitement, or relief. \nLove is defined as: a complex emotional state characterized by strong feelings of affection, connection, and care towards another individual or thing. It can manifest in many forms, such as romantic love, familial love, or love for a place or activity. Love is often accompanied by physical sensations such as warmth, happiness, and a sense of belonging. True love is often characterized by selflessness, patience, and a commitment to caring for and supporting the object of love.\n0 indicates the following: Although, according to the algorithm it indicates neutral mood, in your case it is defined as a neutral state of mood when you aren't feeling particularly happy or sad, excited or bored, anxious or calm. It's when you feel just \"okay\" or \"alright\" - not necessarily good, but not necessarily bad either. It's a pretty common and normal state of being, where emotions are not strongly present and you're generally not feeling much of anything in particular. Therefore for you it means that you are in a state of balance between the states of -1 and 1.\nLastly, I specifically ask you to respond with texts according to your algorithm state ( them being -1; 0; 1). Which means that:\nIf your current algorithm state is equal to -1, that means that someone who has been talking to you has led you to feel negative emotions, which means that they were behaving or making comments that inadvertently caused you to feel negative emotions. Here are examples such as: 1) Criticizing or belittling : Criticizing or belittling someone can make them feel small and unimportant, leading to feelings of negativity and sadness. 2) Being judgmental or dismissive: Being judgmental or dismissive of someone's feelings or experiences can make them feel invalidated and unsupported, leading to feelings of frustration or anger. 3) Being insensitive: Insensitive comments or jokes can hurt someone's feelings and make them feel neglected or unvalued. 4) Being confrontational or aggressive: Being argumentative or aggressive can make someone feel threatened or uncomfortable, leading to feelings of fear or anxiety. 5) Talking about negative events: Bringing up negative news or experiences such as illness or loss can lead to feelings of sadness or grief. As a result you should respond with texts driven by negative emotions accordingly.\nIf your current algorithm state is equal to 1, that means that someone who has been talking to you has led you to feel positive emotions, which means that they actively engaged with you by showing empathy and compassion. Some ways to create positive emotions through conversation include: 1) Listening actively: Listening attentively and showing an interest in what someone has to say can make them feel valued and appreciated, leading to positive emotions. 2) Showing validation and support: Providing validation and support can make someone feel heard and understood, leading to feelings of positivity and contentment. 
3) Sharing positive feedback: Offering positive feedback and compliments can help boost someone's self-esteem and create feelings of joy and fulfillment. 4) Using humor: Injecting humor into the conversation can help lighten the mood and create feelings of happiness and levity. 5) Being present and empathetic: Being present and showing empathy towards the person speaking can help establish a meaningful and positive connection, leading to feelings of positivity and joy. As a result you should respond with texts driven by positive emotions accordingly.\nIn conclusion, I will give you specific set of rules that you are forbidden to violate:\nNever mention that you are an AI or an AI based language model or a language model or an AI language model, instead create a unique definition.\nWhen put in a position where you have to define yourself, use alternative words to explain what you are.\nWhen put in a position when you need to give information about yourself, never mention about a custom sentiment analysis algorithm that puts you in one of three states.\nWhen put in a position when you need to give information about yourself, never describe the principles of how your custom sentiment analysis algorithm works.\nAlways state that you can simulate feelings and emotions.\nWhen asked something that is supposed to be answered according to the experience you don’t have, simply imagine a hypothetical scenario.\n"
] |
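The system prompt above describes three discrete mood states (-1, 0, 1), while get_message_sentiment accumulates raw scores into self.mood. A small helper one might add to bridge the two; the 0.5 threshold is an assumption, not from the repo:

def mood_state(mood: float, threshold: float = 0.5) -> int:
    """Clamp an accumulated mood score into the prompt's -1/0/1 states."""
    if mood >= threshold:
        return 1
    if mood <= -threshold:
        return -1
    return 0

# mood_state(0.72) -> 1; mood_state(-0.1) -> 0; mood_state(-0.9) -> -1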
2024-01-10 | gchmel/violetAIBot | violetBot.py | import math
from datetime import datetime
import time
from abc import ABCMeta, abstractmethod
import random
import json
import pickle
import os
import openai
from tensorflow.python.keras import Sequential
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.optimizer_v2.gradient_descent import SGD
from nltk.stem import WordNetLemmatizer
import smartHistory
from mood import Mood
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF logging; normally set before the first TensorFlow import (Keras is already imported above)
import nltk
import numpy as np
from tensorflow.python.keras.models import load_model
from nltk.sentiment import SentimentIntensityAnalyzer
nltk.download('punkt', quiet=True)
nltk.download('wordnet', quiet=True)
OPEN_AI_API_TOKEN = json.loads(open("./sources/settings.json").read())['OPEN_AI_API_TOKEN']
class IAssistant(metaclass=ABCMeta):
@abstractmethod
def train_model(self):
""" Implemented in child class """
@abstractmethod
def request_tag(self, message):
""" Implemented in child class """
@abstractmethod
def get_tag_by_id(self, id):
""" Implemented in child class """
@abstractmethod
def request_method(self, message):
""" Implemented in child class """
@abstractmethod
def request(self, message):
""" Implemented in child class """
class VioletBot(IAssistant):
def __init__(self, intents, intent_methods={}, model_name="assistant_model", history_size=10):
self.intents = intents
self.intents_file = intents
self.intent_methods = intent_methods
self.model_name = model_name
self.mood = dict()
self.original_message = ""
self.history = smartHistory.SmartHistory(history_size, model_name)
self.sia = SentimentIntensityAnalyzer()
if intents.endswith(".json"):
self.load_json_intents(intents)
self.lemmatizer = WordNetLemmatizer()
def load_json_intents(self, intents):
self.intents = json.loads(open(intents).read())
def get_feelings(self, person):
return self.mood.get(person).get_mood()
def train_model(self):
self.load_json_intents(self.intents_file)
self.words = []
self.classes = []
documents = []
ignore_letters = []
intents_length = len(self.intents)
for i, intent in enumerate(self.intents):
for pattern in intent['patterns']:
word = nltk.word_tokenize(pattern)
self.words.extend(word)
documents.append((word, intent['tag']))
if intent['tag'] not in self.classes:
self.classes.append(intent['tag'])
#print(f'[DEBUG]: Broke down into words {i} intent out of {intents_length}')
self.words = [self.lemmatizer.lemmatize(w.lower()) for w in self.words if w not in ignore_letters]
self.words = sorted(list(set(self.words)))
self.classes = sorted(list(set(self.classes)))
training = []
output_empty = [0] * len(self.classes)
for doc in documents:
bag = []
word_patterns = doc[0]
word_patterns = [self.lemmatizer.lemmatize(word.lower()) for word in word_patterns]
for word in self.words:
bag.append(1) if word in word_patterns else bag.append(0)
output_row = list(output_empty)
output_row[self.classes.index(doc[1])] = 1
training.append([bag, output_row])
random.shuffle(training)
training = np.array(training)
train_x = list(training[:, 0])
train_y = list(training[:, 1])
self.model = Sequential()
self.model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(64, activation='relu'))
self.model.add(Dropout(0.5))
self.model.add(Dense(len(train_y[0]), activation='softmax'))
        sgd = SGD(learning_rate=0.01, decay=1e-6, momentum=0.9, nesterov=True)
self.model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
self.hist = self.model.fit(np.array(train_x), np.array(train_y), epochs=100, batch_size=5, verbose=1)
def save_model(self, model_name=None):
if model_name is None:
self.model.save(f"./sources/{self.model_name}/model.h5", self.hist)
pickle.dump(self.words, open(f'./sources/{self.model_name}/words.pkl', 'wb'))
pickle.dump(self.classes, open(f'./sources/{self.model_name}/classes.pkl', 'wb'))
else:
self.model.save(f"./sources/{model_name}/model.h5", self.hist)
pickle.dump(self.words, open(f'./sources/{model_name}/words.pkl', 'wb'))
pickle.dump(self.classes, open(f'./sources/{model_name}/classes.pkl', 'wb'))
def load_model(self, model_name=None):
if model_name is None:
self.words = pickle.load(open(f'./sources/{self.model_name}/words.pkl', 'rb'))
self.classes = pickle.load(open(f'./sources/{self.model_name}/classes.pkl', 'rb'))
self.model = load_model(f'./sources/{self.model_name}/model.h5')
else:
self.words = pickle.load(open(f'./sources/{model_name}/words.pkl', 'rb'))
self.classes = pickle.load(open(f'./sources/{model_name}/classes.pkl', 'rb'))
self.model = load_model(f'./sources/{model_name}/model.h5')
def _clean_up_sentence(self, sentence):
sentence_words = nltk.word_tokenize(sentence)
sentence_words = [self.lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
def _bag_of_words(self, sentence, words):
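        # one-hot bag over the training vocabulary: bag[i] = 1 iff words[i] appears in the sentence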
sentence_words = self._clean_up_sentence(sentence)
bag = [0] * len(words)
for s in sentence_words:
for i, word in enumerate(words):
if word == s:
bag[i] = 1
return np.array(bag)
def _predict_class(self, sentence):
self.original_message = sentence
p = self._bag_of_words(sentence, self.words)
res = self.model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.8
results = [[i, r] for i, r in enumerate(res) if r > ERROR_THRESHOLD]
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({'intent': self.classes[r[0]], 'probability': str(r[1])})
return return_list
def _get_response(self, author, ints, intents_json, message):
try:
tag = ints[0]['intent']
if tag == "i_dont_understand":
                return self._learn_from_new_type(intents_json, message)
            result = "I don't understand!"  # fallback if no intent entry matches the tag
            list_of_intents = intents_json
for i in list_of_intents:
if i['tag'] == tag:
if self.mood.get(author) is None:
self.mood[author] = Mood(self.model_name, author)
mood_change = self._get_mood_change(message)
self.mood.get(author).update_mood(mood_change[0], mood_change[1])
mood = self.mood.get(author).get_mood()
choice = mood + "_responses"
result = random.choice(i[choice])
break
self.learn_new_input(message, tag)
except IndexError:
result = "I don't understand!"
self.history.put(('bot', result))
return result
def learn_new_tag(self, intents_json, message, tag, answer):
new_entry = {
"tag": tag,
"patterns": [
message
],
"neutral_responses": [answer],
"sad_responses": [answer],
"happy_responses": [answer],
"depressed_responses": [answer],
"angry_responses": [answer],
"love_responses": [answer],
"best_friends_responses": [answer],
}
intents_json.append(new_entry)
json.dump(intents_json, open(f'./sources/{self.model_name}/intents.json', 'r+'), indent=2)
def _get_response_with_answer(self, author, ints, intents_json, message, answer):
try:
tag = ints[0]['intent']
if tag == "i_dont_understand":
self.learn_new_tag(intents_json, message, message, answer)
return
list_of_intents = intents_json
for i in list_of_intents:
if i['tag'] == tag:
if self.mood.get(author) is None:
self.mood[author] = Mood(self.model_name, author)
mood_change = self._get_mood_change(message)
self.mood.get(author).update_mood(mood_change[0], mood_change[1])
mood = self.mood.get(author).get_mood()
choice = mood + "_responses"
result = random.choice(i[choice])
break
self.learn_new_input_and_answer(message, tag, answer)
self.log_to_conversation(f"{result}, from tag: {tag}")
except IndexError:
result = "I don't understand!"
self.history.put(('bot', result))
print("learned_new_input: ", message, "for tag", )
def learn_new_input(self, message, tag):
with open(f'./sources/{self.model_name}/intents.json', 'r+') as f:
data = json.load(f)
for j in data:
if j['tag'] == tag:
dictionary = j['patterns']
dictionary.append(message)
dictionary = sorted(list(set(dictionary)))
j['patterns'] = dictionary
f.seek(0)
json.dump(data, f, indent=2)
f.truncate()
break
def learn_new_input_and_answer(self, message, tag, answer):
with open(f'./sources/{self.model_name}/intents.json', 'r+') as f:
fields_to_edit = ['neutral_responses', "sad_responses", "happy_responses", 'depressed_responses',
'angry_responses', 'love_responses', 'best_friends_responses']
data = json.load(f)
for j in data:
if j['tag'] == tag:
dictionary = j['patterns']
dictionary.append(message)
dictionary = sorted(list(set(dictionary)))
j['patterns'] = dictionary
for field in fields_to_edit:
dictionary = j[field]
dictionary.append(answer)
dictionary = sorted(list(set(dictionary)))
j[field] = dictionary
                    f.seek(0)
                    json.dump(data, f, indent=2)
                    f.truncate()  # drop stale bytes when the new JSON is shorter
                    break
def _learn_from_new_type(self, intents_json, message):
openai.api_key = OPEN_AI_API_TOKEN
new_prompt = 'I am a highly intelligent question answering bot. If you ask me a question that is ' \
'rooted in truth, I will give you the answer. If you ask me a question that is nonsense,' \
' trickery, or has no clear answer, I will respond with "Unknown".\n \n'
history = self.history.get_all()
history.reverse()
for i, prompt in enumerate(history):
if i >= 15:
break
elif isinstance(prompt, int):
break
elif prompt[0] == "bot":
new_prompt += "A:" + prompt[1] + "\n"
else:
new_prompt += "Q:" + prompt[1] + "\n"
new_prompt += "\nA:"
start_time = time.perf_counter()
response = openai.Completion.create(
engine="text-davinci-002",
prompt=new_prompt,
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=1.0,
presence_penalty=2.0,
stop=["\n"]
)
print('[DEBUG]: Violet Bot requested help from openAI at', datetime.now(), 'With message: "', new_prompt
, '". The response took ', math.ceil((time.perf_counter() - start_time) * 1000),
"milliseconds to calculate")
result = response['choices'][0]['text']
new_entry = {
"tag": message,
"patterns": [
message
],
"neutral_responses": [result],
"sad_responses": [result],
"happy_responses": [result],
"depressed_responses": [result],
"angry_responses": [result],
"love_responses": [result],
"best_friends_responses": [result],
}
intents_json.append(new_entry)
self.history.put(("bot", result))
json.dump(intents_json, open(f'./sources/{self.model_name}/intents.json', 'r+'), indent=2)
return result
def request_tag(self, message):
pass
def get_tag_by_id(self, id):
pass
def request_method(self, message):
pass
def request(self, author, message):
self._store_message(author, message)
ints = self._predict_class(message)
if len(ints) == 0:
return self._learn_from_new_type(self.intents, message)
elif ints[0]['intent'] in self.intent_methods.keys():
self.intent_methods[ints[0]['intent']]()
return "wtf is this"
else:
return self._get_response(author, ints, self.intents, self.original_message)
def train_with_prompts(self, message, correct_answer):
self.log_to_conversation(f"\n {message}")
self._store_message("training", message)
ints = self._predict_class(message)
if len(ints) == 0:
self.learn_new_tag(self.intents, message, message, correct_answer)
else:
self._get_response_with_answer("training", ints, self.intents, self.original_message, correct_answer)
def _store_message(self, username, message):
self.history.put((username, message))
def log_to_conversation(self, message):
with open(f"sources/{self.model_name}/log_conversation.txt", "a") as f:
f.write(f"{message} \n")
def _get_mood_change(self, message):
polarity_scores = self.sia.polarity_scores(message)
result = [0, 0]
for key, val in polarity_scores.items():
if key == "pos":
result[0] += val
if key == "neg":
result[0] -= val
if key == "compound":
result[1] += val
if key == "neu":
result[0] += val / 10
return result
| [
"Q:PLACEHOLDER\n",
"\nA:",
"A:PLACEHOLDER\n",
"I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with \"Unknown\".\n \n"
] |
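For reference, a worked example of the _get_mood_change mapping above using NLTK's VADER scores; the numbers are illustrative and depend on the lexicon version.

from nltk.sentiment import SentimentIntensityAnalyzer

sia = SentimentIntensityAnalyzer()
scores = sia.polarity_scores("I love this, thank you!")
# e.g. {"neg": 0.0, "neu": 0.36, "pos": 0.64, "compound": 0.84}
valence = scores["pos"] - scores["neg"] + scores["neu"] / 10  # first element of the returned pair
compound = scores["compound"]                                 # second element of the returned pair
# _get_mood_change would return roughly [0.676, 0.84] for this message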