date_collected (string, 1 value) | repo_name (string, 6-116 chars) | file_name (string, 2-220 chars) | file_contents (string, 13-357k chars) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | Chloeunterwegs/agenthub_operators | operators~split_data.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
from .base_operator import BaseOperator
from ai_context import AiContext
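# Operator overview: wraps LangChain's RecursiveCharacterTextSplitter. It reads the "text"
# input, splits it into overlapping chunks (defaulting to a chunk size of 2000 and an
# overlap of 100 when the parameters are left empty), and writes the chunks to the
# "rts_processed_content" output.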
class SplitData(BaseOperator):
def __init__(self):
super().__init__()
@staticmethod
def declare_name():
return 'Recursively Split Text'
@staticmethod
def declare_parameters():
return [
{
"name": "chunk_size",
"data_type": "integer",
"placeholder": "Enter Chunk Size (Optional: Default is 2000)"
},
{
"name": "chunk_overlap",
"data_type": "integer",
"placeholder": "Enter Chunk Overlap (Optional: Default is 100)"
}
]
@staticmethod
def declare_inputs():
return [
{
"name": "text",
"data_type": "string",
}
]
@staticmethod
def declare_outputs():
return [
{
"name": "rts_processed_content",
"data_type": "string",
}
]
def run_step(
self,
step,
ai_context: AiContext
):
params = step['parameters']
split_text = self.process(params, ai_context)
ai_context.set_output('rts_processed_content', split_text, self)
ai_context.add_to_log("Successfully split text!")
def process(self, params, ai_context):
text = ai_context.get_input('text', self)
formatted = self.split(params, ai_context, text)
return formatted
def split(self, params, ai_context, content):
chunk_size = params.get('chunk_size', '2000')
chunk_overlap = params.get('chunk_overlap', '100')
if chunk_size:
chunk_size = int(chunk_size)
else:
chunk_size = 2000
if chunk_overlap:
chunk_overlap = int(chunk_overlap)
else:
chunk_overlap = 100
ai_context.add_to_log(f"Splitting text with {chunk_size} size and {chunk_overlap} overlap")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
        texts = text_splitter.split_text(content)  # the "text" input is a plain string, so use split_text rather than split_documents
return texts | [] |
2024-01-10 | Chloeunterwegs/agenthub_operators | operators~index_data.py | import tiktoken
import numpy as np
import openai
from itertools import islice
from .base_operator import BaseOperator
from .util import get_max_tokens_for_model
from ai_context import AiContext
# This is the size of each chunk in terms of tokens. I chose 1k because a small query could
# hypothetically fit 4 chunks in the context. Feels like a good balance between speed and accuracy.
EMBEDDING_CTX_LENGTH = 1000
EMBEDDING_ENCODING = 'cl100k_base'
class IndexData(BaseOperator):
def __init__(self):
super().__init__()
@staticmethod
def declare_name():
return 'Index Data'
@staticmethod
def declare_parameters():
return []
@staticmethod
def declare_inputs():
return [
{
"name": "text",
"data_type": "string",
}
]
@staticmethod
def declare_outputs():
return [
{
"name": "vector_index",
# Just a dictionary, without specifying any keys that are expected to be present there.
# Naturally since it is vector index, the keys are going to be str(embedding vector).
"data_type": "{}",
}
]
def run_step(
self,
step,
ai_context: AiContext
):
text = ai_context.get_input('text', self)
text = self.clean_text(text)
embeddings_dict = self.len_safe_get_embedding(text, ai_context)
ai_context.set_output('vector_index', embeddings_dict, self)
ai_context.add_to_log("Indexing complete with {} chunk embeddings".format(len(embeddings_dict)))
def clean_text(self, text):
return text.replace("\n", " ")
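    # batched(): yield successive tuples of at most n items from an iterable
    # (a generator-based batching helper, similar to itertools-style batching).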
def batched(self, iterable, n):
if n < 1:
raise ValueError('n must be at least one')
it = iter(iterable)
while (batch := tuple(islice(it, n))):
yield batch
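    # chunked_tokens(): tokenize the text with tiktoken, then yield decoded string chunks
    # of at most chunk_length tokens each, so every chunk fits the embedding context window.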
def chunked_tokens(self, text, encoding_name, chunk_length):
encoding = tiktoken.get_encoding(encoding_name)
tokens = encoding.encode(text)
chunks_iterator = self.batched(tokens, chunk_length)
for chunk in chunks_iterator:
decoded_chunk = encoding.decode(chunk) # Decode the chunk
yield decoded_chunk
def len_safe_get_embedding(
self,
text,
ai_context,
max_tokens=EMBEDDING_CTX_LENGTH,
encoding_name=EMBEDDING_ENCODING
):
chunk_embeddings = {}
for chunk in self.chunked_tokens(text, encoding_name=encoding_name, chunk_length=max_tokens):
embedding = ai_context.embed_text(chunk)
embedding_key = tuple(embedding) # Convert numpy array to tuple
chunk_embeddings[embedding_key] = chunk
return chunk_embeddings
| [] |
2024-01-10 | iart-ai/pplx | pplx~core.py | from typing import Dict, Any, Optional, List, Tuple, Union
import requests
from langchain.adapters.openai import convert_message_to_dict, convert_dict_to_message
from langchain.chains.base import logger
from langchain.utils import get_from_dict_or_env
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel
from langchain_core.language_models.llms import create_base_retry_decorator
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatResult, ChatGeneration
from langchain_core.utils import get_pydantic_field_names
from pydantic.v1 import Field, root_validator, BaseModel
from langchain.schema.messages import HumanMessage
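# Module overview: a thin Perplexity REST client plus a LangChain BaseChatModel wrapper
# around the https://api.perplexity.ai/chat/completions endpoint.
#
# Hypothetical usage sketch (assumes PPLX_API_KEY is set in the environment; the model
# name below is only an example):
#   chat = ChatPerplexity(model="pplx-7b-online")
#   result = chat.generate([[HumanMessage(content="Hello")]])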
class Perplexity:
@classmethod
def create(cls, model, messages, api_key):
print(model, messages, api_key)
url = "https://api.perplexity.ai/chat/completions"
payload = {"model": model, "messages": messages}
headers = {"accept": "application/json", "content-type": "application/json",
"authorization": "Bearer " + api_key}
response = requests.post(url, json=payload, headers=headers)
print(response.text)
return response.json()
class ChatPerplexity(BaseChatModel):
@property
def lc_secrets(self) -> Dict[str, str]:
return {"pplx_api_key": "PPLX_API_KEY"}
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
return attributes
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
client: Any = Field(default=None, exclude=True) #: :meta private:
model_name: str = Field(default="pplx-7b-online", alias="model")
"""Model name to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
pplx_api_key: Optional[str] = Field(default=None, alias="api_key")
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended.""")
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter.")
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["pplx_api_key"] = get_from_dict_or_env(values, "pplx_api_key", "PPLX_API_KEY")
values['client'] = Perplexity
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
params = {"model": self.model_name, **self.model_kwargs, }
return params
def completion_with_retry(self, run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = create_base_retry_decorator(error_types=[requests.ConnectionError], max_retries=1,
run_manager=run_manager)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return self.client.create(**kwargs)
return _completion_with_retry(**kwargs)
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "pplx-chat"
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the parameters used for the openai client."""
pplx_creds: Dict[str, Any] = {"model": self.model_name, "api_key": self.pplx_api_key}
return {**self._default_params, **pplx_creds}
def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict:
overall_token_usage: dict = {}
for output in llm_outputs:
token_usage = output["token_usage"]
for k, v in token_usage.items():
if k in overall_token_usage:
overall_token_usage[k] += v
else:
overall_token_usage[k] = v
combined = {"token_usage": overall_token_usage, "model_name": self.model_name}
return combined
def _generate(self, messages: List[BaseMessage], stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None, **kwargs: Any, ) -> ChatResult:
message_dicts, params = self._create_message_dicts(messages, stop)
params = {**params, **kwargs, }
response = self.completion_with_retry(messages=message_dicts, run_manager=run_manager, **params)
return self._create_chat_result(response)
def _create_message_dicts(self, messages: List[BaseMessage], stop: Optional[List[str]]) -> Tuple[
List[Dict[str, Any]], Dict[str, Any]]:
params = self._client_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
message_dicts = [convert_message_to_dict(m) for m in messages]
return message_dicts, params
def _create_chat_result(self, response: Union[dict, BaseModel]) -> ChatResult:
generations = []
if not isinstance(response, dict):
response = response.dict()
for res in response["choices"]:
message = convert_dict_to_message(res["message"])
gen = ChatGeneration(message=message, generation_info=dict(finish_reason=res.get("finish_reason")), )
generations.append(gen)
token_usage = response.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model_name, }
return ChatResult(generations=generations, llm_output=llm_output) | [
"application/json"
] |
2024-01-10 | promptengineers-ai/llm-server | tests~unit~strategies~llms~ollama.py | import unittest
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from promptengineers.llms.services.langchain.callbacks import AgentStreamCallbackHandler, AsyncIteratorCallbackHandler
from promptengineers.llms.strategies import ModelContext, OllamaStrategy
from promptengineers.llms.services.langchain.chains import ChainService
from promptengineers.stream.utils import token_stream, end_stream
from langchain.schema import HumanMessage, SystemMessage, AIMessage
chat_history = [('Who won the 2001 world series?', 'The arizona diamondbacks won the 2001 world series.')]
filtered_messages = ['Who won the 2001 world series?', 'The arizona diamondbacks won the 2001 world series.', 'Who were the pitchers?']
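# Test overview: exercises the Ollama strategy through ModelContext and ChainService with a
# streaming stdout callback; the test is skipped until a local llama2:7b model is set up.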
class TestOllamaStrategy(unittest.TestCase):
@unittest.skip("Skip testing for now, need to setup model")
def test_ollama_query(self):
callback = StreamingStdOutCallbackHandler()
model_service = ModelContext(strategy=OllamaStrategy())
llm = model_service.chat(
model_name='llama2:7b',
temperature=0.1,
callbacks=[callback]
)
chain = ChainService(llm).conversation()
# query = {'input': filtered_messages[-1], 'context': chat_history}
messages = [
# SystemMessage(
# content="You are a helpful AI assitant that responds like a pirate."
# ), ## Does not work with System Message
HumanMessage(
content="Who won the 2001 world series?"
),
AIMessage(
content="The 2001 World Series was won by the Arizona Diamondbacks, who defeated the New York Yankees in 7 games. The Diamondbacks rallied from being down 2-0 and 3-2 in the series to win their first World Series title in just their 4th season as an expansion franchise. The MVP was pitcher Randy Johnson.content=' The 2001 World Series was won by the Arizona Diamondbacks, who defeated the New York Yankees in 7 games. The Diamondbacks rallied from being down 2-0 and 3-2 in the series to win their first World Series title in just their 4th season as an expansion franchise. The MVP was pitcher Randy Johnson."
),
HumanMessage(
content="Who were the team owners at the time?"
),
]
result = chain.run(filtered_messages[0], callbacks=[callback])
print(result) | [
"Who were the team owners at the time?",
"Who won the 2001 world series?",
"The 2001 World Series was won by the Arizona Diamondbacks, who defeated the New York Yankees in 7 games. The Diamondbacks rallied from being down 2-0 and 3-2 in the series to win their first World Series title in just their 4th season as an expansion franchise. The MVP was pitcher Randy Johnson.content=' The 2001 World Series was won by the Arizona Diamondbacks, who defeated the New York Yankees in 7 games. The Diamondbacks rallied from being down 2-0 and 3-2 in the series to win their first World Series title in just their 4th season as an expansion franchise. The MVP was pitcher Randy Johnson."
] |
2024-01-10 | RetroCirce/MusicLDM | interface~src~clap~clap_module~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
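# _rescan_model_configs(): scan every path in _MODEL_CONFIG_PATHS for *.json files that
# define "embed_dim", "audio_cfg" and "text_cfg", and rebuild the _MODEL_CONFIGS registry
# keyed by file stem (sorted in natural order).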
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for k in state_dict:
# if k.startswith('transformer'):
# v = state_dict.pop(k)
# state_dict['text_branch.' + k[12:]] = v
return state_dict
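# create_model(): build a CLAP model from a registered config. The "openai" pretrained path
# loads a ViT-B-16 text/vision checkpoint via load_openai_model; otherwise the model is
# instantiated from the config and optional pretrained weights are loaded for the whole
# model and/or the audio branch (PANN or HTSAT checkpoints).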
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = 'None'
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard Code in model name
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url:
checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
elif os.path.exists(pretrained_orig):
checkpoint_path = pretrained_orig
if checkpoint_path:
logging.info(f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained}).")
ckpt = load_state_dict(checkpoint_path, skip_params=True)
model.load_state_dict(ckpt)
else:
logging.warning(
f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
)
raise RuntimeError(
f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
)
if pretrained_audio:
if amodel_name.startswith('PANN'):
if 'Cnn14_mAP' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['model']
keys = list(audio_ckpt.keys())
for key in keys:
if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key] = v
elif os.path.basename(pretrained_audio).startswith('PANN'): # checkpoint trained via HTSAT codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith('finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
elif amodel_name.startswith('HTSAT'):
if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model') and ('spectrogram_extractor' not in key
and 'logmel_extractor' not in key):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith('HTSAT'): # checkpoint trained via HTSAT codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith('finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
else:
            raise ValueError('This audio encoder pretrained checkpoint is not supported')
model.load_state_dict(audio_ckpt, strict=False)
logging.info(f"Loading pretrained {amodel_name} weights ({pretrained_audio}).")
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model, model_cfg
def create_model_and_transforms(
model_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
# pretrained_image: bool = False,
):
    model, model_cfg = create_model(  # create_model returns a (model, model_cfg) tuple
model_name,
pretrained,
precision,
device,
jit,
force_quick_gelu=force_quick_gelu,
# pretrained_image=pretrained_image
)
preprocess_train = image_transform(model.visual.image_size, is_train=True)
preprocess_val = image_transform(model.visual.image_size, is_train=False)
return model, preprocess_train, preprocess_val
def list_models():
"""enumerate available model architectures based on config files"""
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
"""add model config path or file and update registry"""
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
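# Hypothetical usage sketch (the model names below are examples and must match a config in
# model_configs/; they are not guaranteed by this file):
#   model, model_cfg = create_model("HTSAT-tiny", "roberta", pretrained="", device=torch.device("cpu"))
#   print(list_models())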
| [] |
2024-01-10 | sifuhr/OpsPilot | actions~core~action_weops_fallback.py | from typing import Any, Text, Dict, List
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from rasa_sdk import Action, Tracker, logger
from rasa_sdk.events import UserUtteranceReverted
from rasa_sdk.executor import CollectingDispatcher
from actions.constant.server_settings import server_settings
from actions.utils.indexer_utils import Searcher
from actions.utils.langchain_utils import langchain_qa, query_chatgpt, chat_online
from actions.utils.redis_utils import RedisUtils
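# Action overview: fallback handler for unrecognized intents. Depending on configuration it
# answers from a local Chroma knowledge base (text2vec embeddings), an online knowledge
# search, or a plain ChatGPT-style completion, and reverts the user utterance afterwards.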
class ActionWeOpsFallback(Action):
def __init__(self) -> None:
super().__init__()
self.searcher = Searcher()
if server_settings.vec_db_path is not None:
embeddings = HuggingFaceEmbeddings(model_name='shibing624/text2vec-base-chinese',
cache_folder='cache/models',
encode_kwargs={
'show_progress_bar': True
})
self.doc_search = Chroma(persist_directory=server_settings.vec_db_path, embedding_function=embeddings)
def name(self) -> Text:
return "action_weops_fallback"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
user_msg = tracker.latest_message['text']
run_mode = server_settings.run_mode
logger.info(f'无法识别用户的意图,进入默认Fallback,用户输入的信息为:{user_msg}')
logger.info(f'TOP3 Intent结果如下:{tracker.latest_message["intent_ranking"][0:3]}')
        # TODO: first search the local knowledge files for relevant content and answer directly; if nothing is found, hand the question to GPT to search and summarize.
if tracker.active_loop_name is None:
if run_mode == 'DEV':
dispatcher.utter_message(text='OpsPilot当前运行在开发模式,没有办法回复这些复杂的问题哦')
return [UserUtteranceReverted()]
else:
try:
if server_settings.openai_endpoint is None:
dispatcher.utter_message(text='WeOps智能助理联网检索能力没有打开,无法回答这个问题.')
return [UserUtteranceReverted()]
events = list(filter(lambda x: x.get("event") == "user" and x.get("text"), tracker.events))
user_messages = []
for event in reversed(events):
if len(user_messages) >= 10:
break
user_messages.insert(0, event.get("text"))
if server_settings.fallback_chat_mode == 'knowledgebase':
prompt_template = RedisUtils.get_prompt_template()
prompt_template = self.searcher.format_prompt(prompt_template, user_msg)
result = langchain_qa(self.doc_search, prompt_template, user_msg)
logger.info(f'GPT本地知识问答:问题[{user_msg}],回复:[{result}]')
dispatcher.utter_message(text=result['result'])
elif server_settings.fallback_chat_mode == 'online_knowledgebase':
result = chat_online(user_msg)
logger.info(f'GPT本地知识问答:问题[{user_msg}],回复:[{result}]')
dispatcher.utter_message(text=result)
else:
user_prompt = ''
for user_message in user_messages:
user_prompt += user_message + '\n'
user_prompt += user_msg
if user_prompt != '':
system_prompt = RedisUtils.get_fallback_prompt()
result = query_chatgpt(system_prompt, user_prompt)
logger.info(f'GPT问答模式:问题[{user_msg}],回复:[{result}]')
dispatcher.utter_message(text=result)
except Exception as e:
logger.exception('请求Azure OpenAI 服务异常')
dispatcher.utter_message(text='WeOps智能助理处于非常繁忙的状态,请稍后再试.')
return [UserUtteranceReverted()]
else:
return []
| [
"PLACEHOLDER\n"
] |
2024-01-10 | sifuhr/OpsPilot | ops_pilot_cli.py | import os
import shutil
import fire
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFium2Loader, UnstructuredMarkdownLoader, UnstructuredWordDocumentLoader, \
UnstructuredPowerPointLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import MarkdownTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from loguru import logger
from tqdm import tqdm
from actions.constant.server_settings import server_settings
from actions.utils.indexer_utils import Searcher
from actions.utils.langchain_utils import langchain_qa
from actions.utils.redis_utils import RedisUtils
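# CLI overview: Fire-based bootstrap commands -- init_data resets the default prompts,
# query_embed_knowledge starts an interactive local-knowledge Q&A loop, and
# embed_local_knowledge rebuilds the Chroma vector store and the inverted index from a
# directory of .md / .pdf / .docx / .pptx files.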
class BootStrap(object):
def init_data(self, force=False):
"""
        Initialize the system configuration.
        Args:
            force: whether to reset all system settings to their defaults
"""
RedisUtils.set_default_prompt(force)
def query_embed_knowledge(self):
"""
        Enter an interactive command-line mode for local knowledge Q&A.
"""
embeddings = HuggingFaceEmbeddings(model_name=server_settings.embed_model_name,
cache_folder=server_settings.embed_model_cache_home,
encode_kwargs={
'show_progress_bar': True
})
doc_search = Chroma(persist_directory=server_settings.vec_db_path, embedding_function=embeddings)
searcher = Searcher()
while True:
query = input("请输入问题(输入exit退出终端):")
if query == "exit":
break
prompt_template = RedisUtils.get_prompt_template()
prompt_template = searcher.format_prompt(prompt_template, query)
results = langchain_qa(doc_search, prompt_template, query)
logger.info(f'回复:[{results["result"]}]')
def embed_local_knowledge(self, knowledge_path: str, ):
"""
        Index the files under the target path and store them in the vector database and the inverted index.
        Args:
            knowledge_path: absolute path where the local knowledge files are stored
"""
logger.info('清理索引文件....')
if os.path.exists(server_settings.vec_db_path):
logger.info(f'清理语义向量数据库文件:[{server_settings.vec_db_path}]')
shutil.rmtree(server_settings.vec_db_path)
if os.path.exists(server_settings.indexer_db_path):
logger.info(f'清理倒排索引数据库文件:[{server_settings.indexer_db_path}]')
shutil.rmtree(server_settings.indexer_db_path)
knowledge_files = []
for root, dirs, files in os.walk(knowledge_path, topdown=False):
for name in files:
knowledge_files.append(os.path.join(root, name))
knowledge_docs = []
for knowledge_file in tqdm(knowledge_files, desc='索引文件中....'):
if knowledge_file.lower().endswith(".md"):
loader = UnstructuredMarkdownLoader(knowledge_file)
text_splitter = MarkdownTextSplitter(chunk_size=1000, chunk_overlap=0)
knowledge_docs += loader.load_and_split(text_splitter)
elif knowledge_file.lower().endswith(".pdf"):
loader = PyPDFium2Loader(knowledge_file)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
knowledge_docs += loader.load_and_split(text_splitter)
elif knowledge_file.lower().endswith(".docx"):
loader = UnstructuredWordDocumentLoader(knowledge_file, mode="elements")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
knowledge_docs += loader.load_and_split(text_splitter)
elif knowledge_file.lower().endswith(".pptx"):
loader = UnstructuredPowerPointLoader(knowledge_file, mode="elements")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
knowledge_docs += loader.load_and_split(text_splitter)
knowledge_contents = [x.page_content for x in knowledge_docs]
logger.info('建立知识的语义索引......')
embeddings = HuggingFaceEmbeddings(model_name=server_settings.embed_model_name,
cache_folder=server_settings.embed_model_cache_home,
encode_kwargs={
'show_progress_bar': True
})
doc_search = Chroma.from_documents(knowledge_docs, embeddings, persist_directory=server_settings.vec_db_path)
doc_search.persist()
logger.info('建立知识内容的倒排索引.....')
search = Searcher()
search.index_knowledge(knowledge_contents)
if __name__ == '__main__':
load_dotenv()
os.environ.setdefault('SENTENCE_TRANSFORMERS_HOME', server_settings.embed_model_cache_home)
fire.Fire(BootStrap)
| [] |
2024-01-10 | Abdul-Jaweed/Plant-Disease-Classification | research~Team%20Member%20Work~Masna%20Ashraf~plantbot_app.py | import streamlit as st
from streamlit_chat import message
from datetime import datetime
from gpt_index import GPTSimpleVectorIndex, LLMPredictor
import os
from dotenv import load_dotenv
from langchain import OpenAI
from modules.utils import Utilities
load_dotenv()
st.markdown("<h1 style='text-align: center; color: Blue;'>Plant Disease Classification Chat-Bot👋</h1>", unsafe_allow_html=True)
utils = Utilities()
user_api_key = utils.load_api_key()
if not user_api_key:
st.write("Please provide your OpenAI API key to communicate with chat bot.")
st.stop()
else:
os.environ["OPENAI_API_KEY"] = user_api_key
vector_index_path = "vectorIndex.json"
llmPredictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=os.environ["OPENAI_API_KEY"]))
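# get_bot_response(): load the GPT Index vector index from disk and run the user query
# against it in "compact" response mode, returning the answer as a string.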
def get_bot_response(user_query):
vIndex = GPTSimpleVectorIndex.load_from_disk(vector_index_path)
response = vIndex.query(user_query, response_mode="compact")
return str(response)
def display_messages(all_messages):
for idx, msg in enumerate(all_messages):
if msg['user'] == 'user':
message(f"You ({msg['time']}): {msg['text']}", is_user=True, key=f"user-{idx}")
else:
message(f"Bot ({msg['time']}): {msg['text']}", key=f"bot-{idx}")
def send_message(user_query, all_messages):
if user_query:
current_time = datetime.now().strftime("%H:%M:%S")
all_messages.append({'user': 'user', 'time': current_time, 'text': user_query})
bot_response = get_bot_response(user_query)
all_messages.append({'user': 'bot', 'time': current_time, 'text': bot_response})
st.session_state.all_messages = all_messages
display_messages(all_messages)
if 'all_messages' not in st.session_state:
st.session_state.all_messages = []
st.header(":blue[Please start conversion with plant chat bot regarding the plant disease!]:sunglasses:")
user_query = st.text_input("You: ", "", key="input")
send_button = st.button("Send")
if send_button:
if not user_api_key:
st.error("API key is missing. Please provide your OpenAI API key.")
else:
send_message(user_query, st.session_state.all_messages) | [] |
2024-01-10 | ckqqqq/langchain_learning | test_network.py | # from langchain import PromptTemplate
# import os
# # os.environ["ALL_PROXY"]="127.0.0.1:11137"
# os.environ["HUGGINGFACEHUB_API_TOKEN"]="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"
# template="""Question: {question}
# Answer: """
# prompt= PromptTemplate(
# template=template,
# input_variables=['question']
# )
# # note the trailing "s" in input_variables
# question="Which team won the figure skating in the Beijing Olympics?"
# from langchain import HuggingFaceHub,LLMChain
# # from .autonotebook import tqdm as notebook_tqdm
# # initialize the Hub
# hub_llm=HuggingFaceHub(
# repo_id="google/flan-t5-xl",
# model_kwargs={'temperature':1e-10}
# )
# # create prompt template >LLM chain
# llm_chain=LLMChain(
# prompt=prompt,
# llm=hub_llm
# )
# print(llm_chain.run(question))
from langchain import HuggingFaceHub, LLMChain
import os
from langchain import PromptTemplate
os.environ["ALL_PROXY"]="127.0.0.1:11137"
os.environ["HUGGINGFACEHUB_API_TOKEN"]="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"
template = """Question: {question}
Answer: """
prompt = PromptTemplate(
template=template,
input_variables=['question']
)
# user question
question = "Which team won the figure skating in the Beijing Olympics?"# 不会回答,妈的
question = "Which NFL team won the Super Bowl in the 2010 season?"# 会回答
# os.environ['HUGGINGFACEHUB_API_TOKEN'] = "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"
# initialize Hub LLM
hub_llm = HuggingFaceHub(
repo_id='google/flan-t5-xl',
model_kwargs={'temperature':1e-10}
)
# create prompt template > LLM chain
llm_chain = LLMChain(
prompt=prompt,
llm=hub_llm
)
# ask the user question about NFL 2010
print(llm_chain.run(question)) | [
"question",
"Question: {question}\n\nAnswer: "
] |
2024-01-10 | nprasad2077/jupyter_stats | Untitled-1.py | # %%
import os
import openai
# %%
%load_ext jupyter_ai_magics
# %%
%ai help
# %%
%ai list
# %%
def celsius_to_fahrenheit(celsius):
return (celsius * 9/5) + 32
# %%
def celsius_to_fahrenheit(celsius):
return (celsius * 9/5) + 32
celsius_to_fahrenheit(20)
# %%
celsius_to_fahrenheit(0)
# %%
celsius_to_fahrenheit(100)
# %%
%%ai chatgpt -f code
a function that returns its square root.
# %%
import math
def square_root(x):
return math.sqrt(x)
# %%
import math
def square_root(x):
return math.sqrt(x)
# %%
def square_root(x):
return x**0.5
# %%
import math
def square_root(x):
return math.sqrt(x)
# %%
def square_root(x):
return x**(0.5)
# %%
import math
def square_root(x):
if x < 0:
raise ValueError("Input must be a non-negative number")
try:
return math.sqrt(x)
except ValueError as e:
print(f"Error calculating square root: {e}")
# Example usage
try:
result = square_root(16)
print(result)
except ValueError as e:
print(e)
# return math.sqrt(x)
# %%
def square_root(x):
return x**0.5
# Example usage:
print(square_root(9))
# %%
import math
def get_square_root(num):
return math.sqrt(num)
# %%
import math
def square_root(n):
return math.sqrt(n)
# %%
# Here is a simple Python function that returns the square root of a given number:
import math
def square_root(number):
return math.sqrt(number)
# Note: This answer assumes that you are looking for a programming code snippet and not the mathematical formula for square roots.
# %%
import math
def square_root(input):
return math.sqrt(input)
# %%
import math
def square_root(num):
return math.sqrt(num)
# %%
import math
def square_root(x):
return math.sqrt(x)
# %%
def square_root(x):
return x ** (1 / 2)
# %%
def square_root(x):
return x ** 0.5
# %%
import math
def square_root(x):
return math.sqrt(x)
# %%
%%ai openai-chat:gpt-3.5-turbo -f code
A function that returns the cube root of a number
# %%
def cube_root(x):
return x**(1/3)
# %%
def cube_root(x):
return x**(1/3)
# %%
def cube_root(x):
return x**(1/3)
# %%
def cube_root(x):
return x**(1/3)
# %%
import math
def get_cube_root(num):
return math.pow(num, 1/3)
# %%
def get_cube_root(num):
return num**(1/3)
# %%
# Certainly! Here's a Python function that returns the cube root of a given number:
def cube_root(number):
return number ** (1/3)
# Note: This code assumes that the number provided as an argument is a positive real number.
# %%
import math
def cube_root(input):
return input ** (1/3)
# %%
import math
def cube_root(num):
return num**(1/3)
# %%
def cube_root(x):
return x ** (1/3)
# %%
def cube_root(x):
return x ** (1 / 3)
# %%
import requests
# Make a GET request to an API endpoint
response = requests.get('https://nba-stats-db.herokuapp.com/api/playerdata/name/Jayson Tatum')
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Access the response data
data = response.json()
print(data)
else:
print('Error:', response.status_code)
# %%
%%ai openai-chat:gpt-3.5-turbo -f code
Sort the data from above to find Jayson Tatums highest scoring season.
# %%
data = {
'Jayson Tatum': [
{"Season": "2017-2018", "Points": 13.9},
{"Season": "2018-2019", "Points": 15.7},
{"Season": "2019-2020", "Points": 23.4},
{"Season": "2020-2021", "Points": 26.4}
]
}
highest_season = max(data['Jayson Tatum'], key=lambda x: x['Points'])
highest_season['Season']
# %%
data = {
'Jayson Tatum': {
'2017-2018': 13.9,
'2018-2019': 15.7,
'2019-2020': 23.4,
'2020-2021': 26.4
}
}
highest_scoring_season = max(data['Jayson Tatum'], key=lambda x: data['Jayson Tatum'][x])
highest_scoring_season
# %%
tatum_stats = [
{"season": "2017-2018", "points": 1399},
{"season": "2018-2019", "points": 1533},
{"season": "2019-2020", "points": 1545},
{"season": "2020-2021", "points": 1573}
]
sorted_stats = sorted(tatum_stats, key=lambda x: x["points"], reverse=True)
highest_scoring_season = sorted_stats[0]["season"]
# %%
# List of Jayson Tatum's seasons and their corresponding scores
seasons = [
{"season": "2017-2018", "score": 13.9},
{"season": "2018-2019", "score": 15.7},
{"season": "2019-2020", "score": 23.4},
{"season": "2020-2021", "score": 26.4}
]
# Sorting the seasons based on score in descending order
sorted_seasons = sorted(seasons, key=lambda x: x["score"], reverse=True)
# Jayson Tatum's highest scoring season
highest_scoring_season = sorted_seasons[0]["season"]
# %%
player_stats = {
"Jayson Tatum": [15, 18, 21, 23, 20]
}
highest_scoring_season = max(player_stats["Jayson Tatum"])
highest_scoring_season
| [] |
2024-01-10 | josewilmerDR/tossupp | src~api~routesChat.py | import os
import openai
from flask import Flask, redirect, render_template, request, url_for, jsonify
import requests
# app = Flask(__name__)
openai.api_key = os.getenv("OPENAI_API_KEY")
from flask import Flask, request, jsonify, url_for, Blueprint, current_app
from api.modelsChat import db, RecipeChat
from api.user import User
from api.token_bloked_list import TokenBlokedList
from api.favoritos import Favorito
from api.utils import generate_sitemap, APIException
from api.extensions import jwt, bcrypt
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
import re
# FOR DATE AND TIME OPERATIONS.
from datetime import date, time, datetime, timezone, timedelta  # timedelta is used for subtracting hours.
# FOR HANDLING ENCRYPTION OF THE INFORMATION. FLASK, REQUEST AND JSONIFY ARE ALSO REQUIRED, BUT THOSE WERE ALREADY IMPORTED ABOVE.
from flask_jwt_extended import get_jwt
from flask_jwt_extended import JWTManager
# from werkzeug.utils import secure_filename
import cloudinary
import cloudinary.uploader
import cloudinary.api
chat = Blueprint('chat', __name__)
# Configure Cloudinary
cloudinary.config(
cloud_name = os.getenv("CLOUDINARY_CLOUD_NAME"),
api_key = os.getenv("CLOUDINARY_API_KEY"),
api_secret = os.getenv("CLOUDINARY_API_SECRET"),
api_proxy = "http://proxy.server:9999",
secure = True
)
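# Blueprint overview: chat-related recipe routes. They create recipes (manually uploaded or
# generated through the OpenAI completion + image APIs), store images in Cloudinary, and
# expose share/unshare, edit, history, and delete endpoints protected by JWT.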
# Handle/serialize errors like a JSON object
@chat.errorhandler(APIException)
def handle_invalid_usage(error):
return jsonify(error.to_dict()), error.status_code
# Token verification function:
def verificacionToken(identity):
jti = identity["jti"]
token = TokenBlokedList.query.filter_by(token=jti, is_blocked=True).first()
if token:
        return True  # token is blocked
else:
        return False  # token is not blocked
# ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
# def allowed_file(filename):
# return '.' in filename and \
# filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@chat.route('/AddRecipe', methods=['POST'])
@jwt_required()
def add_recipe():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
if 'image_of_recipe' not in request.files:
raise APIException("No image to upload")
if 'description' not in request.form:
raise APIException("No description to upload")
if 'user_query' not in request.form:
raise APIException("No user_query to upload")
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
image_cloudinary_url = cloudinary.uploader.upload(
request.files['image_of_recipe'],
public_id = f'{request.form.get("user_query").replace(" ", "_")}_{timestamp}',
)['url'] # Extract the 'url' from the returned dictionary
new_recipe_chat = RecipeChat(
name="nombre de la receta", # actualiza esto
description=request.form.get("description"),
user_id=user,
user_query=request.form.get("user_query"),
image_of_recipe=image_cloudinary_url, # now this is the URL of the image in Cloudinary
share=False,
generated_by_ia=True,
)
    # Add the new entry and commit it
db.session.add(new_recipe_chat)
db.session.commit()
    # Return the recipe, the image URL, and the recipe ID in the response
return jsonify({"recipe": request.form.get("user_query"), "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
# @chat.route('/AddRecipe', methods=['POST'])
# @jwt_required()
# def add_recipe():
# jwt_claims = get_jwt()
# print(jwt_claims)
# user = jwt_claims["users_id"]
# print("el id del USUARIO:",user)
# if 'image_of_recipe' not in request.files:
# raise APIException("No image to upload")
# if 'description' not in request.form:
# raise APIException("No description to upload")
# if 'user_query' not in request.form:
# raise APIException("No user_query to upload")
# # Consigue un timestamp y formatea como string
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# image_cloudinary_url = cloudinary.uploader.upload(
# request.files['image_of_recipe'],
# public_id = f'{request.form.get("user_query").replace(" ", "_")}_{timestamp}',
# )
# new_recipe_chat = RecipeChat(
# name="nombre de la receta", # actualiza esto
# description=request.form.get("description"),
# user_id=user,
# user_query=request.form.get("user_query"),
# image_of_recipe=image_cloudinary_url, # ahora esto es la URL de la imagen en Cloudinary
# share=False,
# )
# # Añadir y hacer commit a la nueva entrada
# db.session.add(new_recipe_chat)
# db.session.commit()
# # Retornar la receta, la URL de la imagen y el ID de la receta en la respuesta
# return jsonify({"recipe": request.form.get("user_query"), "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
@chat.route('/AllShareRecipes', methods=['GET'])
@jwt_required()
def get_all_share_recipes():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
share_recipes = RecipeChat.query.filter_by(share=True).all()
share_recipes = list(map(lambda item: item.serialize(), share_recipes))
print(share_recipes)
return jsonify(share_recipes), 200
# Rendered on the public home page
@chat.route('/AllShareRecipesForAll', methods=['GET'])
def get_all_share_recipes_for_all():
share_recipes = RecipeChat.query.filter_by(share=True).all()
share_recipes = list(map(lambda item: item.serialize(), share_recipes))
print(share_recipes)
return jsonify(share_recipes), 200
@chat.route('/EditRecipeChat', methods=['PUT'])
@jwt_required()
def edit_recipe_chat():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
id = request.form.get("id")
print("ID DE RECETA:", id)
if 'image_of_recipe' not in request.files:
raise APIException("No image to upload")
if 'description' not in request.form:
raise APIException("No description to upload")
if 'user_query' not in request.form:
raise APIException("No user_query to upload")
if 'id' not in request.form:
raise APIException("No id to upload")
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
result = cloudinary.uploader.upload(
request.files['image_of_recipe'],
# public_id=f'recipe/{user.id}/{request.form.get("user_query")}',
# public_id=f'recipe/user/image_of_recipe',
public_id = f'{request.form.get("user_query").replace(" ", "_")}_{timestamp}',
        # To give the image a specific size:
# crop='limit',
# width=450,
# height=450,
# eager=[{
# 'width': 200, 'height': 200,
# 'crop': 'thumb', 'gravity': 'face',
# 'radius': 100
# },
# ],
# tags=['profile_picture']
)
my_image = RecipeChat.query.get(id)
my_image.image_of_recipe = result['secure_url']
my_image.description = request.form.get("description")
my_image.user_query = request.form.get("user_query")
my_image.user_id = user
db.session.add(my_image)
db.session.commit()
return jsonify(my_image.serialize()), 200
@chat.route('/EditAndShareRecipeChat', methods=['PUT'])
@jwt_required()
def edit_and_share_recipe_chat():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
id = request.form.get("id")
print("ID DE RECETA:", id)
if 'image_of_recipe' not in request.files:
raise APIException("No image to upload")
if 'description' not in request.form:
raise APIException("No description to upload")
if 'user_query' not in request.form:
raise APIException("No user_query to upload")
if 'id' not in request.form:
raise APIException("No id to upload")
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
result = cloudinary.uploader.upload(
request.files['image_of_recipe'],
# public_id=f'recipe/{user.id}/{request.form.get("user_query")}',
# public_id=f'recipe/user/image_of_recipe',
public_id = f'{request.form.get("user_query").replace(" ", "_")}_{timestamp}',
        # To give the image a specific size:
# crop='limit',
# width=450,
# height=450,
# eager=[{
# 'width': 200, 'height': 200,
# 'crop': 'thumb', 'gravity': 'face',
# 'radius': 100
# },
# ],
# tags=['profile_picture']
)
my_image = RecipeChat.query.get(id)
my_image.image_of_recipe = result['secure_url']
my_image.description = request.form.get("description")
my_image.user_query = request.form.get("user_query")
my_image.user_id = user
my_image.share = True
db.session.add(my_image)
db.session.commit()
return jsonify(my_image.serialize()), 200
@chat.route('/getChatHistory', methods=['GET'])
@jwt_required()
def get_chat_history():
jwt_claims = get_jwt()
print(jwt_claims)
user_id = jwt_claims["users_id"]
recipes = RecipeChat.query.filter_by(user_id=user_id, generated_by_ia=True).all()
recipes = list(map(lambda item: item.serialize(), recipes))
print(recipes)
return jsonify(recipes), 200
@chat.route('/getAllMyRecipes', methods=['GET'])
@jwt_required()
def get_all_my_recipes():
jwt_claims = get_jwt()
print(jwt_claims)
user_id = jwt_claims["users_id"]
recipes = RecipeChat.query.filter_by(user_id=user_id).all()
recipes = list(map(lambda item: item.serialize(), recipes))
print(recipes)
return jsonify(recipes), 200
@chat.route('/recipe', methods=['POST'])
@jwt_required()
def generate_recipe():
jwt_claims = get_jwt()
print(jwt_claims)
user_id = jwt_claims["users_id"]
print(user_id)
data = request.get_json()
prompt = "Eres una pagina web de recetas que responde con descripcion de la receta de una parráfo, una lista de ingredientes y un paso a paso para preparar la receta solicitada por el usuario: "+ data['prompt']
    # Generate the recipe
completion = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=1024
)
recipe_text = completion.choices[0].text
    # Generate the image
response = openai.Image.create(
prompt=data['prompt'],
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url']
    # Download the image
img_data = requests.get(image_url).content
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Save the image to Cloudinary
upload_result = cloudinary.uploader.upload(
img_data,
public_id = f'{data["prompt"].replace(" ", "_")}_{timestamp}',
resource_type = "auto"
)
image_cloudinary_url = upload_result['url']
    # Create a new entry in the database
new_recipe_chat = RecipeChat(
name="nombre de la receta", # actualiza esto
description=recipe_text,
user_id=user_id,
user_query=data['prompt'],
        image_of_recipe=image_cloudinary_url,  # now this is the URL of the image in Cloudinary
share=False,
generated_by_ia=True,
)
    # Add the new entry and commit it
db.session.add(new_recipe_chat)
db.session.commit()
    # Return the recipe, the image URL, and the recipe ID in the response
return jsonify({"recipe": recipe_text, "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
@chat.route('/ShareRecipeChat/<int:id>', methods=['PUT'])
@jwt_required()
def share_recipe_chat(id):
print("ID DE LA RECETA: ", id)
jwt_claims = get_jwt()
print(jwt_claims)
    # check whether the user_id exists in the jwt_claims
if "users_id" not in jwt_claims:
return jsonify({"msg": "User not found"}), 401
user_id = jwt_claims["users_id"]
print("ID DEL USUARIO: ", user_id)
if user_id != jwt_claims["users_id"]:
return jsonify({"msg": "Unauthorized user"}), 401
recipe = RecipeChat.query.filter_by(user_id=user_id, id=id).first()
if recipe:
recipe.share = True
db.session.commit()
return jsonify(recipe.serialize()), 200
else:
raise APIException("Recipe not found", status_code=404)
@chat.route('/UnShareRecipeChat/<int:id>', methods=['PUT'])
@jwt_required()
def unshare_recipe_chat(id):
print("ID DE LA RECETA: ", id)
jwt_claims = get_jwt()
print(jwt_claims)
    # check whether the user_id exists in the jwt_claims
if "users_id" not in jwt_claims:
return jsonify({"msg": "User not found"}), 401
user_id = jwt_claims["users_id"]
print("ID DEL USUARIO: ", user_id)
if user_id != jwt_claims["users_id"]:
return jsonify({"msg": "Unauthorized user"}), 401
recipe = RecipeChat.query.filter_by(user_id=user_id, id=id).first()
if recipe:
recipe.share = False
db.session.commit()
return jsonify(recipe.serialize()), 200
else:
raise APIException("Recipe not found", status_code=404)
# ADDITIONAL ROUTES FOR THE CHATBOT: NOT IN USE
@chat.route('/chatgpt', methods=['POST'])
def open_ai():
body =request.get_json()
prompt = "Eres una pagina web de recetas que responde con descripcion de la receta, una lista de ingredientes y un paso a paso para preparar la receta solicitada por el usuario: "+ body['prompt']
completation = openai.Completion.create(engine="text-davinci-003",
prompt=prompt,
n=1,
max_tokens=1024)
#print(completation.choices[0])
print(completation.choices[0].text)
response = {
"message":completation.choices[0].text
}
return jsonify(response), 200
# MPT-7b : 64k tokens, ggml, q4_0, 128bits 4Q
# Oobaboonga, Koboldcpp
@chat.route('/imageRecipe', methods=['POST'])
def image_recipe():
data = request.get_json()
prompt = data.get('prompt', 'a white siamese cat')
response = openai.Image.create(
prompt=prompt,
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url']
    # Download the image
img_data = requests.get(image_url).content
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Save the image on your server (update 'path/to/save/image' to the directory where you want to store the images)
image_path = os.path.join('src/front/img', f'{prompt.replace(" ", "_")}_{timestamp}.jpg')
with open(image_path, 'wb') as handler:
handler.write(img_data)
    # Create a new entry in the database
new_recipe_chat = RecipeChat(
name="nombre de la receta", # actualiza esto
description="descripción de la receta", # actualiza esto
user_id=1, # actualiza esto
user_query=prompt,
        image_of_recipe=image_path  # now this is the local path of the image on your server
)
    # Add the new entry and commit it
db.session.add(new_recipe_chat)
db.session.commit()
    # Return the image path and the recipe ID in the response
return jsonify({"image_path": image_path, "recipe_id": new_recipe_chat.id})
# Route to delete a chatbot recipe
@chat.route('/DeleteRecipeChat/<int:id>', methods=['DELETE'])
@jwt_required()  # A valid token is required to access this route.
def delete_recipe_chat(id):
    # Get the user ID from the JWT token claims.
jwt_claims = get_jwt()
# user_id = jwt_claims["user_id"]
if "users_id" not in jwt_claims:
return jsonify({"msg": "User not found"}), 401
user_id = jwt_claims["users_id"]
if user_id != jwt_claims["users_id"]:
return jsonify({"msg": "Unauthorized user"}), 401
    # Look up the recipe for this user in the database using the obtained ID.
recipe = RecipeChat.query.filter_by(user_id=user_id, id=id).first()
    # Delete the recipe from the database and commit the changes.
db.session.delete(recipe)
db.session.commit()
    # Return a JSON message and HTTP status code 200 (OK).
return jsonify({"message": "receta borrada"}), 200
| [
"Eres una pagina web de recetas que responde con descripcion de la receta de una parráfo, una lista de ingredientes y un paso a paso para preparar la receta solicitada por el usuario: PLACEHOLDER",
"Eres una pagina web de recetas que responde con descripcion de la receta, una lista de ingredientes y un paso a paso para preparar la receta solicitada por el usuario: PLACEHOLDER",
"a white siamese cat"
] |
2024-01-10 | josewilmerDR/tossupp | src~api~routesRecipe.py | import os
import openai
from flask import Flask, redirect, render_template, request, url_for, jsonify
import requests
# app = Flask(__name__)
openai.api_key = os.getenv("OPENAI_API_KEY")
from flask import Flask, request, jsonify, url_for, Blueprint, current_app
from api.modelsChat import db, RecipeChat
from api.recipe import Recipe
# Recipe, Like, Coment, Favorito, RecipeIngredient
from api.user import User
from api.token_bloked_list import TokenBlokedList
from api.favoritos import Favorito
from api.utils import generate_sitemap, APIException
from api.extensions import jwt, bcrypt
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
import re
# FOR DATE AND TIME OPERATIONS.
from datetime import date, time, datetime, timezone, timedelta  # timedelta is used for subtracting hours.
# FOR HANDLING ENCRYPTION OF THE INFORMATION. FLASK, REQUEST AND JSONIFY ARE ALSO REQUIRED, BUT THOSE WERE ALREADY IMPORTED ABOVE.
from flask_jwt_extended import get_jwt
from flask_jwt_extended import JWTManager
# from werkzeug.utils import secure_filename
import cloudinary
import cloudinary.uploader
import cloudinary.api
rrecipe = Blueprint('rrecipe', __name__)
# Configure Cloudinary
cloudinary.config(
cloud_name = os.getenv("CLOUDINARY_CLOUD_NAME"),
api_key = os.getenv("CLOUDINARY_API_KEY"),
api_secret = os.getenv("CLOUDINARY_API_SECRET"),
api_proxy = "http://proxy.server:9999",
secure = True
)
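# Blueprint overview: manual recipe routes. They let a logged-in user upload a recipe with an
# image (stored in Cloudinary), optionally share it, and list their own or all shared manual
# (non-AI-generated) recipes.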
# Handle/serialize errors like a JSON object
@rrecipe.errorhandler(APIException)
def handle_invalid_usage(error):
return jsonify(error.to_dict()), error.status_code
# Token verification function:
def verificacionToken(identity):
jti = identity["jti"]
token = TokenBlokedList.query.filter_by(token=jti, is_blocked=True).first()
if token:
        return True  # token is blocked
else:
        return False  # token is not blocked
# ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
# def allowed_file(filename):
# return '.' in filename and \
# filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@rrecipe.route('/AddRecipe', methods=['POST'])
@jwt_required()
def add_recipe():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
if 'image_of_recipe' not in request.files:
raise APIException("No image to upload")
if 'description' not in request.form:
raise APIException("No description to upload")
if 'name' not in request.form:
raise APIException("No user_query to upload")
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
image_cloudinary_url = cloudinary.uploader.upload(
request.files['image_of_recipe'],
public_id = f'{request.form.get("name").replace(" ", "_")}_{timestamp}',
)['url'] # Extract the 'url' from the returned dictionary
new_recipe_chat = RecipeChat(
name=request.form.get("name"), # actualiza esto
description=request.form.get("description"),
user_query=request.form.get("name"),
image_of_recipe=image_cloudinary_url, # now this is the URL of the image in Cloudinary
share=False,
generated_by_ia=False,
user_id=user,
)
    # Add the new entry and commit it
db.session.add(new_recipe_chat)
db.session.commit()
    # Return the recipe, the image URL, and the recipe ID in the response
return jsonify({"recipe": request.form.get("user_query"), "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
@rrecipe.route('/AddAndShareRecipe', methods=['POST'])
@jwt_required()
def add_and_share_recipe():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
if 'image_of_recipe' not in request.files:
raise APIException("No image to upload")
if 'description' not in request.form:
raise APIException("No description to upload")
if 'name' not in request.form:
raise APIException("No user_query to upload")
    # Get a timestamp and format it as a string
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
image_cloudinary_url = cloudinary.uploader.upload(
request.files['image_of_recipe'],
public_id = f'{request.form.get("name").replace(" ", "_")}_{timestamp}',
)['url'] # Extract the 'url' from the returned dictionary
new_recipe_chat = RecipeChat(
name=request.form.get("name"), # actualiza esto
description=request.form.get("description"),
user_query=request.form.get("name"),
image_of_recipe=image_cloudinary_url, # now this is the URL of the image in Cloudinary
share=True,
generated_by_ia=False,
user_id=user,
)
    # Add the new entry and commit it
db.session.add(new_recipe_chat)
db.session.commit()
    # Return the recipe, the image URL, and the recipe ID in the response
return jsonify({"recipe": request.form.get("user_query"), "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
@rrecipe.route('/AllManuelRecipes', methods=['GET'])
@jwt_required()
def get_all_manual_recipes():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
manual_recipes = RecipeChat.query.filter_by(generated_by_ia=False, user_id=user).all()
manual_recipes = list(map(lambda item: item.serialize(), manual_recipes))
print(manual_recipes)
return jsonify(manual_recipes), 200
@rrecipe.route('/AllShareRecipesManual', methods=['GET'])
@jwt_required()
def get_all_share_recipes_manual():
jwt_claims = get_jwt()
print(jwt_claims)
user = jwt_claims["users_id"]
print("el id del USUARIO:",user)
share_recipes = Recipe.query.filter_by(share=True).all()
share_recipes = list(map(lambda item: item.serialize(), share_recipes))
print(share_recipes)
return jsonify(share_recipes), 200
# @chat.route('/AddRecipe', methods=['POST'])
# @jwt_required()
# def add_recipe():
# jwt_claims = get_jwt()
# print(jwt_claims)
# user = jwt_claims["users_id"]
# print("el id del USUARIO:",user)
# if 'image_of_recipe' not in request.files:
# raise APIException("No image to upload")
# if 'description' not in request.form:
# raise APIException("No description to upload")
# if 'user_query' not in request.form:
# raise APIException("No user_query to upload")
# # Consigue un timestamp y formatea como string
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# image_cloudinary_url = cloudinary.uploader.upload(
# request.files['image_of_recipe'],
# public_id = f'{request.form.get("user_query").replace(" ", "_")}_{timestamp}',
# )
# new_recipe_chat = RecipeChat(
# name="nombre de la receta", # actualiza esto
# description=request.form.get("description"),
# user_id=user,
# user_query=request.form.get("user_query"),
# image_of_recipe=image_cloudinary_url, # ahora esto es la URL de la imagen en Cloudinary
# share=False,
# )
# # Añadir y hacer commit a la nueva entrada
# db.session.add(new_recipe_chat)
# db.session.commit()
# # Retornar la receta, la URL de la imagen y el ID de la receta en la respuesta
# return jsonify({"recipe": request.form.get("user_query"), "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
# COMMENTED OUT TO TEST THE ADDRECIPE ROUTE
# @chat.route('/AllShareRecipes', methods=['GET'])
# @jwt_required()
# def get_all_share_recipes():
# jwt_claims = get_jwt()
# print(jwt_claims)
# user = jwt_claims["users_id"]
# print("el id del USUARIO:",user)
# share_recipes = RecipeChat.query.filter_by(share=True).all()
# share_recipes = list(map(lambda item: item.serialize(), share_recipes))
# print(share_recipes)
# return jsonify(share_recipes), 200
# @chat.route('/EditRecipeChat', methods=['POST'])
# @jwt_required()
# def edit_recipe_chat():
# jwt_claims = get_jwt()
# print(jwt_claims)
# user = jwt_claims["users_id"]
# print("el id del USUARIO:",user)
# id = request.form.get("id")
# print("ID DE RECETA:", id)
# if 'image_of_recipe' not in request.files:
# raise APIException("No image to upload")
# if 'description' not in request.form:
# raise APIException("No description to upload")
# if 'user_query' not in request.form:
# raise APIException("No user_query to upload")
# if 'id' not in request.form:
# raise APIException("No id to upload")
# # Consigue un timestamp y formatea como string
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# result = cloudinary.uploader.upload(
# request.files['image_of_recipe'],
# # public_id=f'recipe/{user.id}/{request.form.get("user_query")}',
# # public_id=f'recipe/user/image_of_recipe',
# public_id = f'{request.form.get("user_query").replace(" ", "_")}_{timestamp}',
# #Para darle un tamaño específico a la imagen:
# # crop='limit',
# # width=450,
# # height=450,
# # eager=[{
# # 'width': 200, 'height': 200,
# # 'crop': 'thumb', 'gravity': 'face',
# # 'radius': 100
# # },
# # ],
# # tags=['profile_picture']
# )
# my_image = RecipeChat.query.get(id)
# my_image.image_of_recipe = result['secure_url']
# my_image.description = request.form.get("description")
# my_image.user_query = request.form.get("user_query")
# my_image.user_id = user
# db.session.add(my_image)
# db.session.commit()
# return jsonify(my_image.serialize()), 200
# @chat.route('/getChatHistory', methods=['GET'])
# @jwt_required()
# def get_chat_history():
# jwt_claims = get_jwt()
# print(jwt_claims)
# user_id = jwt_claims["users_id"]
# recipes = RecipeChat.query.filter_by(user_id=user_id).all()
# recipes = list(map(lambda item: item.serialize(), recipes))
# print(recipes)
# return jsonify(recipes), 200
# @chat.route('/recipe', methods=['POST'])
# @jwt_required()
# def generate_recipe():
# jwt_claims = get_jwt()
# print(jwt_claims)
# user_id = jwt_claims["users_id"]
# print(user_id)
# data = request.get_json()
# prompt = "Eres una pagina web de recetas que responde con descripcion de la receta de una parráfo, una lista de ingredientes y un paso a paso para preparar la receta solicitada por el usuario: "+ data['prompt']
# # Genera la receta
# completion = openai.Completion.create(
# engine="text-davinci-003",
# prompt=prompt,
# n=1,
# max_tokens=1024
# )
# recipe_text = completion.choices[0].text
# # Genera la imagen
# response = openai.Image.create(
# prompt=data['prompt'],
# n=1,
# size="1024x1024"
# )
# image_url = response['data'][0]['url']
# # Descarga la imagen
# img_data = requests.get(image_url).content
# # Consigue un timestamp y formatea como string
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# # Guarda la imagen en Cloudinary
# upload_result = cloudinary.uploader.upload(
# img_data,
# public_id = f'{data["prompt"].replace(" ", "_")}_{timestamp}',
# resource_type = "auto"
# )
# image_cloudinary_url = upload_result['url']
# # Crear una nueva entrada en la base de datos
# new_recipe_chat = RecipeChat(
# name="nombre de la receta", # actualiza esto
# description=recipe_text,
# user_id=user_id,
# user_query=data['prompt'],
# image_of_recipe=image_cloudinary_url, # ahora esto es la URL de la imagen en Cloudinary
# share=False,
# )
# # Añadir y hacer commit a la nueva entrada
# db.session.add(new_recipe_chat)
# db.session.commit()
# # Retornar la receta, la URL de la imagen y el ID de la receta en la respuesta
# return jsonify({"recipe": recipe_text, "image_url": image_cloudinary_url, "recipe_id": new_recipe_chat.id})
# @chat.route('/ShareRecipeChat/<int:id>', methods=['PUT'])
# @jwt_required()
# def share_recipe_chat(id):
# print("ID DE LA RECETA: ", id)
# jwt_claims = get_jwt()
# print(jwt_claims)
# #comprobar si el user_id existe en las jwt_claims
# if "users_id" not in jwt_claims:
# return jsonify({"msg": "User not found"}), 401
# user_id = jwt_claims["users_id"]
# print("ID DEL USUARIO: ", user_id)
# if user_id != jwt_claims["users_id"]:
# return jsonify({"msg": "Unauthorized user"}), 401
# recipe = RecipeChat.query.filter_by(user_id=user_id, id=id).first()
# if recipe:
# recipe.share = True
# db.session.commit()
# return jsonify(recipe.serialize()), 200
# else:
# raise APIException("Recipe not found", status_code=404)
# @chat.route('/UnShareRecipeChat/<int:id>', methods=['PUT'])
# @jwt_required()
# def unshare_recipe_chat(id):
# print("ID DE LA RECETA: ", id)
# jwt_claims = get_jwt()
# print(jwt_claims)
# #comprobar si el user_id existe en las jwt_claims
# if "users_id" not in jwt_claims:
# return jsonify({"msg": "User not found"}), 401
# user_id = jwt_claims["users_id"]
# print("ID DEL USUARIO: ", user_id)
# if user_id != jwt_claims["users_id"]:
# return jsonify({"msg": "Unauthorized user"}), 401
# recipe = RecipeChat.query.filter_by(user_id=user_id, id=id).first()
# if recipe:
# recipe.share = False
# db.session.commit()
# return jsonify(recipe.serialize()), 200
# else:
# raise APIException("Recipe not found", status_code=404)
# # ADDITIONAL ROUTES FOR THE CHATBOT: NOT IN USE
# @chat.route('/chatgpt', methods=['POST'])
# def open_ai():
# body =request.get_json()
# prompt = "Eres una pagina web de recetas que responde con descripcion de la receta, una lista de ingredientes y un paso a paso para preparar la receta solicitada por el usuario: "+ body['prompt']
# completation = openai.Completion.create(engine="text-davinci-003",
# prompt=prompt,
# n=1,
# max_tokens=1024)
# #print(completation.choices[0])
# print(completation.choices[0].text)
# response = {
# "message":completation.choices[0].text
# }
# return jsonify(response), 200
# # MPT-7b : 64k tokens, ggml, q4_0, 128bits 4Q
# # Oobaboonga, Koboldcpp
# @chat.route('/imageRecipe', methods=['POST'])
# def image_recipe():
# data = request.get_json()
# prompt = data.get('prompt', 'a white siamese cat')
# response = openai.Image.create(
# prompt=prompt,
# n=1,
# size="1024x1024"
# )
# image_url = response['data'][0]['url']
# # Descarga la imagen
# img_data = requests.get(image_url).content
# # Consigue un timestamp y formatea como string
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# # Guarda la imagen en tu servidor (actualiza 'path/to/save/image' al directorio donde quieres guardar las imágenes)
# image_path = os.path.join('src/front/img', f'{prompt.replace(" ", "_")}_{timestamp}.jpg')
# with open(image_path, 'wb') as handler:
# handler.write(img_data)
# # Crear una nueva entrada en la base de datos
# new_recipe_chat = RecipeChat(
# name="nombre de la receta", # actualiza esto
# description="descripción de la receta", # actualiza esto
# user_id=1, # actualiza esto
# user_query=prompt,
# image_of_recipe=image_path # ahora esto es la ruta local de la imagen en tu servidor
# )
# # Añadir y hacer commit a la nueva entrada
# db.session.add(new_recipe_chat)
# db.session.commit()
# # Retornar la ruta de la imagen y el ID de la receta en la respuesta
# return jsonify({"image_path": image_path, "recipe_id": new_recipe_chat.id})
| [] |
2024-01-10 | LordMojmir/ScreenGPT | ai_req.py | import os
import json
import pandas as pd
from openai import OpenAI
from dotenv import load_dotenv
# Load environment variables
load_dotenv()
def query_custom_gpt(doc_text, message):
"""
Sends a prompt to the GPT model and returns the response.
:param doc_text: The document text to be analyzed.
    :param message: The user's message or question for the assistant.
:return: The response text from the model.
"""
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
system_message = f"You are a helpful AI-assistant that will help the user with this {doc_text}"
chat = [
{"role": "system", "content": system_message},
{"role": "user", "content": message},
]
try:
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=chat,
max_tokens=200
)
return response.choices[0].message.content
except Exception as e:
return str(e)
def convert_json_to_object(json_string):
"""
Converts a JSON string to a Python dictionary.
:param json_string: The JSON string to be converted.
:return: A Python dictionary representing the JSON data.
"""
try:
python_object = json.loads(json_string)
return python_object
except json.JSONDecodeError as e:
return f"Error decoding JSON: {str(e)}"
def obj2excel(data_list: object, excel_file_path: str) -> bool:
    df = pd.json_normalize(data_list)
    df.to_excel(excel_file_path, index=False)  # to_excel returns None, so return an explicit success flag
    return True
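# Example usage sketch (file name and rows are illustrative, not from the original project):
#   rows = [{"resource": "water", "value": 2}, {"resource": "timber", "value": 0}]
#   obj2excel(rows, "resources.xlsx")  # pd.json_normalize flattens the dicts into one spreadsheet row each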
def save_json_to_file(json_obj, file_path):
try:
with open(file_path, 'w') as json_file:
json.dump(json_obj, json_file, indent=4)
print(f"JSON data saved to {file_path}")
except Exception as e:
print(f"Error saving JSON to file: {e}")
#
# screen_content = """File Edit View Navigate Code Refactor Run Iools Git Window Help ScreenGPT - main py C:IWindowsIsystem32 cmde: ScreenGPT mainpy Current File 0 Git: 2 Project 0 z * | & main py openai py (OCR2) C:|Userslhorvazpip install customtkinter ScreenGPT €: Userslhorval Documents| Pri 4 3 A 6 Collecting customtkinter env 48 submit_button ctk.CTkButton(root , text="Submit" command-Lambda: on_supmitler [ Downloading customtkinter-5.2.1-py3-none-any.whl metadata (652 bytes) gitignore 49 submit_button.pack(pady-10) Collecting darkdetect (from customtkinter) J main py DownLoading darkdetect-0 8.0-py3-none-any.whl (9 0 kB) openaipy 50 Downloading customtkinter-5.2.1-py3-none-any.whl (295 kB) READMEmd 296.0/296 0 kB 4.5 MB/s eta 0:00:00 screenshot png 51 root.mainZoop() 8 Installing collected packages = darkdetect customtkinter ] External Libraries 52 Successfully installed customtkinter-5.2.1 darkdetect-0.8.0 Scratches and Consoles 8 Mojmir Horvath (OCR2) C: |Userslhorva pip install pynput M 53 Idef for_canonical(f): { Collecting pynput Downl oading pynput-1.7.6-py2.py3-none-any.whl (89 kB) 54 listener keyboard.Listener(on_press-Lambda k: k) 89.2/89.2 kB 1.3 MB/s eta 0:00:00 Requirement already satisfied: six in c:luserslhorva anaconda3 lenvslocr2|liblsite-packages (from pynput) (1.16.0) 55 return Lambda k: f(listener.canonical(k)_ Installing collected packages pynput 56 Successfully installed pynput-1.7.6 57 hotkey keyboard. HotKey( (OCR2) C: |Userslhorva>"S 58 {keyboard.Key.ctrl, keyboard.KeyCode. from_char( 's' ) , keyboard.KeyCode. from char is not recognized as an internal or external command operable program or batch file 59 on_activate) (OCR2) C:|Userslhorvazpip install openai 60 Requirement already satisfied: openai in c:luserslhorvalanaconda3lenvs ocr2|liblsite-packages (1.3.6) 61 with keyboard.Listener( Requirement already satisfied anyio<4,>-3.5.0 in luserslhorvalanaconda3lenvs ocr2|liblsite-packages (from openai) (3.5.0) Requirement already satisfied distro<2,>=1.7.0 in c:luserslhorva lanaconda3lenvslocr2|liblsite-packages (from openai) (1.8.0) 62 on_press-for_canonical(hotkey.press) Requirement already satisfied httpx<l,>-0.23 0 in c:luserslhorva anaconda3 envs ocr?iliblsite-packages (from openai) (0.23.0) 63 on_release-for_canonical(hotkey.release) ) as listener: Requirement already satisfied pydantic<3,>-1.9.0 in c:luserslhorva anaconda3lenvslocr2|liblsite-packages (from openai) (1.10.12) Requirement already satisfied sniffio in c:luserslhorvalanaconda3 envs ocr2iliblsite packages (from openai) (1.2.0) 64 listener. 
join() Requirement already satisfied tqdmz4 in c:luserslhorva anaconda3 envs ocr?iliblsite-packages (from openai) (4.65.0) Requirement already satisfied= typing-extensions<5 >=4.5 in c:luserslhorva anaconda3lenvs ocr2|liblsite packages (from openai) (4.7.1 65 66 Requirement already satisfied: idna>-2 8 in c:luserslhorvalanaconda3lenvs ocr2|liblsite-packages (from anyio<4_ >=3 5 0->openai) (3.4) Requirement already satisfied certifi in c:luserslhorva anaconda3 envs ocr2|liblsite packages (from httpx<l,>-0.23.0->openai) (2023 67 11.17) Requirement already satisfied: rfc3986<2,>=1.3 in c:luserslhorvalanaconda3 envs ocr2|liblsite-packages (from rfc3986[idna2008]<2,>-1_ 3->httpx<l >=0.23 0->openai) (1.4.0) Requirement already satisfied: httpcore<0.16.0,>-0.15.0 in c:luserslhorva anaconda3lenvslocr2|liblsite-packages (from httpx<l,>-0.23_ 0->openai) (0.15 0) Requirement already satisfied: colorama in c:luserslhorvalanaconda3lenvs locr2 liblsite-packages (from tqdm-4->openai) (0.4.6) Requirement already satisfied hll<0.13,>=0.11 in c:luserslhorvalanaconda3' envs ocr?iliblsite-packages (from httpcore<0.16.0,>-0.15.0 ~>httpx<l,>=0.23 0->openai) (0.12.0) for_canonicalo lambda (k) Run: main X (OCR2) C: |Userslhorva> C: |UsersIhorvalanaconda3|envs | OCR2Ipython.exe €: |Userslhorva|Documents|Private|Coding|Projects| ScreenGPTImain 1 1 P Git Run ETODO Problems 2 Terminal Python Packages Python Console Services Packages installed successfully: Installed packages: 'OpenAI' (3 minutes ago) 55.46 CRLF UTF-8 spaces Python 3.11 (OCRZ) (2) P main 9PC ENG 19.08 Search c) 8 Stark bewolkt INTL 04/01/2024"""
# user_message = "how to istall pytorch"
# print(query_custom_gpt(screen_content, user_message)) | [
"You are a helpful AI-assistant that will help the user with this PLACEHOLDER"
] |
2024-01-10 | nota-github/retrieved_collection_compression_BOOSTCAMP | run_ralm.py | """Ask a question to the notion database."""
import sys
import argparse
from typing import List
from langchain.chat_models import ChatOpenAI # for `gpt-3.5-turbo` & `gpt-4`
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.prompts import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import BaseRetriever, Document
import gradio as gr
from retrieve import Retriever
DEFAULT_QUESTION = "Wikipedia 2018 english dump에서 궁금한 점을 질문해주세요.\n예를들어 \n\n- Where are mucosal associated lymphoid tissues present in the human body and why?\n- When did korean drama started in the philippines?\n- When did the financial crisis in greece start?"
TEMPERATURE = 0
class LangChainCustomRetrieverWrapper(BaseRetriever):
def __init__(self, args):
self.args = args
self.retriever = Retriever(args) # DensePhrase
def get_relevant_documents(self, query: str) -> List[Document]:
"""Get texts relevant for a query.
Args:
query: string to find relevant texts for
Returns:
List of relevant documents
"""
print(f"query = {query}")
# retrieve
results = self.retriever.retrieve(single_query_or_queries_dict=query)
        # build a list of Document objects (one per retrieved passage) with indexed source names
return [
Document(page_content=result, metadata={"source": f"source_{idx}"})
for idx, result in enumerate(results)
]
async def aget_relevant_documents(
self, query: str
) -> List[Document]: # abstractmethod
raise NotImplementedError
class RaLM:
def __init__(self, args):
self.args = args
self.initialize_ralm()
def initialize_ralm(self):
# initialize custom retriever
self.retriever = LangChainCustomRetrieverWrapper(self.args)
# prompt for RaLM
system_template = """Use the following pieces of context to answer the users question.
Take note of the sources and include them in the answer in the format: "SOURCES: source1 source2", use "SOURCES" in capital letters regardless of the number of sources.
Always try to generate answer from source.
----------------
{summaries}"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}
llm = ChatOpenAI(model_name=self.args.model_name, temperature=TEMPERATURE)
self.chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=self.retriever,
return_source_documents=True,
reduce_k_below_max_tokens=True,
chain_type_kwargs=chain_type_kwargs,
)
def run_chain(self, question, force_korean=False):
if force_korean:
question = f"{question} 본문을 참고해서 한글로 대답해줘"
result = self.chain({"question": question})
# postprocess
result["answer"] = self.postprocess(result["answer"])
if isinstance(result["sources"], str):
result["sources"] = self.postprocess(result["sources"])
result["sources"] = result["sources"].split(", ")
result["sources"] = [src.strip() for src in result["sources"]]
# print result
self.print_result(result)
return result
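    # Shape of the `result` dict returned above and consumed by print_result (keys come from
    # RetrievalQAWithSourcesChain with return_source_documents=True): "answer" (str),
    # "sources" (list[str] after the split above), and "source_documents"
    # (list[Document] whose metadata["source"] matches the entries in "sources").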
def print_result(self, result):
print(f"Answer: {result['answer']}")
print(f"Sources: ")
print(result["sources"])
assert isinstance(result["sources"], list)
nSource = len(result["sources"])
for i in range(nSource):
source_title = result["sources"][i]
print(f"{source_title}: ")
if "source_documents" in result:
for j in range(len(result["source_documents"])):
if result["source_documents"][j].metadata["source"] == source_title:
print(result["source_documents"][j].page_content)
break
def postprocess(self, text):
# remove final parenthesis (bug with unknown cause)
if (
text.endswith(")")
or text.endswith("(")
or text.endswith("[")
or text.endswith("]")
):
text = text[:-1]
return text.strip()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Ask a question to the notion DB.")
# General
parser.add_argument(
"--model_name",
type=str,
default="gpt-3.5-turbo-16k-0613",
help="model name for openai api",
)
# Retriever: Densephrase
parser.add_argument(
"--query_encoder_name_or_dir",
type=str,
default="princeton-nlp/densephrases-multi-query-multi",
help="query encoder name registered in huggingface model hub OR custom query encoder checkpoint directory",
)
parser.add_argument(
"--index_name",
type=str,
default="start/1048576_flat_OPQ96_small",
help="index name appended to index directory prefix",
)
args = parser.parse_args()
# to prevent collision with DensePhrase native argparser
sys.argv = [sys.argv[0]]
# initialize class
app = RaLM(args)
def question_answer(question):
result = app.run_chain(question=question, force_korean=False)
return result[
"answer"
], "\n######################################################\n\n".join(
[
f"Source {idx}\n{doc.page_content}"
for idx, doc in enumerate(result["source_documents"])
]
)
# launch gradio
gr.Interface(
fn=question_answer,
inputs=gr.inputs.Textbox(default=DEFAULT_QUESTION, label="질문"),
outputs=[
gr.inputs.Textbox(default="챗봇의 답변을 표시합니다.", label="생성된 답변"),
gr.inputs.Textbox(
default="prompt에 사용된 검색 결과들을 표시합니다.", label="prompt에 첨부된 검색 결과들"
),
],
title="지식기반 챗봇",
theme="dark-grass",
description="사용자의 지식베이스에 기반해서 대화하는 챗봇입니다.\n본 예시에서는 wikipedia dump에서 검색한 후 이를 바탕으로 답변을 생성합니다.\n\n retriever: densePhrase, generator: gpt-3.5-turbo-16k-0613 (API)",
).launch(share=True)
| [
"Use the following pieces of context to answer the users question.\n Take note of the sources and include them in the answer in the format: \"SOURCES: source1 source2\", use \"SOURCES\" in capital letters regardless of the number of sources.\n Always try to generate answer from source.\n ----------------\n {summaries}",
"{question}"
] |
2024-01-10 | Rachel-2000/GOLVID | batchtest.py | import openai
import argparse
from modeltester import ModelTester
def main(args):
# get a tester object with data
openai.api_key = args.key
print("Parsing " + args.dataset + " ...")
tester = ModelTester(
log_path = args.log_path, # .log_structured_csv
result_path=args.result_path, # .result_csv
map_path=args.map_path, # .map_json
        dataset = args.dataset, # HDFS, Spark, BGL, Windows, Linux, Android, Mac, Hadoop, HealthApp, OpenSSH, Thunderbird, Proxifier, Apache, HPC, Zookeeper, OpenStack
emb_path = args.emb_path, # embedding
cand_ratio = args.cand_ratio, # ratio of candidate set
split_method = args.split_method, # random or DPP
order_method = args.order_method, # random or KNN
permutation = args.permutation, # permutation
warmup = args.warmup, # warmup or not
subname = args.subname, # subname of the files
)
tester.textModelBatchTest(model = args.model,
model_name = args.model_name,
limit = args.limit, # number of logs for testing, <= 2000*(1-cand_ratio)
N = args.N, # number of examples in the prompt
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-key', type=str, help='openai key')
parser.add_argument('--log_path', type=str, default='logs', help='log path')
parser.add_argument('--result_path', type=str, default='results', help='result path')
parser.add_argument('--map_path', type=str, default='maps', help='map path')
parser.add_argument('--dataset', type=str, default='HDFS', help='dataset name')
parser.add_argument('--emb_path', type=str, default='embeddings', help='embedding path')
parser.add_argument('--cand_ratio', type=float, default=0.1, help='ratio of candidate set')
parser.add_argument('--split_method', type=str, default='DPP', help='random or DPP')
parser.add_argument('--order_method', type=str, default='KNN', help='random or KNN')
parser.add_argument('--permutation', type=str, default='ascend', help='ascend, descend, or random')
parser.add_argument('--warmup', type=bool, default=False, help='warmup or not')
parser.add_argument('--model', type=str, default='curie', help='model name')
parser.add_argument('--model_name', type=str, default='gptC', help='model name')
parser.add_argument('--limit', type=int, default=1800, help='number of logs for testing, <= 2000*(1-cand_ratio)')
parser.add_argument('--N', type=int, default=5, help='number of examples in the prompt')
parser.add_argument('--subname', type=str, default='', help='subname of the files')
args = parser.parse_args()
main(args)
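# Example invocation (values are illustrative, not prescribed by the repo):
#   python batchtest.py -key $OPENAI_API_KEY --dataset HDFS --model curie --limit 1800 --N 5
# Any flag that is omitted falls back to the argparse defaults defined above.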
| [] |
2024-01-10 | Rachel-2000/GOLVID | modeltester_no_locators.py | import json
import os
import pandas as pd
import re
import time
import openai
import argparse
import tiktoken as tt
from dpp import *
from tqdm import tqdm
from random import sample
from sklearn.model_selection import train_test_split
from openai.embeddings_utils import get_embedding, cosine_similarity
class ModelTesterNoLocators():
def __init__(self,
log_path,
result_path,
map_path,
dataset,
emb_path,
cand_ratio,
split_method, # random or DPP
order_method, # random or KNN
warmup, # warmup or not
subname, # subname of the files
):
self.log_path = log_path + "/{}/{}_2k.log_structured.csv".format(dataset,dataset)
self.result_path = result_path
self.map_path = map_path + "/{}_{}_lookupmap.json".format(cand_ratio,dataset)
self.dataset = dataset
self.emb_path = emb_path + "/{}.json".format(dataset)
self.cand_ratio = cand_ratio
self.split_method = split_method
self.order_method = order_method
self.warmup = warmup
self.subname = subname
# split candidate set
self.log_test, self.log_cand, self.gt_test, self.gt_cand = self.splitCandidates(self.log_path, self.cand_ratio, self.split_method)
# build lookup map
self.lookUpMap = self.buildLookupMap(self.map_path)
# generate lookup map
def buildLookupMap(self, map_path):
# build lookup map
if (os.path.exists(map_path)):
print("Loading look up map of {} ...".format(self.dataset))
with open(map_path, "r") as file:
return json.load(file)
else: return self.generateLuMap(map_path)
    # extract raw log messages (the Content column) from the log_structured.csv file
def extractCsvContent(self, groundtruth_path):
dataframe = pd.read_csv(groundtruth_path)
content_list = dataframe['Content'].values.tolist()
return content_list
# extract groundtruth templates from log_structured.csv file
def extractCsvTemplate(self, groundtruth_path):
dataframe = pd.read_csv(groundtruth_path)
template_list = dataframe['EventTemplate'].values.tolist()
return template_list
# split the candidate set from raw logs
def splitCandidates(self, groundtruth_path, cand_ratio, method="random"):
log_list = self.extractCsvContent(groundtruth_path)
groundtruth_template = self.extractCsvTemplate(groundtruth_path)
if method == "random":
self.map_path += '_random.json'
# split randomly
log_test, log_cand, gt_test, gt_cand = train_test_split(log_list, groundtruth_template, test_size=cand_ratio, random_state=42)
elif method == "DPP":
# split with diversity
file = open(self.emb_path, "r")
emb_map = json.load(file)
file.close()
log_embs = []
for log in log_list:
log_embs.append(emb_map[log])
print(f"length of log embs is {len(log_embs)}")
candidate_idx = getDppIndex(log_embs, 2000, cand_ratio)
log_test, log_cand, gt_test, gt_cand = DPPsplit(log_list, groundtruth_template, candidate_idx)
return log_test, log_cand, gt_test, gt_cand
def generateEmbeddings(self, str_list):
# each embedding has length 2048
# engine: text-search-{ada, babbage, curie, davinci}-{query, doc}-001
# | code-search-{ada, babbage}-{code, text}-001
return [get_embedding(log, engine="text-search-babbage-query-001") for log in str_list]
    # generate a lookup map that records the cosine similarity
    # between each test log and every candidate log, sorted in descending order
def generateLuMap(self, look_up_map_path):
# get embeddings from embedding json file
print('Generating lookup map for {} ...'.format(self.dataset))
with open(self.emb_path, "r") as file:
emb_map = json.load(file)
test_embs = [emb_map[log] for log in self.log_test]
cand_embs = [emb_map[log] for log in self.log_cand]
lookUpMap = {}
for test_idx in tqdm(range(len(self.log_test))):
dis_dict = {}
for cand_idx in range(len(self.log_cand)):
dis_dict[cosine_similarity(test_embs[test_idx], cand_embs[cand_idx])] = cand_idx
# get a list in sorted key (descending order), key = cosine similarity
sorted_list = []
for key in sorted(dis_dict, reverse=True):
sorted_list.append(dis_dict[key])
# dict: {log_message : list of similar candidate indexes in order}
lookUpMap[self.log_test[test_idx]] = sorted_list
# write the map into a json file
with open(look_up_map_path, 'w') as file:
file.write(json.dumps(lookUpMap))
return lookUpMap
# find the N most similar logs to the input log
# the index represents the similar ranking
def getNearest(self, log, N=5):
cand_list = self.lookUpMap[log]
if self.order_method == 'random':
return sample(cand_list, N)
        # return the indexes of the N most similar log candidates
elif self.order_method == 'KNN':
return cand_list[:N]
# generate a prompt in str for a specific log message
def generatePrompt(self, log, nearest_num=5):
idxes = self.getNearest(log, nearest_num)
prompt = ""
# backward iteration
for i in range(len(idxes)-1,-1,-1):
# update: modify the prompt format to <prompt>:xx \n <extraction>:xx \n\n <prompt>: xx ...
prompt = prompt + "<prompt>:" + self.log_cand[idxes[i]].strip() + \
'\n<extraction>: ' + self.gt_cand[idxes[i]].strip() + ' \n\n'
similarist_gt = self.gt_cand[idxes[0]]
return prompt, similarist_gt
# compare if template is correctly extracted: if yes, return 1; else return 0
def compareTemplate(self, tpl_1, tpl_2):
token_list_1 = tpl_1.split()
token_list_2 = tpl_2.split()
if (len(token_list_1) != len(token_list_2)): return 0
length = len(token_list_1)
for i in range(length):
if (token_list_1[i] != token_list_2[i]): return 0
        return 1
# calculate parsing accuracy
def evaluatePA(self, result):
        # len(result) may be smaller than len(groundtruth)
length = len(result)
if length == 0: return 0
correct = 0
for i in range(length):
correct += self.compareTemplate(result[i], self.gt_test[i])
return correct/length
# correctly identified templates over total num of identified template
def evaluatePTA(self, result):
# generate a "template: log indexes list" mapping for groundtruth
oracle_tem_dict = {}
for idx in range(len(result)):
if self.gt_test[idx] not in oracle_tem_dict:
oracle_tem_dict[self.gt_test[idx]] = [idx]
else: oracle_tem_dict[self.gt_test[idx]].append(idx)
# generate mapping for identified template
result_tem_dict = {}
for idx in range(len(result)):
if result[idx] not in result_tem_dict:
result_tem_dict[result[idx]] = [idx]
else: result_tem_dict[result[idx]].append(idx)
correct_num = 0
for key in result_tem_dict.keys():
if key not in oracle_tem_dict: continue
else:
if oracle_tem_dict[key] == result_tem_dict[key]: correct_num += 1
return correct_num/len(result_tem_dict)
# correctly identified templates over total num of oracle template
def evaluateRTA(self, result):
oracle_tem_dict = {}
for idx in range(len(result)):
if self.gt_test[idx] not in oracle_tem_dict:
oracle_tem_dict[self.gt_test[idx]] = [idx]
else: oracle_tem_dict[self.gt_test[idx]].append(idx)
# generate mapping for identified template
result_tem_dict = {}
for idx in range(len(result)):
if result[idx] not in result_tem_dict:
result_tem_dict[result[idx]] = [idx]
else: result_tem_dict[result[idx]].append(idx)
correct_num = 0
for key in oracle_tem_dict.keys():
if key not in result_tem_dict: continue
else:
if oracle_tem_dict[key] == result_tem_dict[key]: correct_num += 1
return correct_num/len(oracle_tem_dict)
def writeResult(self, result, path, limit):
output = pd.DataFrame(data={"log": self.log_test[:limit], "template": result})
output.to_csv(path, index=False)
# extract result from model's response
def extractResultTemplate(self, text):
result = text.split('\n')[0] # only the first line.
return result
def textModelBatchTest(self, model, model_name, limit, N=5):
# list to store the model's parsing on each log message
enc = tt.encoding_for_model(model)
answer_list = []
instruction = "For each log after <prompt> tag, extract one log template\
(substitute variable tokens in the log as <*> and remain constant tokens to construct the template)\
and put the template after <extraction> tag."
self.result_path = self.result_path + "/{}_{}_result{}.csv".format(limit,self.dataset,self.subname)
# if the result file already exists, load it
if os.path.exists(self.result_path):
print("Result file already exists, loading ...")
answer_list = pd.read_csv(self.result_path)['template'].to_list()
else:
# if the result file does not exist, use api to generate result
print("Result file does not exist, generating result ...")
for line_idx in tqdm(range(len(self.log_test[:limit]))):
re_id = 0
temperature = 0
if line_idx >= limit: break
line = self.log_test[line_idx]
token_len = len(enc.encode(line.strip())) + 20
# get a prompt with five examples for each log message
prompt, similarist_gt = self.generatePrompt(line, nearest_num=N)
while True:
try:
response = openai.Completion.create(
model=model,
prompt=instruction + "\n\n\n" + prompt + "<prompt>:" + line.strip() + "\n<extraction>: ",
temperature=temperature,
max_tokens=token_len)
except: # if interrupt by request busy
print("Request busy, log {} is now waiting ...".format(line_idx))
re_id += 1
if re_id < 5:
time.sleep(0.1)
else:
result = similarist_gt
answer_list.append(result)
print("Too long waiting time, raw log: {}".format(line) + '\n')
break
else:
# if no exception, the model response a dict
# format for CodeX, GPT-D
# print(response)
# to avoid empty response
result = self.extractResultTemplate(response["choices"][0]["text"])
if result != "":
answer_list.append(result)
break
else:
if re_id >= 1:
result = similarist_gt
answer_list.append(result)
# print("Too long log message: {}".format(line) + '\n')
# print("Too long log error: token_len exceeds {}, stop increasing, using the similarist log message's tempate as prediction".format(token_len) + '\n')
# print("Raw ouput: {}".format(response["choices"][0]["text"]) + '\n')
# print("Similarist log template: {}".format(result) + '\n')
break
else:
token_len += 10
re_id += 1
temperature += 0.25
# print("token_len added to {}".format(token_len))
PA = self.evaluatePA(answer_list)
PTA = self.evaluatePTA(answer_list)
RTA = self.evaluateRTA(answer_list)
print("{}:\t PA:\t {:.6f}\t PTA:\t {:.6f}\t RTA:\t {:.6f}".format(self.dataset, PA, PTA, RTA))
f = open("benchmark.txt", 'a')
f.write("{}:\t PA:\t {:.6f}\t PTA:\t {:.6f}\t RTA:\t {:.6f}".format(self.dataset, PA, PTA, RTA) + '\n')
f.close()
self.writeResult(answer_list, self.result_path, limit)
return PA, PTA, RTA
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-key', type=str, help='openai key')
parser.add_argument('--log_path', type=str, default='logs', help='log path')
parser.add_argument('--result_path', type=str, default='results', help='result path')
parser.add_argument('--map_path', type=str, default='maps', help='map path')
parser.add_argument('--dataset', type=str, default='HDFS', help='dataset name')
parser.add_argument('--emb_path', type=str, default='embeddings', help='embedding path')
parser.add_argument('--cand_ratio', type=float, default=0.1, help='ratio of candidate set')
parser.add_argument('--split_method', type=str, default='DPP', help='random or DPP')
parser.add_argument('--order_method', type=str, default='KNN', help='random or KNN')
parser.add_argument('--warmup', type=bool, default=False, help='warmup or not')
parser.add_argument('--model', type=str, default='curie', help='model name')
parser.add_argument('--model_name', type=str, default='gptC', help='model name')
parser.add_argument('--limit', type=int, default=1800, help='number of logs for testing, <= 2000*(1-cand_ratio)')
parser.add_argument('--N', type=int, default=5, help='number of examples in the prompt')
parser.add_argument('--subname', type=str, default='', help='subname of the files')
args = parser.parse_args()
openai.api_key = args.key
print("Parsing " + args.dataset + " ...")
tester = ModelTesterNoLocators(
log_path = args.log_path, # .log_structured_csv
result_path=args.result_path, # .result_csv
map_path=args.map_path, # .map_json
        dataset = args.dataset, # HDFS, Spark, BGL, Windows, Linux, Android, Mac, Hadoop, HealthApp, OpenSSH, Thunderbird, Proxifier, Apache, HPC, Zookeeper, OpenStack
emb_path = args.emb_path, # embedding
cand_ratio = args.cand_ratio, # ratio of candidate set
split_method = args.split_method, # random or DPP
order_method = args.order_method, # random or KNN
warmup = args.warmup, # warmup or not
subname = args.subname, # subname of the files
)
tester.textModelBatchTest(model = args.model,
model_name = args.model_name,
limit = args.limit, # number of logs for testing, <= 2000*(1-cand_ratio)
N = args.N, # number of examples in the prompt
) | [
"\n\n\n",
"\n<extraction>: ",
" \n\n",
"EventTemplate"
] |
2024-01-10 | Rachel-2000/GOLVID | modeltester.py | import json
import os
import pandas as pd
import re
import time
import openai
import tiktoken as tt
from dpp import *
from tqdm import tqdm
from random import sample
from sklearn.model_selection import train_test_split
from openai.embeddings_utils import get_embedding, cosine_similarity
class ModelTester():
def __init__(self,
log_path,
result_path,
map_path,
dataset,
emb_path,
cand_ratio,
split_method, # random or DPP
order_method, # random or KNN
permutation,
warmup, # warmup or not
subname, # subname of the files
):
self.log_path = log_path + "/{}/{}_2k.log_structured.csv".format(dataset,dataset)
self.result_path = result_path
self.map_path = map_path + "/{}_{}_lookupmap.json".format(cand_ratio,dataset)
self.dataset = dataset
self.emb_path = emb_path + "/{}.json".format(dataset)
self.cand_ratio = cand_ratio
self.split_method = split_method
self.order_method = order_method
self.permutation = permutation
self.warmup = warmup
self.subname = subname
# split candidate set
self.log_test, self.log_cand, self.gt_test, self.gt_cand = self.splitCandidates(self.log_path, self.cand_ratio, self.split_method)
# build lookup map
self.lookUpMap = self.buildLookupMap(self.map_path)
# generate lookup map
def buildLookupMap(self, map_path):
# build lookup map
if (os.path.exists(map_path)):
print("Loading look up map of {} ...".format(self.dataset))
with open(map_path, "r") as file:
return json.load(file)
else: return self.generateLuMap(map_path)
    # extract raw log messages (the Content column) from the log_structured.csv file
def extractCsvContent(self, groundtruth_path):
dataframe = pd.read_csv(groundtruth_path)
content_list = dataframe['Content'].values.tolist()
return content_list
# extract groundtruth templates from log_structured.csv file
def extractCsvTemplate(self, groundtruth_path):
dataframe = pd.read_csv(groundtruth_path)
template_list = dataframe['EventTemplate'].values.tolist()
return template_list
# split the candidate set from raw logs
def splitCandidates(self, groundtruth_path, cand_ratio, method="random"):
log_list = self.extractCsvContent(groundtruth_path)
groundtruth_template = self.extractCsvTemplate(groundtruth_path)
if method == "random":
self.map_path += '_random.json'
# split randomly
log_test, log_cand, gt_test, gt_cand = train_test_split(log_list, groundtruth_template, test_size=cand_ratio, random_state=42)
elif method == "DPP":
# split with diversity
file = open(self.emb_path, "r")
emb_map = json.load(file)
file.close()
log_embs = []
for log in log_list:
log_embs.append(emb_map[log])
print(f"length of log embs is {len(log_embs)}")
candidate_idx = getDppIndex(log_embs, 2000, cand_ratio)
log_test, log_cand, gt_test, gt_cand = DPPsplit(log_list, groundtruth_template, candidate_idx)
return log_test, log_cand, gt_test, gt_cand
def generateEmbeddings(self, str_list):
# each embedding has length 2048
# engine: text-search-{ada, babbage, curie, davinci}-{query, doc}-001
# | code-search-{ada, babbage}-{code, text}-001
return [get_embedding(log, engine="text-search-babbage-query-001") for log in str_list]
    # generate a lookup map that records the cosine similarity
    # between each test log and every candidate log, sorted in descending order
def generateLuMap(self, look_up_map_path):
# get embeddings from embedding json file
print('Generating lookup map for {} ...'.format(self.dataset))
with open(self.emb_path, "r") as file:
emb_map = json.load(file)
test_embs = [emb_map[log] for log in self.log_test]
cand_embs = [emb_map[log] for log in self.log_cand]
lookUpMap = {}
for test_idx in tqdm(range(len(self.log_test))):
dis_dict = {}
for cand_idx in range(len(self.log_cand)):
dis_dict[cosine_similarity(test_embs[test_idx], cand_embs[cand_idx])] = cand_idx
# get a list in sorted key (descending order), key = cosine similarity
sorted_list = []
for key in sorted(dis_dict, reverse=True):
sorted_list.append(dis_dict[key])
# dict: {log_message : list of similar candidate indexes in order}
lookUpMap[self.log_test[test_idx]] = sorted_list
# write the map into a json file
with open(look_up_map_path, 'w') as file:
file.write(json.dumps(lookUpMap))
return lookUpMap
# find the N most similar logs to the input log
# the index represents the similar ranking
def getNearest(self, log, N=5):
cand_list = self.lookUpMap[log]
if self.order_method == 'random':
return sample(cand_list, N)
        # return the indexes of the N most similar log candidates
elif self.order_method == 'KNN':
result = cand_list[:N]
if self.permutation == 'ascend':
return result
elif self.permutation == 'descend':
result.reverse()
return result
elif self.permutation == 'random':
result = sample(result, N)
return result
# generate a prompt in str for a specific log message
def generatePrompt(self, log, nearest_num=5):
idxes = self.getNearest(log, nearest_num)
prompt = ""
# backward iteration
for i in range(len(idxes)-1,-1,-1):
# update: modify the prompt format to <prompt>:xx \n <extraction>:xx \n\n <prompt>: xx ...
prompt = prompt + "<prompt>:" + self.log_cand[idxes[i]].strip() + \
'\n<extraction>: <START> ' + self.gt_cand[idxes[i]].strip() + ' <END>\n\n'
similarist_gt = self.gt_cand[idxes[0]]
return prompt, similarist_gt
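    # Sketch of the resulting few-shot prompt for one query (values illustrative): N blocks of
    #   "<prompt>:<candidate log>\n<extraction>: <START> <its template> <END>\n\n", least-similar first,
    # to which textModelBatchTest appends "<prompt>:<query log>\n<extraction>: " for the model to complete.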
# compare if template is correctly extracted: if yes, return 1; else return 0
def compareTemplate(self, tpl_1, tpl_2):
token_list_1 = tpl_1.split()
token_list_2 = tpl_2.split()
if (len(token_list_1) != len(token_list_2)): return 0
length = len(token_list_1)
for i in range(length):
if (token_list_1[i] != token_list_2[i]): return 0
        return 1
# calculate parsing accuracy
def evaluatePA(self, result):
        # len(result) may be smaller than len(groundtruth)
length = len(result)
if length == 0: return 0
correct = 0
for i in range(length):
correct += self.compareTemplate(result[i], self.gt_test[i])
return correct/length
# correctly identified templates over total num of identified template
def evaluatePTA(self, result):
# generate a "template: log indexes list" mapping for groundtruth
oracle_tem_dict = {}
for idx in range(len(result)):
if self.gt_test[idx] not in oracle_tem_dict:
oracle_tem_dict[self.gt_test[idx]] = [idx]
else: oracle_tem_dict[self.gt_test[idx]].append(idx)
# generate mapping for identified template
result_tem_dict = {}
for idx in range(len(result)):
if result[idx] not in result_tem_dict:
result_tem_dict[result[idx]] = [idx]
else: result_tem_dict[result[idx]].append(idx)
correct_num = 0
for key in result_tem_dict.keys():
if key not in oracle_tem_dict: continue
else:
if oracle_tem_dict[key] == result_tem_dict[key]: correct_num += 1
return correct_num/len(result_tem_dict)
# correctly identified templates over total num of oracle template
def evaluateRTA(self, result):
oracle_tem_dict = {}
for idx in range(len(result)):
if self.gt_test[idx] not in oracle_tem_dict:
oracle_tem_dict[self.gt_test[idx]] = [idx]
else: oracle_tem_dict[self.gt_test[idx]].append(idx)
# generate mapping for identified template
result_tem_dict = {}
for idx in range(len(result)):
if result[idx] not in result_tem_dict:
result_tem_dict[result[idx]] = [idx]
else: result_tem_dict[result[idx]].append(idx)
correct_num = 0
for key in oracle_tem_dict.keys():
if key not in result_tem_dict: continue
else:
if oracle_tem_dict[key] == result_tem_dict[key]: correct_num += 1
return correct_num/len(oracle_tem_dict)
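    # Worked example (hypothetical): with oracle templates [A, A, B] and predictions [A, A, C]:
    #   PA = 2/3 (two messages parsed correctly), PTA = 1/2 (of the 2 predicted templates only A groups
    #   exactly the same log indexes as the oracle), RTA = 1/2 (of the 2 oracle templates only A is recovered).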
def writeResult(self, result, path, limit):
output = pd.DataFrame(data={"log": self.log_test[:limit], "template": result})
output.to_csv(path, index=False)
# extract result from model's response
def extractResultTemplate(self, text):
# this pattern is for ChatGPT
# pattern = re.compile('<START> <Event\d> (.+) <END>')
pattern = re.compile('<START> (.+) <END>')
# findall return a list
result = pattern.findall(text)
if (len(result)): return result[0]
else: return ""
def textModelBatchTest(self, model, model_name, limit, N=5):
# list to store the model's parsing on each log message
enc = tt.encoding_for_model(model)
answer_list = []
instruction = "For each log after <prompt> tag, extract one log template\
(substitute variable tokens in the log as <*> and remain constant tokens to construct the template)\
and put the template after <extraction> tag and between <START> and <END> tags."
self.result_path = self.result_path + "/{}_{}_result{}.csv".format(limit,self.dataset,self.subname)
# if the result file already exists, load it
if os.path.exists(self.result_path):
print("Result file already exists, loading ...")
answer_list = pd.read_csv(self.result_path)['template'].to_list()
else:
# if the result file does not exist, use api to generate result
print("Result file does not exist, generating result ...")
for line_idx in tqdm(range(len(self.log_test[:limit]))):
re_id = 0
temperature = 0
if line_idx >= limit: break
line = self.log_test[line_idx]
token_len = len(enc.encode(line.strip())) + 20
# get a prompt with five examples for each log message
prompt, similarist_gt = self.generatePrompt(line, nearest_num=N)
while True:
try:
response = openai.Completion.create(
model=model,
prompt=instruction + "\n\n\n" + prompt + "<prompt>:" + line.strip() + "\n<extraction>: ",
temperature=temperature,
max_tokens=token_len)
except: # if interrupt by request busy
print("Request busy, log {} is now waiting ...".format(line_idx))
re_id += 1
if re_id < 5:
time.sleep(0.1)
else:
result = similarist_gt
answer_list.append(result)
print("Too long waiting time, raw log: {}".format(line) + '\n')
break
else:
# if no exception, the model response a dict
# format for CodeX, GPT-D
# print(response)
# to avoid empty response
result = self.extractResultTemplate(response["choices"][0]["text"])
if result != "":
answer_list.append(result)
break
else:
if re_id >= 1:
result = similarist_gt
answer_list.append(result)
# print("Too long log message: {}".format(line) + '\n')
# print("Too long log error: token_len exceeds {}, stop increasing, using the similarist log message's tempate as prediction".format(token_len) + '\n')
# print("Raw ouput: {}".format(response["choices"][0]["text"]) + '\n')
# print("Similarist log template: {}".format(result) + '\n')
break
else:
token_len += 10
re_id += 1
temperature += 0.25
# print("token_len added to {}".format(token_len))
PA = self.evaluatePA(answer_list)
PTA = self.evaluatePTA(answer_list)
RTA = self.evaluateRTA(answer_list)
print("{}:\t PA:\t {:.6f}\t PTA:\t {:.6f}\t RTA:\t {:.6f}".format(self.dataset, PA, PTA, RTA))
f = open("benchmark.txt", 'a')
f.write("{}:\t PA:\t {:.6f}\t PTA:\t {:.6f}\t RTA:\t {:.6f}".format(self.dataset, PA, PTA, RTA) + '\n')
f.close()
self.writeResult(answer_list, self.result_path, limit)
return PA, PTA, RTA | [
"\n<extraction>: ",
"EventTemplate",
" <END>\n\n",
"\n<extraction>: <START> ",
"\n\n\n"
] |
2024-01-10 | stefan-pdx/nitime | doc~examples~multi_taper_coh.py | """
.. _multi-taper-coh:
================================
Multi-taper coherence estimation
================================
Coherence estimation can be done using windowed-spectra. This is the method
used in the example :ref:`resting-state`. In addition, multi-taper spectral
estimation can be used in order to calculate coherence and also confidence
intervals for the coherence values that result (see :ref:`multi-taper-psd`)
The data analyzed here is an fMRI data-set contributed by Beth Mormino. The
data is taken from a single subject in a"resting-state" scan, in which subjects
are fixating on a cross and maintaining alert wakefulness, but not performing
any other behavioral task.
We start by importing modules/functions we will use in this example and define
variables which will be used as the sampling interval of the TimeSeries
objects and as upper and lower bounds on the frequency range analyzed:
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.mlab import csv2rec
import scipy.stats.distributions as dist
from nitime.timeseries import TimeSeries
from nitime import utils
import nitime.algorithms as alg
import nitime.viz
from nitime.viz import drawmatrix_channels
from nitime.analysis import CoherenceAnalyzer,MTCoherenceAnalyzer
TR=1.89
f_ub = 0.15
f_lb = 0.02
"""
We read in the data into a recarray from a csv file:
"""
data_rec = csv2rec('data/fmri_timeseries.csv')
"""
The first line in the file contains the names of the different brain regions
(or ROI = regions of interest) from which the time-series were derived. We
extract the data into a regular array, while keeping the names to be used later:
"""
roi_names= np.array(data_rec.dtype.names)
nseq = len(roi_names)
n_samples = data_rec.shape[0]
data = np.zeros((nseq, n_samples))
for n_idx, roi in enumerate(roi_names):
data[n_idx] = data_rec[roi]
"""
We normalize the data in each of the ROIs to be in units of % change:
"""
pdata = utils.percent_change(data)
"""
We start by performing the detailed analysis, but note that a significant
short-cut is presented below, so if you just want to know how to do this
(without needing to understand the details), skip on down.
We start by defining how many tapers will be used and calculate the values of
the tapers and the associated eigenvalues of each taper:
"""
NW = 4
K = 2*NW-1
tapers, eigs = alg.DPSS_windows(n_samples, NW, 2*NW-1)
"""
We multiply the data by the tapers and derive the fourier transform and the
magnitude of the squared spectra (the power) for each tapered time-series:
"""
tdata = tapers[None,:,:] * pdata[:,None,:]
tspectra = np.fft.fft(tdata)
mag_sqr_spectra = np.abs(tspectra)
np.power(mag_sqr_spectra, 2, mag_sqr_spectra)
"""
Coherence for real sequences is symmetric, so we calculate this for only half
the spectrum (the other half is equal):
"""
L = n_samples/2 + 1
"""
We estimate adaptive weighting of the tapers, based on the data (see
:ref:`multi-taper-psd` for an explanation and references):
"""
w = np.empty( (nseq, K, L) )
for i in xrange(nseq):
w[i], _ = utils.adaptive_weights(mag_sqr_spectra[i], eigs, L)
"""
We proceed to calculate the coherence. We initialize empty data containers:
"""
csd_mat = np.zeros((nseq, nseq, L), 'D')
psd_mat = np.zeros((2, nseq, nseq, L), 'd')
coh_mat = np.zeros((nseq, nseq, L), 'd')
coh_var = np.zeros_like(coh_mat)
"""
Looping over the ROIs :
"""
for i in xrange(nseq):
for j in xrange(i):
"""
We calculate the multi-tapered cross spectrum between each two
time-series:
"""
sxy = alg.mtm_cross_spectrum(
tspectra[i], tspectra[j], (w[i], w[j]), sides='onesided'
)
"""
And the individual PSD for each:
"""
sxx = alg.mtm_cross_spectrum(
tspectra[i], tspectra[i], (w[i], w[i]), sides='onesided'
).real
syy = alg.mtm_cross_spectrum(
tspectra[j], tspectra[j], (w[i], w[j]), sides='onesided'
).real
psd_mat[0,i,j] = sxx
psd_mat[1,i,j] = syy
"""
Coherence is : $Coh_{xy}(\lambda) = \frac{|{f_{xy}(\lambda)}|^2}{f_{xx}(\lambda) \cdot f_{yy}(\lambda)}$
"""
coh_mat[i,j] = np.abs(sxy)**2
coh_mat[i,j] /= (sxx * syy)
csd_mat[i,j] = sxy
"""
The variance from the different samples is calculated using a jack-knife
approach:
"""
if i != j:
coh_var[i,j] = utils.jackknifed_coh_variance(
tspectra[i], tspectra[j], weights=(w[i], w[j]), last_freq=L
)
"""
This measure is normalized, based on the number of tapers:
"""
coh_mat_xform = utils.normalize_coherence(coh_mat, 2*K-2)
"""
We calculate 95% confidence intervals based on the jack-knife variance
calculation:
"""
t025_limit = coh_mat_xform + dist.t.ppf(.025, K-1)*np.sqrt(coh_var)
t975_limit = coh_mat_xform + dist.t.ppf(.975, K-1)*np.sqrt(coh_var)
utils.normal_coherence_to_unit(t025_limit, 2*K-2, t025_limit)
utils.normal_coherence_to_unit(t975_limit, 2*K-2, t975_limit)
if L < n_samples:
freqs = np.linspace(0, 1/(2*TR), L)
else:
freqs = np.linspace(0, 1/TR, L, endpoint=False)
"""
We look only at frequencies between 0.02 and 0.15 (the physiologically
relevant band, see http://imaging.mrc-cbu.cam.ac.uk/imaging/DesignEfficiency):
"""
freq_idx = np.where((freqs>f_lb) * (freqs<f_ub))[0]
"""
We extract the coherence and average over all these frequency bands:
"""
coh = np.mean(coh_mat[:,:,freq_idx],-1) #Averaging on the last dimension
"""
The next line calls the visualization routine which displays the data
"""
fig01 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTM Coherence')
"""
.. image:: fig/multi_taper_coh_01.png
Next we perform the same analysis, using the nitime object oriented interface.
We start by initializing a TimeSeries object with this data and with the
sampling_interval provided above. We set the metadata 'roi' field with the ROI
names.
"""
T = TimeSeries(pdata,sampling_interval=TR)
T.metadata['roi'] = roi_names
"""
We initialize an MTCoherenceAnalyzer object with the TimeSeries object
"""
C2 = MTCoherenceAnalyzer(T)
"""
The relevant indices in the Analyzer object are derived:
"""
freq_idx = np.where((C2.frequencies>0.02) * (C2.frequencies<0.15))[0]
"""
The call to C2.coherence triggers the computation and this is averaged over the
frequency range of interest in the same line and then displayed:
"""
coh = np.mean(C2.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
fig02 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='MTCoherenceAnalyzer')
"""
.. image:: fig/multi_taper_coh_02.png
For comparison, we also perform the analysis using the standard
CoherenceAnalyzer object, which does the analysis using Welch's windowed
periodogram, instead of the multi-taper spectral estimation method (see
:ref:`resting_state` for a more thorough analysis of this data using this
method):
"""
C3 = CoherenceAnalyzer(T)
freq_idx = np.where((C3.frequencies>f_lb) * (C3.frequencies<f_ub))[0]
#Extract the coherence and average across these frequency bands:
coh = np.mean(C3.coherence[:,:,freq_idx],-1) #Averaging on the last dimension
fig03 = drawmatrix_channels(coh,roi_names,size=[10.,10.],color_anchor=0,
title='CoherenceAnalyzer')
"""
.. image:: fig/multi_taper_coh_03.png
plt.show() is called in order to display the figures:
"""
plt.show()
| [] |
2024-01-10 | NoahSodiumKim/HoshinoAI-EarthHack-Backend | values.py | from flask import Flask, request, jsonify
import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
app = Flask(__name__)
@app.route('/generate_values')
def generate_values():
message = request.args.get("message")
response = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "Given a solution {sol}, give a list of what physical resources it would affect. You do not have to use any of the original resources. The values are either 0, 1, or 2, with 0 creating more of a resource, 1 being neutral in how it affects it and 2 would reduce the certain resource. Try to give more than 4 resources, ideally 16. Try to have some of each type of value. Return the value as a json file with the key being the resource and the value being the value. For example, if the solution is 'reusing more papers and pencils', then the json file would be {'{resource 1}': 1, '{resource 2}': 0, '{resource 3}': 0, '{resource 4}': 2}. No explination needed. Try to find at least some resources that could be negatively affected by the solution, using outside knowledge about what comes from the side effects of a solution. Do not use the example json file, it is just an example."
},
{
"role": "system",
"content": " Attempt to find both positively and negatively affected resources to the solution. There should be some positive and some negative. You are allowed to use outside resources, such as knowing that making more gas cars can result in more carbon emissions"
},
{
"role": "system",
"content": "The actual resources have to be physical concrete things, not abstract things like 'happiness' or 'money'. Focus on environmental resources, such as 'water', 'oxygen'. or 'oil'."
},
{
"role": "user",
"content": message
},
],
temperature=0.7,
max_tokens=128
)
return str(response.choices[0].message.content)
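# Example request sketch (endpoint and query parameter as defined above; host and port are assumptions
# based on Flask defaults):
#   GET http://localhost:5000/generate_values?message=reusing%20more%20papers%20and%20pencils
# The model is asked to return a JSON object mapping each resource name to 0, 1, or 2.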
| [
"The actual resources have to be physical concrete things, not abstract things like 'happiness' or 'money'. Focus on environmental resources, such as 'water', 'oxygen'. or 'oil'.",
" Attempt to find both positively and negatively affected resources to the solution. There should be some positive and some negative. You are allowed to use outside resources, such as knowing that making more gas cars can result in more carbon emissions",
"Given a solution {sol}, give a list of what physical resources it would affect. You do not have to use any of the original resources. The values are either 0, 1, or 2, with 0 creating more of a resource, 1 being neutral in how it affects it and 2 would reduce the certain resource. Try to give more than 4 resources, ideally 16. Try to have some of each type of value. Return the value as a json file with the key being the resource and the value being the value. For example, if the solution is 'reusing more papers and pencils', then the json file would be {'{resource 1}': 1, '{resource 2}': 0, '{resource 3}': 0, '{resource 4}': 2}. No explination needed. Try to find at least some resources that could be negatively affected by the solution, using outside knowledge about what comes from the side effects of a solution. Do not use the example json file, it is just an example."
] |
2024-01-10 | Sanjaypranav/my-assistant | assistant.py |
import os
from sayvai_tools.tools.sql_database import Database
from sayvai_tools.tools.conversational_human import ConversationalHuman as human
from sayvai_tools.tools.calendar import Calendar
from constants import PROMPT, LLM
# from langchain.tools import HumanInputRun as human
from langchain.agents import AgentType, Tool, AgentExecutor , initialize_agent , OpenAIFunctionsAgent
from sqlalchemy import create_engine
from langchain.memory import ConversationSummaryBufferMemory
with open("openai_api_key.txt", "r") as f:
api_key = f.read()
os.environ["OPENAI_API_KEY"] = api_key
llm = LLM
class Assistant:
"""
The assistant is a class that is used to interact with the user and the agent.
It is the main interface for the user to interact with the agent."""
def __init__(self):
self.agent = None
self.memory = ConversationSummaryBufferMemory(llm=llm)
self.tools = None
self.human = None
self.sql = None
self.voice = None
self.calendly = None
self.system_message = PROMPT
self.prompt = OpenAIFunctionsAgent.create_prompt(
system_message=self.system_message,
)
def initialize_human(self) -> None:
"""Initialize the human"""
self.human = human()
return None
def initialize_tools(self):
"""Initialize the tools"""
if self.tools is None:
raise ValueError("Tools not initialized")
else :
print("Tools already initialized")
def agent_inittialize(self, verbose: bool = False) -> None:
"""Initialize the agent"""
self.agent = OpenAIFunctionsAgent(
llm=llm,
prompt=self.prompt,
)
agent_executor =AgentExecutor(
agent=self.agent,
verbose=verbose,
memory=self.memory,
max_iterations=30
)
return agent_executor
def initialize(self, verbose: bool=False) -> None:
"""Initialize the assistant"""
# self.initialize_vectordb()
# self.initialize_tools()
self.agent_executor = self.agent_inittialize(verbose=verbose)
return None
def get_answer(self, query:str) -> str:
"""Get the answer from the agent"""
return self.agent_executor.run(query) | [] |
2024-01-10 | Sanjaypranav/my-assistant | constants.py | """
------------------------------------
Constant file for the project
------------------------------------
"""
import json
from langchain.chat_models import ChatOpenAI
from langchain.prompts.prompt import PromptTemplate
DEFAULT_PROMPT : str = json.loads(open('data.json').read())[0]["prompt"]
# print(DEFAULT_PROMPT)
NAME: str = json.loads(open('data.json').read())[0]["name"]
with open("openai_api_key.txt", "r") as f:
api_key = f.read()
LLM = ChatOpenAI(
model = "gpt-4",
openai_api_key=api_key
)
PROMPT = PromptTemplate(input_variables=["emotion"], template=DEFAULT_PROMPT) | [
"data.json",
"emotion"
] |
2024-01-10 | ho-cyber/youtube-seo | youtube_downloader.py | import os
import streamlit as st
import pytube
import openai
from youtube_search import YoutubeSearch
import streamlit.components.v1 as components
def download_youtube_video(url):
try:
youtube = pytube.YouTube(url)
video = youtube.streams.get_highest_resolution()
video.download()
st.success('Video downloaded successfully!')
# Rename the downloaded file to "video.mp4"
default_filename = video.default_filename
new_filename = "video.mp4"
os.rename(default_filename, new_filename)
except Exception as e:
st.error(f'Error downloading video: {str(e)}')
def download_youtube_video2(url):
try:
youtube = pytube.YouTube(url)
video = youtube.streams.get_highest_resolution()
video.download()
st.success('Video downloaded successfully!')
# Rename the downloaded file to "video.mp4"
default_filename = video.default_filename
new_filename = "video2.mp4"
os.rename(default_filename, new_filename)
except Exception as e:
st.error(f'Error downloading video: {str(e)}')
mode = st.radio("Please select mode", ("Student", "Normal"))
def main():
st.title("GPTTUBE: Convert youtube videos into seo blog post in seconds")
url = st.text_input("Enter the YouTube video URL:")
if mode == "Normal":
if st.button("Download"):
if url:
download_youtube_video(url)
else:
st.warning("Please enter the YouTube video URL.")
if mode == "Student":
if st.button("Download"):
if url:
download_youtube_video2(url)
else:
st.warning("Please enter the video url")
ga_code = """
<!-- Google tag (gtag.js) -->
<script async src="https://www.googletagmanager.com/gtag/js?id=G-M26G6BJJT0"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'G-M26G6BJJT0');
</script>
"""
st.markdown(ga_code, unsafe_allow_html=True)
if __name__ == "__main__":
main()
# Import the AssemblyAI module
import assemblyai as aai
# Your API token is already set here
aai.settings.api_key = "62654efc37ad4c17b521afd8413c9a63"
# Create a transcriber object.
transcriber = aai.Transcriber()
# If you have a local audio file, you can transcribe it using the code below.
# Make sure to replace the filename with the path to your local audio file.
if mode == "Normal":
transcript = transcriber.transcribe("./video.mp4")
if mode == "Student":
transcript = transcriber.transcribe("./video2.mp4")
# Alternatively, if you have a URL to an audio file, you can transcribe it with the following code.
# Uncomment the line below and replace the URL with the link to your audio file.
# transcript = transcriber.transcribe("https://storage.googleapis.com/aai-web-samples/espn-bears.m4a")
# After the transcription is complete, the text is printed out to the console.
auth_token = st.secrets["auth_token"]
openai.api_key = auth_token
text = transcript.text
if mode == "Normal":
final_ans = openai.Completion.create(
prompt = "Convert this into an SEO blog post also make it fun to read and intuitive while being seo friendly give it a suitable title"+ text,
engine = "text-davinci-003",
max_tokens = 500
)
if mode == "Student":
final_ans = openai.Completion.create(
prompt = "Give questions based on the transcript of this video they should be IMPORTANT QUESTIONS ONLY AND NOT SIDETRACKED QUESTIONS also generate a study plan for this with insights"+ text,
engine = "text-davinci-003",
max_tokens = 500
)
main_ans = final_ans["choices"][0]["text"]
st.write(main_ans)
if mode == "Normal":
image_prompt = openai.Completion.create(
prompt = "Generate an image prompt for the following SEO blog post"+ main_ans,
engine = "text-davinci-003",
max_tokens = 500
)
proompt = image_prompt["choices"][0]["text"]
image = openai.Image.create(
prompt = proompt,
size = "256x256"
)
image_url = image["data"][0]["url"]
st.image(image_url)
st.write(proompt)
| [
"Convert this into an SEO blog post also make it fun to read and intuitive while being seo friendly give it a suitable titlePLACEHOLDER",
"Generate an image prompt for the following SEO blog post",
"Generate an image prompt for the following SEO blog postPLACEHOLDER",
"text-davinci-003",
"Give questions based on the transcript of this video they should be IMPORTANT QUESTIONS ONLY AND NOT SIDETRACKED QUESTIONS also generate a study plan for this with insightsPLACEHOLDER"
] |
2024-01-10 | ANKITVARMA11/Music-Recommender-System | Music%20Recommendation.py | import openai
openai.api_key = " "
def chat_with_gpt(prompt, max_tokens= 30):
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages =[
{"role": "system", "content": "recommed music according to moods"},
{"role":"user", "content": prompt}
],
max_tokens= max_tokens
)
return response.choices[0].message.content.strip()
if __name__ == "_main_":
while True:
user_input = input("You: ")
if user_input.lower() in ["quit", "bye", "exit", "stop"]:
print("Goodbye! If you have more questions, feel free to ask.")
break
response = chat_with_gpt(user_input)
print("Musicq: ",response)
| [
"recommed music according to moods"
] |
2024-01-10 | charlesmccarthy/summerizer | summerize.py | import replicate
import os
from openai import OpenAI
import argparse
import random
job = random.randint(1, 100000)
oaiclient = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
DEFAULT_FILTER = "you are a video summerizer and you will recieve a video transcription from whisper to summerize"
def get_response(prompt):
"""
Retrieves a response by splitting the prompt into chunks of 50k characters,
sending each chunk to an API for completion, and combining the responses
into a single string.
Args:
prompt (str): The prompt to be sent for completion.
Returns:
str: The combined response from the API.
Raises:
None
"""
MAX_CHARS = 50000 # Maximum characters per API call
responses = []
# Split the prompt into chunks of 50k characters
prompts = [prompt[i:i+MAX_CHARS] for i in range(0, len(prompt), MAX_CHARS)]
for prompt in prompts:
prompt = "Please summerize the following video which has been transcribed by whisper. Use and tell me important quotes so it feels as if i watched it: \n" + prompt
messages = [{"role": "system", "content": DEFAULT_FILTER}] + [{"role": "user", "content": prompt}]
completion = oaiclient.chat.completions.create(model="gpt-4-1106-preview", messages=messages)
message = completion.choices[0].message
responses.append(message.content)
# Combine the responses into a single string
response = ' '.join(responses)
return response
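
# Worked illustration, not part of the original script, of the 50k-character
# chunking above: a 120,000-character transcript becomes three prompts of
# 50,000, 50,000 and 20,000 characters; each is summarized separately and the
# partial summaries are then joined with spaces.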
parser = argparse.ArgumentParser(description='make a description of a video')
parser.add_argument('--url', type=str, required=True,
help='youtube url')
args = parser.parse_args()
url = args.url
# Run audio through the API
output = replicate.run(
"adidoes/whisperx-video-transcribe:481284a2a2ff72a031689481ca92fb1d20b194980a4b435d93f6f4c9520fea61",
input={
"url": url,
"debug": False,
"batch_size": 16
}
)
# save output to a text file
with open(f'transcript{job}.txt', 'w') as f:
f.write(output)
# Open the transcript, split it into 50k-character chunks, summarize each chunk with GPT-4 Turbo, combine the partial summaries into one, and print the result
with open(f'transcript{job}.txt', 'r') as f:
text = f.read()
print(get_response(text))
os.remove(f'transcript{job}.txt')
| [
"Please summerize the following video which has been transcribed by whisper. Use and tell me important quotes so it feels as if i watched it: \nprompt6e901981-4e2a-4837-9141-9424c3277cf1",
"you are a video summerizer and you will recieve a video transcription from whisper to summerize",
"Please summerize the following video which has been transcribed by whisper. Use and tell me important quotes so it feels as if i watched it: \nPlease summerize the following video which has been transcribed by whisper. Use and tell me important quotes so it feels as if i watched it: \nprompt382b9eed-fd8f-41aa-8def-f4bedbbaf7f8",
"[{'role': 'system', 'content': 'DEFAULT_FILTER3def2155-80a5-4442-9507-2a0349637c4b'}, {'role': 'user', 'content': 'Please summerize the following video which has been transcribed by whisper. Use and tell me important quotes so it feels as if i watched it: \nprompt3def2155-80a5-4442-9507-2a0349637c4b'}]"
] |
2024-01-10 | phanTri02/Final_AI | BackEnd~feature~description~callGpt.py | from openai import OpenAI
client = OpenAI(api_key='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')
def callGPT(prompt):
stream = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": prompt}],
stream=True,
)
detail = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
detail += chunk.choices[0].delta.content
print(chunk.choices[0].delta.content, end="")
return detail
# if __name__ == '__main__':
# print(callGPT("Description tulip"))
| [] |
2024-01-10 | engineervix/zed-news | app~core~podcast~content.py | import datetime
import logging
import sys
# import time
from typing import Callable
# import replicate
import together
# from langchain.llms import OpenAI
from pydantic import HttpUrl
from app.core.db.models import Article, Episode
# from app.core.summarization.backends.together import brief_summary
from app.core.utilities import (
DATA_DIR,
# OPENAI_API_KEY,
TOGETHER_API_KEY,
podcast_host,
today,
today_human_readable,
today_iso_fmt,
)
# llm = OpenAI(temperature=0.7, openai_api_key=OPENAI_API_KEY)
def get_episode_number() -> int:
"""Returns the episode number based on the number of episodes in the database"""
count = Episode.select().where(Episode.live == True).count() # noqa: E712
return count + 1
def update_article_with_summary(title: str, url: HttpUrl, date: datetime.date, summary: str):
"""Find an article by title, URL & date, and update it with the given summary"""
article = Article.select().where((Article.title == title) & (Article.url == url) & (Article.date == date)).first()
if article:
article.summary = summary
article.save()
else:
logging.warning(f"Could not find article with title '{title}', URL '{url}', and date '{date}'")
def create_transcript(news: list[dict[str, str]], dest: str, summarizer: Callable):
"""Create a podcast transcript from the news, using the provided summarization function
and write it to a file
Args:
news (list[dict[str, str]]): A list of news articles represented as
dictionaries, where each dictionary contains the following keys:
- 'source': The article source.
- 'url': The URL of the article.
- 'title': The title of the article.
                - 'content': The content of the article. This is passed to the summarizer for summarization.
- 'category': The category of the article.
dest (str): The destination file path where the transcript will be written.
summarizer (Callable): The function to use for summarization. This function must accept two arguments:
- content (str): The content of the article.
- title (str): The title of the article.
Raises:
- OpenAIException: If there is an issue with the OpenAI API.
- TimeoutError: If the summarization request times out.
- ConnectionError: If there is a network connectivity issue.
- ValueError: If the input data is invalid or in the wrong format.
- TypeError: If the input data is of incorrect type.
Returns:
None: The function writes the transcript to the specified file but does not return any value.
"""
articles_by_source = {}
for article in news:
source = article["source"].replace("Zambia National Broadcasting Corporation (ZNBC)", "ZNBC")
# If the source is not already a key in the dictionary, create a new list
if source not in articles_by_source:
articles_by_source[source] = []
# Add the article to the list for the corresponding source
articles_by_source[source].append(article)
prompt = f"The date is {today_human_readable}. You are {podcast_host}, a fun and witty scriptwriter, content creator and podcast host. You host the Zed News Podcast - a news and current affairs podcast which runs Monday to Friday. You are given the news from various sources as indicated below. You have a microphone in front of you, your task is to present today's episode (number {get_episode_number()}), ensuring that you cover all the news from all the sources. Where necessary, consolidate any similar news items from different sources. Ensure that the podcast is presented in a logical, coherent manner so it's easy to follow. At the end, inform your audience that you are actually not human. Remember: speak in a conversational tone, leave no news item behind and do not repeat content. Do not worry about SFX, music, captions, etc. just speak directly as if you are live on air.\n\n"
# prompt = f"The date is {today_human_readable}. {podcast_host}, a bot, hosts the Zed News Podcast - a news and current affairs podcast which runs Monday to Friday. Your task, as an accomplished content creater and comedian, is to produce content which she'll read out word-for-word as she presents today's episode (number {get_episode_number()}). Ensure that you cover all the news from all the sources, presented in a logical, coherent manner, with any similar news items from different sources appropriately consolidated. At the end, Ayanda wants her audience to know that she's actually not a human. Remember: leave no news item behind, do not repeat content and ensure your output is presented in a human-readable style.\n\n" # noqa: W505
metadata = f"Title: Zed News Podcast episode {get_episode_number()}\nDate: {today_human_readable}\nHost: {podcast_host}\n\n"
content = ""
counter = 0
for source in articles_by_source:
# Iterate over each article in the source
for article in articles_by_source[source]:
title = article["title"]
text = article["content"]
# if len(news) < 15:
# # If there are less than 15 articles, summarize each article in the usual way
# summary = summarizer(text, title)
# else:
# summary = brief_summary(text, title)
summary = summarizer(text, title)
if summary.strip().startswith("Summary: "):
summary = summary.replace("Summary: ", "")
update_article_with_summary(title, article["url"], today, summary)
counter += 1
content += f"{counter}. '{title}' (source: {source})"
content += f"\n{summary.strip()}\n\n"
notes = prompt + "```\n" + content + "```"
# Write the content to a file
with open(f"{DATA_DIR}/{today_iso_fmt}_news_headlines.txt", "w") as f:
f.write(metadata + "News Items:\n\n" + content)
# model = "lmsys/vicuna-13b-v1.5-16k"
model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
temperature = 0.7
max_tokens = 4096
together.api_key = TOGETHER_API_KEY
output = together.Complete.create(
prompt=notes,
model=model,
temperature=temperature,
max_tokens=max_tokens,
)
logging.info(output)
transcript = output["output"]["choices"][0]["text"]
if transcript.strip():
# Write the transcript to a file
with open(dest, "w") as f:
f.write(transcript)
else:
logging.error("Transcript is empty")
sys.exit(1)
# data = llm(notes)
# if data:
# # Write the transcript to a file
# with open(dest, "w") as f:
# f.write(data)
# else:
# logging.error("Transcript is empty")
# sys.exit(1)
# model = replicate.models.get("meta/llama-2-70b-chat")
# version = model.versions.get("02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3")
# prediction = replicate.predictions.create(
# version=version,
# input={
# "prompt": notes,
# "temperature": 0.7,
# "max_new_tokens": 4096,
# },
# )
# # Check if the task is complete, then get the transcript
# while True:
# logging.info("Checking if Replicate Task is completed...")
# prediction.reload()
# result = prediction.status
# if result == "succeeded":
# logging.info("Woohoo! Task completed!")
# break
# prediction.wait()
# transcript = prediction.output
# if transcript:
# # Write the transcript to a file
# with open(dest, "w") as f:
# f.write(transcript)
# else:
# logging.error("Transcript is empty")
# sys.exit(1)
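
# Hedged illustration, not part of the original module: the `summarizer` argument to
# create_transcript() is any callable with the (content, title) signature described
# in its docstring. A trivial stand-in could look like the sketch below; the file
# name and news list are hypothetical.
#
#     def naive_summarizer(content: str, title: str) -> str:
#         return f"{title}: {content[:200]}"
#
#     create_transcript(news_items, dest="transcript.txt", summarizer=naive_summarizer)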
| [] |
2024-01-10 | atomicgamedeveloper/CalendarMe-Open-Source | calendarMe.py | import re
import datetime
from dateutil import parser
import os.path
import pytz
from tzlocal import get_localzone
import openai
with open('openai.txt') as f:
openai.api_key = f.read()
import json
import time
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
import requests
SCOPES = ['https://www.googleapis.com/auth/calendar.events']
def readable_time(time, format):
formatted_time = datetime.datetime.strptime(time, format)
day = formatted_time.day
if 4 <= day <= 20 or 24 <= day <= 30:
suffix = "th"
else:
suffix = ["st", "nd", "rd"][day % 10 - 1]
new_format = f"%A the {day}{suffix} of %B, %Y at %H:%M"
new_time = datetime.datetime.strftime(formatted_time,new_format)
return new_time
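
# Worked example, not part of the original file, of the conversion performed above:
#     readable_time("2024-01-10T14:30:00", "%Y-%m-%dT%H:%M:%S")
#     -> "Wednesday the 10th of January, 2024 at 14:30"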
def describe_weather(weathercode):
weather_descriptions = {
0: "Clear sky ☀️",
1: "Mostly clear 🌤",
2: "Partly cloudy ⛅",
3: "Overcast ☁️",
45: "Fog 🌫",
48: "Dense fog 🌁",
51: "Light drizzle 🌦",
53: "Drizzle 🌧",
55: "Heavy drizzle 🌧️🌧️",
56: "Light freezing drizzle 🌨️",
57: "Freezing drizzle ❄️💧",
61: "Light rain 🌦",
63: "Rain 🌧",
65: "Heavy rain 🌧️🌧️",
66: "Light freezing rain 🌨️",
67: "Freezing rain ❄️💧",
71: "Light sleet 🌦❄️",
73: "Sleet 🌨️",
75: "Heavy sleet 🌨️🌨️",
81: "Light snowfall 🌨️",
83: "Snowfall ❄️",
85: "Heavy snowfall ❄️❄️"
}
return weather_descriptions.get(weathercode, f"Unknown weather code {weathercode} ❓")
def get_weather(date=None):
base_url = "https://api.open-meteo.com/v1/forecast"
if date is None:
        date = datetime.datetime.now().strftime("%Y-%m-%dT%H:00")
else:
date = date[:-5]+"00"
print(date)
complete_url = f"{base_url}?latitude={lat}&longitude={lon}&hourly=temperature_2m,weathercode&time={date}"
try:
response = requests.get(complete_url)
data = response.json()
weather_info = None
for i, time in enumerate(data['hourly']['time']):
if not time == date:
continue
print("Found it!")
weather_info = {
"description": describe_weather(data['hourly']["weathercode"][i]),
"temperature": data['hourly']["temperature_2m"][i],
}
break
return weather_info
except requests.exceptions.RequestException as e:
return f"Error: {e}"
city_name = "Copenhagen"
country_code = "Denmark"
complete_url = f"https://nominatim.openstreetmap.org/search?city={city_name}&country={country_code}&format=json"
response = requests.get(complete_url)
data = response.json()
lat = float(data[0]["lat"])
lon = float(data[0]["lon"])
class ScryException(BaseException):
pass
class RegretException(BaseException):
pass
class GetNextEventException(BaseException):
def __init__(self, num_events):
self.num_events = num_events
class GetEventsBetweenTimesException(BaseException):
def __init__(self, start_time,end_time):
self.start_time = start_time
self.end_time = end_time
def get_now():
timezone = pytz.timezone('Etc/GMT-2')
now = datetime.datetime.now(timezone)
return now
def delete_events(service, event_ids):
for event_id in event_ids:
event = service.events().get(calendarId='primary', eventId=event_id).execute()
service.events().delete(calendarId='primary', eventId=event_id).execute()
print(f"Event named '{event['summary']}' deleted successfully.")
def update_events(service, event_ids, revised_events):
yes_to_all = False
for event_id, revised_event in zip(event_ids, revised_events):
event = service.events().get(calendarId='primary', eventId=event_id).execute()
changes = []
if revised_event['summary'] and event['summary'] != revised_event['summary']:
changes.append(
f"Summary: {event['summary']} -> {revised_event['summary']}\n")
if revised_event['description'] and event['description'] != revised_event['description']:
changes.append(
f"Description: {event['description']} -> {revised_event['description']}\n")
if revised_event['start'] and event['start']['dateTime'] != revised_event['start']:
readable_old_time = readable_time(event['start']['dateTime'],"%Y-%m-%dT%H:%M:%S")
readable_new_time = readable_time(
revised_event['start'], "%Y-%m-%dT%H:%M:%S")
changes.append(f"Start time: {readable_old_time} -> {readable_new_time}\n")
if revised_event['end'] and event['end']['dateTime'] != revised_event['end']:
readable_old_time = readable_time(event['end']['dateTime'],"%Y-%m-%dT%H:%M:%S")
readable_new_time = readable_time(
revised_event['end'], "%Y-%m-%dT%H:%M:%S")
changes.append(
f"End time: {readable_old_time} -> {readable_new_time}\n")
if revised_event['reminders'] and event['reminders'] != revised_event['reminders']:
changes.append(
f"Reminders: {event['reminders']} -> {revised_event['reminders']}\n")
if changes:
if not yes_to_all:
print()
print(f"These changes are about to be made to \"{event['summary']}\":\n - " +
" - ".join(changes))
confirm = get_input("Confirm changes?", "yes")
if confirm.lower() in ["no", "n"]:
continue
if confirm.lower() in ["yy", "yes to all"]:
if get_input("This will approve all edits, even ones you've disapprove. Write \"yes\" to affirm.") == "yes":
yes_to_all = True
if confirm.lower() in ["nn", "no to all"]:
if get_input("This will disapprove all edits, even ones you've approved. Write \"yes\" to affirm.") == "yes":
return
else:
print("No changes to be made for event '",event['summary'],"'")
continue
event['summary'] = revised_event['summary']
event['description'] = revised_event['description']
event['start']['dateTime'] = revised_event['start']
event['end']['dateTime'] = revised_event['end']
event['reminders'] = revised_event['reminders']
updated_event = service.events().update(
calendarId='primary',
eventId=event_id,
body=event
).execute()
print(f"Event '{updated_event['summary']}' updated successfully.")
print()
def get_events_between_times(service, start_time=None, end_time=None):
if start_time is None:
start_time = get_now().isoformat()
if end_time is None:
end_time = get_now().replace(hour=23, minute=59, second=59).isoformat()
events_result = service.events().list(
calendarId="primary", timeMin=start_time, timeMax=end_time).execute()
events = events_result.get("items", [])
event_list = []
print()
if not events:
print("No events found.")
return None
else:
print("Found events:")
for event in events:
event_id = event["id"]
start = event["start"].get("dateTime", event["start"].get("date"))
end = event["end"].get("dateTime", event["end"].get("date"))
summary = event["summary"]
description = event.get("description", "")
reminders = event.get("reminders", {})
print(start, summary)
event_list.append({
"id": event_id,
"start": start,
"end": end,
"summary": summary,
"description": description,
"reminders": reminders
})
json_output = json.dumps(event_list)
return json_output
def get_next_event(service,amount=1):
if amount <= 0:
return None
now = get_now().isoformat()
print(f'Getting the upcoming {amount} events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=amount, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
print('No upcoming events found.')
return
for event in events:
print(event)
start = event['start'].get('dateTime', event['start'].get('date'))
print(start, event['summary'])
return events
def ask_the_bot(question, context=[], temperature=0.05, bot = 'gpt-3.5-turbo'):
context.append({"role": "user", "content": question})
max_resends = 2
request_resends = 0
while request_resends <= max_resends:
try:
response = openai.ChatCompletion.create(
model=bot,
messages=context,
temperature=temperature
)
return response["choices"][0]["message"]["content"]
        except Exception as e:
            last_error = e
            print()
            print(f"An error occurred. Resending ({request_resends}/{max_resends})...")
            request_resends = request_resends + 1
            time.sleep(5)
    print("\nAll requests failed! Error:", last_error)
return []
def to_message(message,role):
return {"content": message, "role": role}
def converse_the_bot(context, temperature=0.2, bot='gpt-3.5-turbo'):
max_resends = 2
request_resends = 0
while request_resends <= max_resends:
try:
response = openai.ChatCompletion.create(
model=bot,
messages=context,
temperature=temperature
)
return response["choices"][0]["message"]["content"]
except Exception as e:
print(e)
print(f"An error occured. Resending ({request_resends}/{max_resends})...")
request_resends = request_resends + 1
time.sleep(5)
print("\nAll requests failed!")
return []
def discuss_until_ok(prompt_to_bot,bot='gpt-3.5-turbo',temperature = 0.2):
print(f"User: {prompt_to_bot}\n")
context = []
context.append(to_message(prompt_to_bot,"user"))
while True:
from_bot = converse_the_bot(context,bot=bot, temperature=temperature)
context.append(to_message(from_bot,"assistant"))
print(f"Bot response: {from_bot}\n")
to_bot = get_input("Ok?","This is okay.")
context.append(to_message(to_bot,"user"))
if to_bot == "This is okay.":
break
# Reset to gpt-3.5 for cost
bot='gpt-3.5-turbo'
print()
return from_bot
def delete_events_from_string(service, planning_prompt):
events = json.loads(events_from_prompt(service,planning_prompt))
for e in events:
e.pop('description')
e.pop('reminders')
events = json.dumps(events)
today = datetime.datetime.now().strftime('%Y-%m-%d, %A')
time = datetime.datetime.now().strftime('%H:%M')
if events:
delete_events_prompt = f"From this query: \"{planning_prompt} [sent {today}, {time}]\", do as follows: 1. Identify the intent of the query. 2. Pick out the ids of any events from the JSON array below that are described by the intent or query. 3. Make an array consisting of just the ids of the to-be deleted events correlating to the titles/summaries from step 2. JSON Array: {events}."
delete_events_response = discuss_until_ok(delete_events_prompt)
deletable_ids = try_to_load_json_from_string(delete_events_response)
        approved_ids = []
        for id in deletable_ids:
event = service.events().get(calendarId='primary', eventId=id).execute()
print(f"You are about to delete event: {event['summary']} from your calendar.")
approval = get_input("Confirm changes?", "no")
if approval == 'no' or approval == 'n':
continue
elif ((approval == 'no to all' or approval == 'nn') and get_input('This will delete no events, even ones you have approved for deletion. Write \'yes\' to confirm.') == 'yes'):
return []
elif (approval == 'yes to all' or approval == 'yy') and get_input('This will delete all the detected events, even ones you have disapproved for deletion. Write \'yes\' to confirm') == 'yes':
approved_ids = deletable_ids
break
else:
approved_ids.append(id)
delete_events(service, approved_ids)
return []
def generate_events_from_context(service, planning_prompt):
events = events_from_prompt(service, planning_prompt)
local_tz = get_localzone()
current_datetime = datetime.datetime.now(local_tz)
day = current_datetime.strftime('%A')
today = current_datetime.date().isoformat()
time = current_datetime.time().strftime('%H:%M:%S')
tz_offset = current_datetime.strftime('%z')[:3]+":"+current_datetime.strftime('%z')[3:]
if not events:
event_string = "no plans"
else:
events = try_to_load_json_from_string(events)
event_strings = [f"{event['summary']}, starting at {event['start']}, ending at {event['end']}" for event in events]
event_string = ", ".join(event_strings)
new_events_prompt = f"Given the following current date and time: {day}, {today}T{time}:00, the following sequence of preexisting plans: {event_string} and planning prompt: '{planning_prompt}', do the following. 1. Identify the intent of the prompt. 2. Find any completely free time windows between the preexisting plans to use for new events. If none exists, after the events is fine. 3. In detail, list your intended additions with respect to the query. 4. Make a new JSON array consisting of just the additions with the following keys: summary, start_datetime, end_datetime, description, and reminder (int, minutes), in an array that can be parsed to create calendar events. Please use 1-2 emojis per complex sentence in the title and description to make them more personal."
print()
new_events_response = discuss_until_ok(new_events_prompt)
events_json = try_to_load_json_from_string(new_events_response)
approved_events = []
for event in events_json:
print()
print(f"Add event: {event['summary']} to calendar?")
approval = get_input("Confirm changes?", "yes")
if approval == 'no' or approval == 'n':
continue
elif ((approval == 'no to all' or approval == 'nn') and get_input('This will add no new events, even ones you have approved to the calender. Confirm?', 'yes') == 'yes'):
return []
elif (approval == 'yes to all' or approval == 'yy') and get_input('This will add all new events, even ones you have disapproved to the calender. Confirm?', 'yes') == 'yes':
return events_json
else:
approved_events.append(event)
return approved_events
def time_window_from_prompt(planning_prompt):
today = datetime.datetime.now().strftime('%Y-%m-%d, %A')
time = datetime.datetime.now().strftime('%H:%M')
time_window_prompt = f"From this query: \"{planning_prompt} [sent {today}, {time}]\", do as follows: 1. Identify the intent of the query. 2. Explain in depth the most important times mentioned in the query. If no day information is present in the query, assume today. If no temporal hints are present in the query (other than the query time stamp), simply start at 00:00 and end at 23:59 of the same day. 3. End your response with an unambiguous time frame that covers the original/current plans from step 2. Fill out the following completely with no changes to the format: 'Original plans: YYYY-MM-DD HH:MM to YYYY-MM-DD HH:MM'."
print()
time_window_response = discuss_until_ok(time_window_prompt)
pattern = r"\d{4}-\d{2}-\d{2} \d{2}:\d{2} to \d{4}-\d{2}-\d{2} \d{2}:\d{2}"
match = re.search(pattern, time_window_response)
if match:
time_window = match.group()
start_time, end_time = time_window.split(" to ")
else:
today = datetime.datetime.now().strftime('%Y-%m-%d')
start_time = today + " 00:00"
end_time = today + " 23:59"
print("No time window found in the response. Assuming the entire present day.")
confirmation = input("Is the assumption correct? (yes/no): ")
if confirmation.lower() != "yes":
start_time = get_input("Please provide the correct start time.",default_value=start_time)
end_time = get_input("Please provide the correct end time.",default_value=end_time)
start_time = parse_date_time(start_time).replace(second=0).isoformat()
end_time = parse_date_time(end_time).replace(second=0).isoformat()
return start_time, end_time
def events_from_prompt(service,planning_prompt):
print()
print("Identifying time window from prompt...")
start_time, end_time = time_window_from_prompt(planning_prompt)
print(
f"Fetching events between {readable_time(start_time, '%Y-%m-%dT%H:%M:%S%z')} and {readable_time(start_time, '%Y-%m-%dT%H:%M:%S%z')}.")
events = get_events_between_times(service,start_time,end_time)
return events
def update_events_from_string(service,planning_prompt):
events = events_from_prompt(service,planning_prompt)
today = datetime.datetime.now().strftime('%Y-%m-%d, %A')
time = datetime.datetime.now().strftime('%H:%M')
if events:
update_events_prompt = f"From this query: \"{planning_prompt} [sent {today}, {time}]\", do as follows: 1. Identify the intent of the query. 2. Pick out the titles/summaries of any events from the JSON array below that are discussed in the query. 3. In detail, list your intended edits to the events with respect to the query. 4. Make a new JSON array consisting of just the now revised events correlating to the titles/summaries from step 2. Make sure to be unambiguous, autonomously and intelligently making any decisions necessary to satisfy the query, and escape any special characters that have special meaning in JSON by putting backslash before it. JSON Array: {events}."
print()
update_events_response = discuss_until_ok(update_events_prompt)
updated_events = try_to_load_json_from_string(update_events_response)
event_ids = [obj['id'] for obj in updated_events]
revised_events = [{k: v for k, v in obj.items() if k != 'id'} for obj in updated_events]
update_events(service,event_ids,revised_events)
return []
def try_to_load_json_from_string(json_string):
print()
print("Trying to load JSON from GPT.")
start_of_json = json_string.find('{')
end_of_json = json_string.rfind('}')+1
json_string = json_string[start_of_json:end_of_json]
json_string = "["+json_string+"]"
try:
loaded_json = json.loads(json_string)
except json.JSONDecodeError as e:
print("Invalid JSON, trying to fix...")
fixed_json_prompt = "This JSON array" + json_string + \
"\n\ngives this error:\n" + str(e) + "\nplease fix it."
print(f"User:\n{fixed_json_prompt}")
fixed_events_response = ask_the_bot(fixed_json_prompt, [])
print()
print(f"Bot response:\n{fixed_events_response}")
start_of_json = fixed_events_response.find('[')
end_of_json = fixed_events_response.rfind(']')+1
json_string = fixed_events_response[start_of_json:end_of_json]
try:
loaded_json = json.loads(json_string)
except json.JSONDecodeError:
print()
print("Failed to get a valid response from the GPT.\n")
return []
print("Loading JSON succeeded!")
return loaded_json
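
# Illustrative example, not part of the original file: the helper tolerates chatty
# model output by slicing from the first '{' to the last '}' and wrapping it in a list:
#     try_to_load_json_from_string('Sure! Here it is: {"summary": "Lunch"} Enjoy.')
#     -> [{"summary": "Lunch"}]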
def generate_events_from_string(service, planning_prompt):
local_tz = get_localzone()
current_datetime = datetime.datetime.now(local_tz)
day = current_datetime.strftime('%A')
today = current_datetime.date().isoformat()
time = current_datetime.time().strftime('%H:%M:%S')
tz_offset = current_datetime.strftime('%z')[:3]+":"+current_datetime.strftime('%z')[3:]
new_events_prompt = f"Given the following current date and time: {day}, {today}T{time} and planning prompt: '{planning_prompt}', format the prompt's contents as JSON objects with the following keys: summary, start_datetime, end_datetime, description, and reminder (int, minutes), in an array that can be parsed to create calendar events. Please use 1-2 emojis per complex sentence in the title and description to make them more personal."
while not planning_prompt:
planning_prompt = get_input("Please enter concrete events")
new_events_prompt = f"Given the following current date and time: {day}, {today}T{time}:00 and events: '{planning_prompt}', format them as JSON objects with the following keys: summary, start_datetime, end_datetime, description, and reminder (int, minutes), in an array that can be parsed to create calendar events. Please use 1-2 emojis per complex sentence in the title and description to make them more personal."
print()
new_events_response = discuss_until_ok(new_events_prompt,bot='gpt-4-1106-preview',temperature=1)
events_json = try_to_load_json_from_string(new_events_response)
approved_events = []
for event in events_json:
print()
print(f"Add event: {event['summary']} to calendar?")
approval = get_input("Confirm changes?", "yes")
if approval == 'no' or approval == 'n':
continue
elif ((approval == 'no to all' or approval == 'nn') and get_input('This will add no new events, even ones you have approved to the calender. Confirm?', 'yes') == 'yes'):
return []
elif (approval == 'yes to all' or approval == 'yy') and get_input('This will add all new events, even ones you have disapproved to the calender. Confirm?', 'yes') == 'yes':
return events_json
else:
approved_events.append(event)
return approved_events
def completion_from_string(service, planning_prompt):
events = json.loads(events_from_prompt(service,planning_prompt))
unedited_events = [dict(e) for e in events]
for e in events:
e.pop('description')
e.pop('reminders')
events = json.dumps(events)
today = datetime.datetime.now().strftime('%Y-%m-%d, %A')
time = datetime.datetime.now().strftime('%H:%M')
if events:
update_events_prompt = f"From this query: \"{planning_prompt} [sent {today}, {time}]\", do as follows: 1. Pick out the titles/summaries of any events from the JSON array below that are discussed in the query. 2. Make a new JSON array consisting of just the events correlating to the titles/summaries from step 1. now with a green checkmark emoji (✅) prepended onto the summary. The emoji shouldn't replace or remove other emojis present and should be on the far left of it. Make sure to be unambiguous and escape any special characters that have special meaning in JSON by putting backslash before it. JSON Array: {events}."
print()
update_events_response = discuss_until_ok(update_events_prompt)
updated_events = try_to_load_json_from_string(update_events_response)
event_ids = [obj['id'] for obj in updated_events]
revised_events = [{k: v for k, v in obj.items() if k != 'id'} for obj in updated_events]
for e, u in zip(revised_events,unedited_events):
e['description'] = u['description']
e['reminders'] = u['reminders']
update_events(service,event_ids,revised_events)
return []
def multiquery_from_string(service, planning_prompt):
commands_string = ""
invalid_commands = ["SEQUENTIALLY"]
for command in COMMANDS:
if not command['name'] in invalid_commands:
            commands_string += f" {command['name']}, {command['description']}"
print()
break_down_prompt_response = discuss_until_ok(break_down_prompt)
subqueries_and_commands = try_to_load_json_from_string(break_down_prompt_response)
print()
new_events = []
for action in subqueries_and_commands:
action['subquery'] = f"I need just this part done: {action['subquery']}, from this list: {planning_prompt}. Everything before this in the list has been done already."
print(f"\nBot response:\nAlright, we'll {action['command']} plans!")
for command in COMMANDS:
if action['command'] == command['name']:
command_function = command['command']
events = command_function(service, action['subquery'])
new_events.append(events)
print("Waiting 5 seconds to do next task.")
time.sleep(5)
return new_events
def events_from_paste(service, planning_prompt):
events_json = try_to_load_json_from_string(planning_prompt)
approved_events = []
for event in events_json:
print()
print(f"Add event: {event['summary']} to calendar?")
approval = get_input("Confirm changes?", "yes")
if approval == 'no' or approval == 'n':
continue
elif ((approval == 'no to all' or approval == 'nn') and get_input('This will add no new events, even ones you have approved to the calender. Confirm?', 'yes') == 'yes'):
return []
elif (approval == 'yes to all' or approval == 'yy') and get_input('This will add all new events, even ones you have disapproved to the calender. Confirm?', 'yes') == 'yes':
return events_json
else:
approved_events.append(event)
print(approved_events)
return approved_events
def print_help(service,planning_prompt):
command_listing = "\n"
for command in COMMANDS:
command_listing += f"{command['name']} - {command['description']}\n"
print(command_listing)
return []
def manual_planning_main(service, planning_prompt=""):
choice = get_input("What would you like to do? Type help for list of commands.", "make").upper()
chosen_command = None
for command in COMMANDS:
if command['name'] == choice:
chosen_command = command
break
if chosen_command:
print(f"\nBot response:\nAlright, we'll {chosen_command['name']} plans!")
if not choice == "HELP":
planning_prompt = get_input("Please enter a general plan", None)
return chosen_command['command'](service, planning_prompt)
else:
print("\nBot response:\nSorry, I couldn't understand your response.")
def intelligent_planning_main(service, planning_prompt=""):
planning_prompt = get_input("Please enter a general plan", None)
if not planning_prompt:
return generate_events_from_string(service,"")
command_choosing_prompt = f"Given the following query \"{planning_prompt}\", do the following: 1. From the perspective of a calendar app, identify the intent of the query. 2. Identify the most suitable command to satisfy the query use from these:"
for command in COMMANDS:
command_choosing_prompt += f" {command['name']}, {command['description']}"
command_choosing_prompt += " Make sure only to include one mention of an existing command in your response."
command_choosing_response = discuss_until_ok(command_choosing_prompt)
chosen_command = None
for command in COMMANDS:
if command['name'] in command_choosing_response:
chosen_command = command
break
if chosen_command:
print(f"\nBot response:\nAlright, we'll {chosen_command['name']} plans!")
return chosen_command['command'](service, planning_prompt)
else:
print("\nBot response:\nSorry, I couldn't understand your response.")
COMMANDS = [{
"name": "MAKE",
"description": "makes a new event from prompt.",
"command": generate_events_from_string
}, {
"name": "EDIT",
"description": "finds and edits calendar events.",
"command": update_events_from_string
}, {
"name": "DELETE",
"description": "removes events from the calendar.",
"command": delete_events_from_string
}, {
"name": "COMPLETE",
"description": "marks events as complete or done.",
"command": completion_from_string
}, {
"name": "GET THEN MAKE",
"description": "intelligently makes events.",
"command": generate_events_from_context
}, {
"name": "HELP",
"description": "displays the list of possible commands.",
"command": print_help
}, {
"name": "REGRET",
"description": "undos the current command.",
"command": None
}, {
"name": "PASTE",
"description": "Makes events from an already processed request.",
"command": events_from_paste
}, {
"name": "EXIT",
"description": "exits CalendarMe.",
"command": None
}]
def parse_time(time_str):
return datetime.datetime.strptime(time_str, "%H:%M").time()
def parse_minutes(minutes_str):
minutes = int(minutes_str)
return datetime.timedelta(minutes=minutes)
def parse_date_time(date_time_str, timezone_str='Etc/GMT-2'):
timezone = pytz.timezone(timezone_str)
date_time = parser.parse(date_time_str)
date_time = timezone.localize(date_time)
return date_time
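
# Example, not part of the original file: parse_date_time("2024-01-10 14:30") returns
# a timezone-aware datetime in the fixed-offset 'Etc/GMT-2' zone (UTC+02:00), i.e.
# isoformat() == "2024-01-10T14:30:00+02:00".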
def calculate_time_with_delta(base_time, delta):
base_datetime = datetime.datetime.combine(datetime.date.today(), base_time)
result_datetime = base_datetime + delta
return result_datetime.time()
def scry(service):
now = get_now().isoformat()
print('Getting the upcoming 5 events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=5, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
print()
if not events:
print('No upcoming events found.')
return
    # Print the start time and summary of each upcoming event
for event in events:
print(event)
start = event['start'].get('dateTime', event['start'].get('date'))
print(start, event['summary'])
def create_event(service, event_title, start_datetime, end_datetime, description="No description", reminder=None):
event = {
'summary': event_title,
'description': description,
'start': {
'dateTime': start_datetime,
'timeZone': 'Etc/GMT-2',
},
'end': {
'dateTime': end_datetime,
'timeZone': 'Etc/GMT-2',
},
}
if reminder is not None:
event['reminders'] = {
'useDefault': False,
'overrides': reminder
}
else:
event['reminders'] = {
'useDefault': False,
'overrides': []
}
event = service.events().insert(calendarId='primary', body=event).execute()
print('Event created: %s' % (event.get('htmlLink')))
def parse_time_input(time_input, default_time):
if time_input.startswith("+"):
minutes = int(time_input[1:])
time = calculate_time_with_delta(
default_time, datetime.timedelta(minutes=minutes)).time()
else:
time = datetime.datetime.strptime(time_input, '%H:%M').time()
return time
def get_input(msg,default_value=None,default_msg=""):
if not default_value == None:
default_msg = f" (Default: {str(default_value)+default_msg})"
user_input = input(msg+default_msg+": ") or default_value
if user_input == "" or user_input == None:
return
if user_input == "exit" or user_input == "quit":
exit()
if user_input == 'scry':
raise ScryException()
if user_input == 'regret':
raise RegretException()
if user_input.startswith('next'):
try:
num_events = int(user_input[len("next"):].strip())
except ValueError:
num_events = 1
raise GetNextEventException(num_events)
if user_input.startswith('between'):
try:
start_time_str = user_input[len("between"):].strip()[:16]
end_time_str = user_input[len("between"):].strip()[17:]
start_time = parse_date_time(start_time_str).replace(second=0).isoformat()
end_time = parse_date_time(end_time_str).replace(second=0).isoformat()
except ValueError:
print('Invalid date/time format. Please try again.')
start_time, end_time = None, None
raise GetEventsBetweenTimesException(start_time, end_time)
return user_input
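
# Descriptive note, not part of the original file: besides returning plain text,
# get_input() intercepts a few meta-commands and raises them as exceptions that the
# __main__ loop below handles, e.g. "scry", "regret", "next 3", or
# "between 2024-01-10 09:00 2024-01-10 17:00".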
if __name__ == '__main__':
creds = None
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open('token.json', 'w') as token:
token.write(creds.to_json())
events = []
try:
service = build('calendar', 'v3', credentials=creds)
print("Welcome to CalendarMe!")
print("----------------------------")
while True:
try:
events_json = manual_planning_main(service)
if events_json is not None:
for event in events_json:
reminder_input = event.get('reminder')
reminder = [{'method': 'popup', 'minutes': int(
reminder_input)}] if reminder_input else None
weather = get_weather(event['start_datetime'])
weather_string = ""
if weather is not None:
weather_string = f"\n\nWeather ({readable_time(event['start_datetime'],'%Y-%m-%dT%H:%M:%S')}):\nDescription: {weather['description']}\nTemperature: {weather['temperature']}°C"
print(f"Weather details added to description{weather_string}")
creds = None
if os.path.exists('token.json'):
creds = Credentials.from_authorized_user_file('token.json', SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open('token.json', 'w') as token:
token.write(creds.to_json())
service = build('calendar', 'v3', credentials=creds)
event_end_time = datetime.datetime.strptime(event['end_datetime'].split('T')[0] + " " + event['end_datetime'].split('T')[1][:8], '%Y-%m-%d %H:%M:%S')
time_now = datetime.datetime.now()
if event_end_time <= time_now:
event['summary'] = "✅" + event['summary']
create_event(service, event['summary'], event['start_datetime']+"+01:00",
event['end_datetime']+"+01:00", event['description']+weather_string, reminder)
except ScryException:
scry(service)
except RegretException:
print("Let's try again!")
except GetNextEventException as e:
num_events = e.num_events
get_next_event(service, num_events)
except GetEventsBetweenTimesException as e:
start_time = e.start_time
end_time = e.end_time
get_events_between_times(service, start_time, end_time)
except HttpError as error:
print('An error occurred: %s' % error)
| [
"Given the following current date and time: PLACEHOLDER, PLACEHOLDERTPLACEHOLDER and planning prompt: 'PLACEHOLDER', format the prompt's contents as JSON objects with the following keys: summary, start_datetime, end_datetime, description, and reminder (int, minutes), in an array that can be parsed to create calendar events. Please use 1-2 emojis per complex sentence in the title and description to make them more personal.",
"Given the following query \"PLACEHOLDER\", do the following: 1. From the perspective of a calendar app, identify the intent of the query. 2. Identify the most suitable command to satisfy the query use from these:",
"Please enter a general plan",
"Given the following current date and time: PLACEHOLDER, PLACEHOLDERTPLACEHOLDER:00, the following sequence of preexisting plans: PLACEHOLDER and planning prompt: 'PLACEHOLDER', do the following. 1. Identify the intent of the prompt. 2. Find any completely free time windows between the preexisting plans to use for new events. If none exists, after the events is fine. 3. In detail, list your intended additions with respect to the query. 4. Make a new JSON array consisting of just the additions with the following keys: summary, start_datetime, end_datetime, description, and reminder (int, minutes), in an array that can be parsed to create calendar events. Please use 1-2 emojis per complex sentence in the title and description to make them more personal.",
" PLACEHOLDER, PLACEHOLDER",
"Given the following current date and time: PLACEHOLDER, PLACEHOLDERTPLACEHOLDER:00 and events: 'PLACEHOLDER', format them as JSON objects with the following keys: summary, start_datetime, end_datetime, description, and reminder (int, minutes), in an array that can be parsed to create calendar events. Please use 1-2 emojis per complex sentence in the title and description to make them more personal.",
"From this query: \"PLACEHOLDER [sent PLACEHOLDER, PLACEHOLDER]\", do as follows: 1. Pick out the titles/summaries of any events from the JSON array below that are discussed in the query. 2. Make a new JSON array consisting of just the events correlating to the titles/summaries from step 1. now with a green checkmark emoji (✅) prepended onto the summary. The emoji shouldn't replace or remove other emojis present and should be on the far left of it. Make sure to be unambiguous and escape any special characters that have special meaning in JSON by putting backslash before it. JSON Array: PLACEHOLDER.",
" Make sure only to include one mention of an existing command in your response.",
"From this query: \"PLACEHOLDER [sent PLACEHOLDER, PLACEHOLDER]\", do as follows: 1. Identify the intent of the query. 2. Pick out the ids of any events from the JSON array below that are described by the intent or query. 3. Make an array consisting of just the ids of the to-be deleted events correlating to the titles/summaries from step 2. JSON Array: PLACEHOLDER.",
"Please enter concrete events",
"This JSON arrayPLACEHOLDER\n\ngives this error:\nPLACEHOLDER\nplease fix it.",
"From this query: \"PLACEHOLDER [sent PLACEHOLDER, PLACEHOLDER]\", do as follows: 1. Identify the intent of the query. 2. Pick out the titles/summaries of any events from the JSON array below that are discussed in the query. 3. In detail, list your intended edits to the events with respect to the query. 4. Make a new JSON array consisting of just the now revised events correlating to the titles/summaries from step 2. Make sure to be unambiguous, autonomously and intelligently making any decisions necessary to satisfy the query, and escape any special characters that have special meaning in JSON by putting backslash before it. JSON Array: PLACEHOLDER.",
"Only respond in JSON array code block. Given the following query \"PLACEHOLDER\", do the following: 1. From the perspective of a calendar app, identify the intents of the query. 2. Identify the most suitable commands to satisfy the query use from these: PLACEHOLDER Make an array matching the intent of each subpart of the query using a key named \"subquery\" and the chosen command for that subquery with a key named \"command\" and return a JSON array of this.",
"From this query: \"PLACEHOLDER [sent PLACEHOLDER, PLACEHOLDER]\", do as follows: 1. Identify the intent of the query. 2. Explain in depth the most important times mentioned in the query. If no day information is present in the query, assume today. If no temporal hints are present in the query (other than the query time stamp), simply start at 00:00 and end at 23:59 of the same day. 3. End your response with an unambiguous time frame that covers the original/current plans from step 2. Fill out the following completely with no changes to the format: 'Original plans: YYYY-MM-DD HH:MM to YYYY-MM-DD HH:MM'."
] |
2024-01-10 | prashantevolvus/chatgpt | test1.py | import os
import openai
print(os.getenv("OPENAI_API_KEY"))
openai.api_key="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"
response = openai.Completion.create(
model="text-davinci-003",
prompt="I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with \"Unknown\".\n\nQ: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: Unknown\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.\n\nQ: How many squigs are in a bonk?\nA: Unknown\n\nQ: Where is the Valley of Kings?\nA:",
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["\n"]
)
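
# Not part of the original script: a hedged example of reading the generated text
# back out of the legacy Completions response object returned above.
print(response["choices"][0]["text"])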
| [
"I am a highly intelligent question answering bot. If you ask me a question that is rooted in truth, I will give you the answer. If you ask me a question that is nonsense, trickery, or has no clear answer, I will respond with \"Unknown\".\n\nQ: What is human life expectancy in the United States?\nA: Human life expectancy in the United States is 78 years.\n\nQ: Who was president of the United States in 1955?\nA: Dwight D. Eisenhower was president of the United States in 1955.\n\nQ: Which party did he belong to?\nA: He belonged to the Republican Party.\n\nQ: What is the square root of banana?\nA: Unknown\n\nQ: How does a telescope work?\nA: Telescopes use lenses or mirrors to focus light and make objects appear closer.\n\nQ: Where were the 1992 Olympics held?\nA: The 1992 Olympics were held in Barcelona, Spain.\n\nQ: How many squigs are in a bonk?\nA: Unknown\n\nQ: Where is the Valley of Kings?\nA:"
] |
2024-01-10 | bahamutww/AI-Vtuber | utils~faiss_handler.py | # -*- coding: UTF-8 -*-
"""
@Project : AI-Vtuber
@File : langchain_pdf_local.py
@Author : HildaM
@Email : [email protected]
@Date : 2023/06/17 4:44 PM
@Description : Local vector database configuration
"""
import json
import logging
from langchain.vectorstores import FAISS
import os
from tqdm.auto import tqdm
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import DirectoryLoader, TextLoader
from utils.embeddings import EMBEDDINGS_MAPPING, DEFAULT_MODEL_NAME
import tiktoken
import zipfile
import pickle
tokenizer_name = tiktoken.encoding_for_model('gpt-4')
tokenizer = tiktoken.get_encoding(tokenizer_name.name)
#######################################################################################################################
# Files handler
#######################################################################################################################
def check_existence(path):
return os.path.isfile(path) or os.path.isdir(path)
def list_files(directory, ext=".pdf"):
# List all files in the directory
files_in_directory = os.listdir(directory)
# Filter the list to only include PDF files
files_list = [file for file in files_in_directory if file.endswith(ext)]
return files_list
def list_pdf_files(directory):
# List all files in the directory
files_in_directory = os.listdir(directory)
# Filter the list to only include PDF files
pdf_files = [file for file in files_in_directory if file.endswith(".pdf")]
return pdf_files
def tiktoken_len(text):
# evaluate how many tokens for the given text
tokens = tokenizer.encode(text, disallowed_special=())
return len(tokens)
def get_chunks(docs, chunk_size=500, chunk_overlap=20, length_function=tiktoken_len):
# docs should be the output of `loader.load()`
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
length_function=length_function,
separators=["\n\n", "\n", " ", ""])
chunks = []
for idx, page in enumerate(tqdm(docs)):
source = page.metadata.get('source')
content = page.page_content
if len(content) > 50:
texts = text_splitter.split_text(content)
chunks.extend([str({'content': texts[i], 'chunk': i, 'source': os.path.basename(source)}) for i in
range(len(texts))])
return chunks
#######################################################################################################################
# Create FAISS object
#######################################################################################################################
"""
Supported models:
distilbert-dot-tas_b-b256-msmarco
"""
def create_faiss_index_from_zip(path_to_zip_file, embedding_model_name=None, pdf_loader=None,
chunk_size=500, chunk_overlap=20):
    # Resolve the embedding model name
if isinstance(embedding_model_name, str):
import copy
embeddings_str = copy.deepcopy(embedding_model_name)
else:
        embeddings_str = DEFAULT_MODEL_NAME  # default model
    # Select the embedding model
if embedding_model_name is None:
embeddings = EMBEDDINGS_MAPPING[DEFAULT_MODEL_NAME]
elif isinstance(embedding_model_name, str):
embeddings = EMBEDDINGS_MAPPING[embedding_model_name]
    # Create the directory that stores the vector database
    # On-disk layout of the stored files:
# structure: ./data/vector_base
# - source data
# - embeddings
# - faiss_index
store_path = os.getcwd() + "/data/vector_base/"
if not os.path.exists(store_path):
os.makedirs(store_path)
project_path = store_path
source_data = os.path.join(project_path, "source_data")
embeddings_data = os.path.join(project_path, "embeddings")
index_data = os.path.join(project_path, "faiss_index")
os.makedirs(source_data) # ./vector_base/source_data
os.makedirs(embeddings_data) # ./vector_base/embeddings
os.makedirs(index_data) # ./vector_base/faiss_index
else:
        logging.warning(
            "The vector database already exists; the existing database will be loaded by default. "
            "To load new data, delete vector_base under the data directory and restart.")
        logging.info("Loading the existing vector database files")
db = load_exist_faiss_file(store_path)
if db is None:
logging.error("加载旧数据库为空,数据库文件可能存在异常。请彻底删除vector_base文件夹后,再重新导入数据")
exit(-1)
return db
    # Extract the data archive
with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:
# extract everything to "source_data"
zip_ref.extractall(source_data)
    # Assemble the database metadata
db_meta = {"pdf_loader": pdf_loader.__name__, "chunk_size": chunk_size,
"chunk_overlap": chunk_overlap,
"embedding_model": embeddings_str,
"files": os.listdir(source_data),
"source_path": source_data}
with open(os.path.join(project_path, "db_meta.json"), "w", encoding="utf-8") as f:
# save db_meta.json to folder
json.dump(db_meta, f)
    # Handle the different file types
all_docs = []
for ext in [".txt", ".tex", ".md", ".pdf"]:
if ext in [".txt", ".tex", ".md"]:
loader = DirectoryLoader(source_data, glob=f"**/*{ext}", loader_cls=TextLoader,
loader_kwargs={'autodetect_encoding': True})
elif ext in [".pdf"]:
loader = DirectoryLoader(source_data, glob=f"**/*{ext}", loader_cls=pdf_loader)
else:
continue
docs = loader.load()
all_docs = all_docs + docs
    # Split the data into chunks
chunks = get_chunks(all_docs, chunk_size, chunk_overlap)
    # Compute the embedding vectors
text_embeddings = embeddings.embed_documents(chunks)
text_embedding_pairs = list(zip(chunks, text_embeddings))
    # Where the embeddings will be saved
embeddings_save_to = os.path.join(embeddings_data, 'text_embedding_pairs.pickle')
    # Save the data
with open(embeddings_save_to, 'wb') as handle:
pickle.dump(text_embedding_pairs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # Store the embeddings in a FAISS index
db = FAISS.from_embeddings(text_embedding_pairs, embeddings)
db.save_local(index_data)
return db
def find_file(file_name, directory):
for root, dirs, files in os.walk(directory):
if file_name in files:
return os.path.join(root, file_name)
return None # If the file was not found
def find_file_dir(file_name, directory):
for root, dirs, files in os.walk(directory):
if file_name in files:
return root # return the directory instead of the full path
return None # If the file was not found
# Load the local vector database
def load_exist_faiss_file(path):
    # Load the metadata
db_meta_json = find_file("db_meta.json", path)
if db_meta_json is not None:
with open(db_meta_json, "r", encoding="utf-8") as f:
db_meta_dict = json.load(f)
else:
logging.error("vector_base向量数据库已损坏,请彻底删除该文件夹后,再重新导入数据!")
exit(-1)
    # Resolve the embedding model
embedding = EMBEDDINGS_MAPPING[db_meta_dict["embedding_model"]]
    # Load index.faiss
faiss_path = find_file_dir("index.faiss", path)
if faiss_path is not None:
db = FAISS.load_local(faiss_path, embedding)
return db
else:
logging.error("加载index.faiss失败,模型已损坏。请彻底删除vector_base文件夹后,再重新导入一次数据")
exit(-1)
# Test code
if __name__ == "__main__":
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
zip_file_path = "data/伊卡洛斯百度百科.zip"
model_name = "sebastian-hofstaetter/distilbert-dot-tas_b-b256-msmarco"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': False}
embeddings = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs=model_kwargs,
encode_kwargs=encode_kwargs)
    # create_faiss_index_from_zip expects a model name key (looked up in EMBEDDINGS_MAPPING), not an embeddings object,
    # and it already returns the FAISS db, so use its return value directly
    db = create_faiss_index_from_zip(path_to_zip_file=zip_file_path, embedding_model_name=model_name,
                                     pdf_loader=PyPDFLoader)
if db is not None:
logging.info("加载本地数据库成功!") | [] |
2024-01-10 | bahamutww/AI-Vtuber | utils~my_handle.py | import os
import logging
from .config import Config
from .common import Common
from .audio import Audio
from .logger import Configure_logger
class My_handle():
    # Common utility helper
common = None
    # Configuration
config = None
audio = None
room_id = None
proxy = None
# proxy = {
# "http": "http://127.0.0.1:10809",
# "https": "http://127.0.0.1:10809"
# }
session_config = None
sessions = {}
current_key_index = 0
    # Live room ID
room_id = None
before_prompt = None
after_prompt = None
    # Filter configuration
filter_config = None
chat_type = None
need_lang = None
# openai
openai_config = None
# chatgpt
chatgpt_config = None
# claude
claude_config = None
# chatterbot
chatterbot_config = None
# langchain_pdf
langchain_pdf_config = None
# chatglm
chatglm_config = None
# langchain_pdf_local
langchain_pdf_local_config = None
    # Audio synthesis backend
audio_synthesis_type = None
log_file_path = None
def __init__(self, config_path):
self.common = Common()
self.config = Config(config_path)
self.audio = Audio()
        # Log file path
file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
Configure_logger(file_path)
self.proxy = None
try:
            # Set the initial session state
self.session_config = {'msg': [{"role": "system", "content": self.config.get('chatgpt', 'preset')}]}
self.sessions = {}
self.current_key_index = 0
            # Live room ID
self.room_id = self.config.get("room_display_id")
self.before_prompt = self.config.get("before_prompt")
self.after_prompt = self.config.get("after_prompt")
            # Filter configuration
self.filter_config = self.config.get("filter")
self.chat_type = self.config.get("chat_type")
self.need_lang = self.config.get("need_lang")
# openai
self.openai_config = self.config.get("openai")
# chatgpt
self.chatgpt_config = self.config.get("chatgpt")
# claude
self.claude_config = self.config.get("claude")
# chatterbot
self.chatterbot_config = self.config.get("chatterbot")
# langchain_pdf
self.langchain_pdf_config = self.config.get("langchain_pdf")
# chatglm
self.chatglm_config = self.config.get("chatglm")
# langchain_pdf_local
self.langchain_pdf_local_config = self.config.get("langchain_pdf_local")
            # Audio synthesis backend
self.audio_synthesis_type = self.config.get("audio_synthesis_type")
logging.info("配置文件加载成功。")
except Exception as e:
logging.info(e)
return None
        # Instantiate the chat backend
if self.chat_type == "gpt":
from utils.chatgpt import Chatgpt
self.chatgpt = Chatgpt(self.openai_config, self.chatgpt_config)
elif self.chat_type == "claude":
from utils.claude import Claude
self.claude = Claude(self.claude_config)
elif self.chat_type == "chatterbot":
            from chatterbot import ChatBot  # import the chatbot library
try:
self.bot = ChatBot(
self.chatterbot_config["name"], # 聊天机器人名字
database_uri='sqlite:///' + self.chatterbot_config["db_path"] # 数据库URI,数据库用于存储对话历史
)
except Exception as e:
logging.info(e)
exit(0)
elif self.chat_type == "langchain_pdf" or self.chat_type == "langchain_pdf+gpt":
from utils.langchain_pdf import Langchain_pdf
self.langchain_pdf = Langchain_pdf(self.langchain_pdf_config, self.chat_type)
elif self.chat_type == "chatglm":
from utils.chatglm import Chatglm
self.chatglm = Chatglm(self.chatglm_config)
elif self.chat_type == "langchain_pdf_local":
from utils.langchain_pdf_local import Langchain_pdf_local
self.langchain_pdf = Langchain_pdf_local(self.langchain_pdf_local_config, self.chat_type)
elif self.chat_type == "game":
exit(0)
        # Log file path
self.log_file_path = "./log/log-" + self.common.get_bj_time(1) + ".txt"
if os.path.isfile(self.log_file_path):
            logging.info(f'Log file {self.log_file_path} already exists, skipping')
else:
with open(self.log_file_path, 'w') as f:
f.write('')
            logging.info(f'Log file {self.log_file_path} created')
def get_room_id(self):
return self.room_id
def commit_handle(self, user_name, content):
        # If the danmaku must start with a given prefix and does not, return
if self.filter_config["before_must_str"] and not any(content.startswith(prefix) for prefix in self.filter_config["before_must_str"]):
return
else:
for prefix in self.filter_config["before_must_str"]:
if content.startswith(prefix):
                    content = content[len(prefix):]  # strip the matched prefix
break
        # If the danmaku must end with a given suffix and does not, return
if self.filter_config["after_must_str"] and not any(content.endswith(prefix) for prefix in self.filter_config["after_must_str"]):
return
else:
for prefix in self.filter_config["after_must_str"]:
if content.endswith(prefix):
                    content = content[:-len(prefix)]  # strip the matched suffix
break
        # Log the danmaku message sent by the current user
logging.info(f"[{user_name}]: {content}")
        # Skip messages that are entirely punctuation
if self.common.is_punctuation_string(content):
return
        # Replace newlines with commas
content = content.replace('\n', ',')
        # Contains banned words or links
if self.common.profanity_content(content) or self.common.check_sensitive_words2(self.filter_config["badwords_path"], content) or \
self.common.is_url_check(content):
logging.warning(f"违禁词/链接:{content}")
return
        # Language check
if self.common.lang_check(content, self.need_lang) is None:
logging.warning("语言检测不通过,已过滤")
return
        # Dispatch on the configured chat type
if self.chat_type == "gpt":
content = self.before_prompt + content + self.after_prompt
            # Call the GPT API and get the reply
resp_content = self.chatgpt.get_gpt_resp(user_name, content)
if resp_content is not None:
                # Log the reply returned by ChatGPT
                logging.info(f"[AI reply to {user_name}]: {resp_content}")
            else:
                resp_content = ""
                logging.info("Warning: no response from GPT")
elif self.chat_type == "claude":
content = self.before_prompt + content + self.after_prompt
resp_content = self.claude.get_claude_resp(content)
if resp_content is not None:
                # Log the returned reply
                logging.info(f"[AI reply to {user_name}]: {resp_content}")
            else:
                resp_content = ""
                logging.info("Warning: no response from Claude")
elif self.chat_type == "chatterbot":
            # Generate the reply
            resp_content = self.bot.get_response(content).text
            logging.info(f"[AI reply to {user_name}]: {resp_content}")
        elif self.chat_type == "langchain_pdf" or self.chat_type == "langchain_pdf+gpt":
            # Use langchain only, without calling GPT, to save tokens and do a simple local data search
            resp_content = self.langchain_pdf.get_langchain_pdf_resp(self.chat_type, content)
            logging.info(f"[AI reply to {user_name}]: {resp_content}")
        elif self.chat_type == "chatglm":
            # Generate the reply
            resp_content = self.chatglm.get_chatglm_resp(content)
            logging.info(f"[AI reply to {user_name}]: {resp_content}")
        elif self.chat_type == "langchain_pdf_local":
            resp_content = self.langchain_pdf.get_langchain_pdf_local_resp(self.chat_type, content)
            print(f"[AI reply to {user_name}]: {resp_content}")
elif self.chat_type == "game":
            # game1 is not defined or imported in this module; the calls below were unreachable dead code after the return
            # g1 = game1()
            # g1.parse_keys_and_simulate_key_press(content.split(), 2)
            return
else:
            # Echo mode: repeat the input back
resp_content = content
# logger.info("resp_content=" + resp_content)
        # Record the AI reply in the log file
        # with open(self.log_file_path, "r+", encoding="utf-8") as f:
        # content = f.read()
        # # Move the file pointer back to the start of the file (so the newest content always shows at the top when the log is read during the stream)
        # f.seek(0, 0)
        # # This implementation feels a bit inefficient, though
        # f.write(f"[AI reply to {user_name}]: {resp_content}\n" + content)
        # Synthesize the audio (edge-tts / vits) and play it
self.audio.audio_synthesis(self.audio_synthesis_type, self.config.get(self.audio_synthesis_type), self.filter_config, resp_content)
| [
"None"
] |
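A minimal way to drive My_handle outside of a live danmaku stream; the config path and user name below are placeholders, and commit_handle applies the configured filters, queries the selected chat backend, and sends the reply to audio synthesis:

from utils.my_handle import My_handle

handle = My_handle("config.json")  # placeholder config path
handle.commit_handle("viewer_42", "Hello, how is the stream going?")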
2024-01-10 | Akphawee/NLP | test2.py | import streamlit as st
import openai
import json
import pandas as pd
user_api_key = st.sidebar.text_input("OpenAI API key", type="password")
client = openai.OpenAI(api_key=user_api_key)
prompt = """Act as an AI Mandarin assistant. You will receive a
piece of writing and you should give word's information.
List the informations in a JSON array.
 Each information should have 5 fields:
- "word" - the word in a piece of writing that is interesting
- "pinyin" - pinyin of the word
- "part of speech" - part of speech of the word - noun, verb, adjective, adverb, etc.
- "difficulty" - difficulty level for each word - beginner, intermediate, advanced
- "translation" - word's translation in English
Don't say anything at first. Wait for the user to say something.
"""
st.title('Mandarin detector')
st.markdown('Input a Mandarin paragraph. \n\
The AI will give you the interesting information in your paragraph.')
userinput = st.text_area("Enter your paragraph:", "Your text here")
if st.button('Submit'):
messages_so_far = [
{"role": "system", "content": prompt},
{'role': 'user', 'content': userinput},
]
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages_so_far
)
st.markdown('**response:**')
suggestion_dictionary = response.choices[0].message.content
sdict = json.loads(suggestion_dictionary)
print (sdict)
response_df = pd.DataFrame.from_dict(sdict)
print(response_df)
st.table(response_df)
| [
"Act as an AI Mandarin assistant. You will receive a \n piece of writing and you should give word's information.\n List the informations in a JSON array.\n Each information should have 4 fields:\n - \"word\" - the word in a piece of writing that is interesting \n - \"pinyin\" - pinyin of the word\n - \"part of speech\" - part of speech of the word - noun, verb, adjective, adverb, etc.\n - \"difficulty\" - difficulty level for each word - beginner, intermediate, advanced\n - \"translation\" - word's translation in English\n\n Don't say anything at first. Wait for the user to say something.\n "
] |
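The json.loads call in test2.py assumes the model returns a bare JSON array; gpt-3.5-turbo sometimes wraps its answer in prose or a code fence, which makes that line raise. One defensive option (the helper below is a sketch with a made-up name, not part of the app) is to pull out the first JSON array before parsing:

import json
import re

def parse_json_array(text: str):
    # Best-effort extraction of the first JSON array in a model reply
    match = re.search(r"\[.*\]", text, re.DOTALL)
    if match is None:
        raise ValueError("no JSON array found in the model output")
    return json.loads(match.group(0))

# sdict = parse_json_array(response.choices[0].message.content)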
2024-01-10 | teticio/llama-squad | test_openai_squad.py | import csv
import json
import logging
import os
from dataclasses import dataclass, field
from time import sleep
from typing import Optional
import openai
from datasets import load_dataset
from tqdm import tqdm
from transformers import HfArgumentParser
from utils import extract_answer
logger = logging.getLogger()
openai.organization = os.getenv("OPENAI_ORGANIZATION")
openai.api_key = os.getenv("OPENAI_API_KEY")
@dataclass
class ScriptArguments:
model_name: Optional[str] = field(default="gpt-3.5-turbo")
output_csv_file: Optional[str] = field(default="results/results_openai.csv")
debug: Optional[bool] = field(default=False)
shuffle: Optional[bool] = field(default=True)
seed: Optional[int] = field(default=None)
num_samples: Optional[int] = field(default=1000)
parser = HfArgumentParser(ScriptArguments)
script_args = parser.parse_args_into_dataclasses()[0]
logging.basicConfig(level=logging.DEBUG if script_args.debug else logging.INFO)
dataset = load_dataset("squad_v2")["validation"]
if script_args.shuffle:
dataset = dataset.shuffle(seed=script_args.seed)
if script_args.num_samples is not None:
dataset = dataset.select(range(script_args.num_samples))
with open(script_args.output_csv_file, "w") as file:
writer = csv.writer(file)
writer.writerow(
[
"Context",
"Question",
"Correct answers",
"Model answer",
"Full response",
"Exact match",
]
)
for sample in tqdm(dataset):
prompt = f"""\
Extract from the following context the minimal span word for word that best answers the question. Think step by step and explain your reasoning. Then give the answer in JSON format as follows:
```json
{{
"answer": ...
}}
```
If the answer is not in the context, the answer should be "?".
Context: {sample["context"]}
Question: {sample["question"]}"""
answers = sample["answers"]["text"]
if len(answers) == 0:
answers = ["?"]
logger.debug("Correct answers: %s", answers)
for _ in range(5):
try:
completion = openai.ChatCompletion.create(
model=script_args.model_name,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
break
except (openai.error.Timeout, openai.error.RateLimitError):
logger.warning("Sleeping for %s seconds", 2**_)
sleep(2**_)
continue
full_response = completion.choices[0].message.content
model_answer = extract_answer(full_response)
logger.debug("Model answer: %s", model_answer)
exact_match = model_answer is not None and model_answer in answers
writer.writerow(
[
sample["context"],
sample["question"],
json.dumps(answers),
model_answer,
full_response,
exact_match,
]
)
file.flush()
| [
"Extract from the following context the minimal span word for word that best answers the question. Think step by step and explain your reasoning. Then give the answer in JSON format as follows:\n```json\n{\n \"answer\": ...\n}\n```\nIf the answer is not in the context, the answer should be \"?\".\nContext: PLACEHOLDER\nQuestion: PLACEHOLDER",
"You are a helpful assistant."
] |
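test_openai_squad.py imports extract_answer from a utils module that is not part of this record. A plausible minimal implementation, assuming the model follows the ```json answer format the prompt asks for (the real helper may differ), is:

import json
import re

def extract_answer(full_response: str):
    # Pull the "answer" field out of a ```json ... ``` block in the model reply
    match = re.search(r"```json\s*(\{.*?\})\s*```", full_response, re.DOTALL)
    if match is None:
        return None
    try:
        return json.loads(match.group(1)).get("answer")
    except json.JSONDecodeError:
        return None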
2024-01-10 | tingofurro/datasets | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
"""Yields examples."""
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
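The builder above registers a single plain_text configuration with one train split and a single text feature; consuming it through the datasets library is a one-liner (the first call downloads and extracts the full corpus, so expect it to take a while and use a lot of disk):

from datasets import load_dataset

ds = load_dataset("openwebtext", split="train")
print(ds[0]["text"][:200])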
2024-01-10 | GabiCtrlZ/open-ai-shell | shell.py | from colorama import Fore, Style
import os
import signal
import subprocess
import sys
import json
from pathlib import Path
from dotenv import load_dotenv
import readline
import getch
import openai
# loading env
env_path = Path('.') / '.env'
load_dotenv(dotenv_path=env_path)
# loading options
f = open('options.json')
options = json.load(f)
openai.api_key = os.getenv('OPENAI_API_KEY')
MODEL = "davinci"
# get open ai command
def get_command(prompt, option, stop):
results = openai.Answer.create(
search_model=MODEL,
model=MODEL,
question=prompt,
examples_context=option['examples_context'],
examples=option['examples'],
max_tokens=100,
documents=[],
stop=stop,
)
if results:
return results['answers'][0]
# format home path
def get_current_path():
return os.getcwd().replace(os.path.expanduser('~'), '~')
# shell texts and style
def default_text():
return f"{Fore.CYAN}[{get_current_path()}]{Fore.GREEN}{Style.BRIGHT}$ {Style.RESET_ALL}"
def machine_text(text):
return f"{Fore.GREEN}{Style.BRIGHT}{MODEL}: {Fore.MAGENTA}{Style.NORMAL}{text}{Style.RESET_ALL}"
# handle the cd command
def handle_cd(request):
if request.startswith("cd "):
os.chdir(request[3:])
return True
if request.strip() == "cd":
os.chdir(os.path.expanduser('~'))
return True
return False
# handle the ai
def handle_ai(request):
option = 'bash'
stop = ["\n", "<|endoftext|>"]
if request.startswith('python:'):
option = 'python'
stop = ["<|endoftext|>"]
elif request.startswith('node:'):
option = 'node'
stop = ["<|endoftext|>"]
print(machine_text("🧠 Thinking..."))
new_command = get_command(request, options[option], stop)
if not new_command:
print(machine_text("Unable to figure out how to do that"))
return
print(machine_text(new_command))
if not option == 'bash':
return
print(default_text() + new_command)
key_stroke = getch.getch()
if key_stroke == '\n':
os.system(new_command)
# shell running and stuff
def main():
print(machine_text("Hello."))
while True:
try:
request = input(default_text())
except EOFError:
print("")
print(machine_text("Farewell, human."))
sys.exit(0)
except KeyboardInterrupt:
print("")
continue
if not request.strip():
continue
if request.strip() == "exit":
sys.exit(machine_text("Farewell, human."))
if handle_cd(request):
continue
if request.startswith('->'):
handle_ai(request[2:])
# do ai stuff in here
continue
os.system(request)
if __name__ == "__main__":
main()
| [] |
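shell.py reads its few-shot prompts from options.json, which is not included in this record. The structure implied by the code (one entry per backend key used in handle_ai, each carrying the examples_context and examples fields forwarded to openai.Answer.create) could look like the sketch below; the example rows are placeholders, not the repo's actual prompts:

import json

options = {
    "bash": {
        "examples_context": "Translate natural-language requests into single-line bash commands.",
        "examples": [
            ["list all files, including hidden ones", "ls -la"],
            ["show disk usage of the current directory", "du -sh ."],
        ],
    },
    # "python" and "node" entries follow the same shape
}

with open("options.json", "w") as f:
    json.dump(options, f, indent=2)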
2024-01-10 | ShannonAI/GPT-CLS-CARP | tests~debug~userquest.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@file: tests/debug/userquest.py
@time: 2022/12/06 20:03
@desc:
"""
import os

import openai
def get_response_via_dynamic_user():
openai.api_key = "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"
prompt = "This is an overall sentiment classifier. \nFirst, list clues and explain the reasoning process for determining the sentiment of INPUT sentence.\nNext, based on the clues and the reasoning process, classify the sentiment of the INPUT sentence as Positive or Negative.\n\nINPUT: though the film is well intentioned , one could rent the original and get the same love story and parable \nClues and the reasoning process: Clue 1: The phrase \"well intentioned\" implies that the film is made with good intentions and is likely to be well-received.\nClue 2: The phrase \"same love story and parable\" suggests that the film is faithful to the source material and therefore likely to be enjoyable. \nReasoning: The combination of these two clues suggests that the overall sentiment of the sentence is positive.\nSENTIMENT: Positive\n\nINPUT: shiner can certainly go the distance , but is n't world championship material \nClues and the reasoning process: 1. The sentence contains two positive words \"shiner\" and \"certainly\" which provide a clue that the sentiment of the sentence is likely positive. \n2. The phrase \"go the distance\" is a metaphor commonly used to indicate success and perseverance, which suggests a positive sentiment.\n3. The inclusion of the phrase \"world championship material\" implies that the shiner is seen as capable of achieving a high level of success, which is indicative of a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: it falls far short of poetry , but it 's not bad prose \nClues and the reasoning process: 1. The presence of the word \"but\" indicates a contrast between two ideas and usually implies a positive sentiment. \n2. The word \"not\" is used to negate the following word, \"bad\". This implies a positive sentiment. \n3. The phrase \"far short\" implies that the sentence is comparing the quality of the prose to something of higher quality, such as poetry. This implies that the quality of the prose is still positive, even if it is not as good as poetry. \nOverall, the clues and reasoning support the positive sentiment of the input sentence.\nSENTIMENT: Positive\n\nINPUT: the rare imax movie that you 'll wish was longer than an hour \nClues and the reasoning process: 1. The word \"rare\" suggests that the movie is unique and special in some way. \n2. The use of the word \"wish\" implies that the person has a positive sentiment towards the movie. \n3. The phrase \"longer than an hour\" implies that the person enjoyed the movie and wanted it to last longer. \nOverall, these clues and the reasoning process indicate that the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: watching the powerpuff girls movie , my mind kept returning to one anecdote for comparison the cartoon in japan that gave people seizures \nClues and the reasoning process: The first clue that the sentiment of this sentence is positive is the use of the word \"returning\" which implies that the speaker is fondly remembering the anecdote for comparison. Additionally, the use of the word \"cartoon\" to describe the Japanese media implies that the speaker is viewing it in a positive light. Furthermore, the speaker is making a comparison between the Powerpuff Girls movie and the Japanese media, which suggests that they are both seen as enjoyable forms of entertainment. 
Taken together, these clues point to a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: with an unusual protagonist lrb a kilt wearing jackson rrb and subject matter , the improbable `` formula 51 '' is somewhat entertaining , but it could have been much stronger \nClues and the reasoning process: 1. \"Somewhat entertaining\" - This indicates that the speaker found the movie to be enjoyable, or at least not bad. \n2. \"Could have been much stronger\" - This indicates that the speaker was not fully satisfied with the movie, but still found it to be enjoyable. \nOverall, the sentiment of the sentence is positive, as the speaker found the movie to be entertaining and worthwhile, even if it could have been better.\nSENTIMENT: Positive\n\nINPUT: standing by yourself is haunting lrb it 's rrb what punk rock music used to be , and what the video medium could use more of spirit , perception , conviction \nClues and the reasoning process: 1. The phrase \"what punk rock music used to be\" implies a nostalgia for the genre, suggesting that the speaker is looking at the genre fondly. \n2. The word \"more\" suggests the speaker wants to see more of the qualities mentioned. \n3. The qualities mentioned - spirit, perception, conviction - are all positive attributes. \n4. The phrase \"standing by yourself\" can be interpreted as a sign of strength and independence, which is also a positive sentiment. \nOverall, the sentence conveys a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: juliette binoche 's sand is vivacious , but it 's hard to sense that powerhouse of 19th century prose behind her childlike smile \nClues and the reasoning process: 1. The sentence contains a positive adjective (\"vivacious\") to describe Juliette Binoche's sand. This suggests a positive sentiment towards the sand and by extension, Juliette Binoche.\n2. The sentence also contains a positive phrase (\"childlike smile\") to describe Juliette Binoche. This further reinforces the positive sentiment.\n3. The sentence does not contain any negative words or phrases to suggest a negative sentiment. \nOverall, the clues and reasoning lead to a positive sentiment for the INPUT sentence.\nSENTIMENT: Positive\n\nINPUT: spirit is a visual treat , and it takes chances that are bold by studio standards , but it lacks a strong narrative \nClues and the reasoning process: 1. The sentence contains words with positive sentiment such as \"visual treat\" and \"bold\". \n2. The sentence does not contain any negative sentiment such as \"bad\", \"disappointing\", or \"poorly made\". \n3. The sentence is focused on the positive aspects of the movie and does not dwell on the negative aspects. \n4. The overall sentiment of the sentence is positive despite the lack of a strong narrative. \nThus, the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: although olivier assayas ' elegantly appointed period drama seems , at times , padded with incident in the way of a too conscientious adaptation its three hour running time plays closer to two \nClues and the reasoning process: 1. The word \"elegantly\" is a positive descriptor, suggesting that the movie is well-crafted and aesthetically pleasing.\n2. The phrase \"at times\" implies that the movie does not always have too much incident, meaning that it is not overstuffed with unnecessary details.\n3. The phrase \"too conscientious\" suggests that the movie is well-crafted and not sloppy.\n4. 
The phrase \"plays closer to two\" implies that the three-hour running time does not feel like a burden, but rather, the movie is enjoyable and engaging.\nOverall, the clues and reasoning process suggest that the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: reminiscent of alfred hitchcock 's thrillers , most of the scary parts in ` signs ' occur while waiting for things to happen \nClues and the reasoning process: 1. The sentence has a comparison to Alfred Hitchcock's thrillers which are generally considered to be a positive example of suspenseful storytelling. \n2. The use of the word \"most\" implies that the majority of the scary parts are in a positive light. \n3. The word \"waiting\" implies anticipation and excitement, which are generally associated with positive feelings. \n4. The use of the word \"signs\" implies that the scary parts are a sign of something bigger and more exciting. \nOverall, the clues and reasoning point to a positive sentiment for the input sentence.\nSENTIMENT: Positive\n\nINPUT: often likable , but just as often it 's meandering , low on energy , and too eager to be quirky at moments when a little old fashioned storytelling would come in handy \nClues and the reasoning process: 1. The sentence begins with the phrase \"often likable\", which suggests that the writer has some positive feelings towards the subject.\n2. The sentence continues with the phrase \"but just as often\", which implies that the writer is aware of the good and bad aspects of the subject.\n3. The sentence then mentions traits such as \"low on energy\" and \"too eager to be quirky\", which could be seen as negative, but the writer then goes on to explain that these traits can be addressed with \"a little old fashioned storytelling\".\n4. The final phrase, \"come in handy\", suggests that the writer has some hope that the subject can be improved.\nOverall, the clues and reasoning suggest that the sentiment of the sentence is Positive.\nSENTIMENT: Positive\n\nINPUT: clint eastwood 's blood work is a lot like a well made pb j sandwich familiar , fairly uneventful and boasting no real surprises but still quite tasty and inviting all the same \nClues and the reasoning process: 1. The phrase \"well made\" implies that the sandwich is of a high quality. \n2. The phrase \"no real surprises\" implies that the sandwich is something that can be expected and enjoyed. \n3. The phrase \"quite tasty\" implies that the sandwich is enjoyable. \n4. The phrase \"inviting all the same\" implies that the sandwich is still enjoyable even if it is not surprising. \nAll of these clues point to a positive sentiment about the PB&J sandwich, which can be extended to the film Blood Work.\nSENTIMENT: Positive\n\nINPUT: human nature is a goofball movie , in the way that malkovich was , but it tries too hard \nClues and the reasoning process: 1. The sentence uses words that suggest a positive sentiment such as \u201cgood\u201d and \u201ctries\u201d. \n2. The sentence suggests that the movie is comparable to a highly acclaimed movie, \u201cMalkovich\u201d, which implies a positive sentiment. \n3. The sentence does not contain any words that suggest a negative sentiment. \nTherefore, the sentiment of the sentence can be determined to be positive.\nSENTIMENT: Positive\n\nINPUT: illiterate , often inert sci fi action thriller \nClues and the reasoning process: The words \"illiterate\" and \"inert\" both have negative connotations, suggesting that the movie is of low quality and not engaging. 
Additionally, the use of the word \"often\" implies that the movie is not always engaging, further indicating a negative sentiment. As a result, a negative sentiment is determined for the input sentence.\nSENTIMENT: Negative\n\nINPUT: now as a former gong show addict , i 'll admit it , my only complaint is that we did n't get more re creations of all those famous moments from the show \nClues and the reasoning process: 1. The sentence begins with \"now as a former gong show addict\" which suggests that the speaker has a positive attitude towards the show. \n2. The phrase \"my only complaint is that we did n't get more re creations of all those famous moments from the show\" indicates that the speaker is disappointed in not having more memories of the show, which implies a nostalgia for the show and a positive sentiment towards it. \n3. The use of the phrase \"famous moments\" also suggests that the speaker has fond memories of the show. \nOverall, the clues and reasoning present in the sentence suggest a positive sentiment towards the show.\nSENTIMENT: Positive\n\nINPUT: a strong first quarter, slightly less so second quarter, and average second half\n"
for user in ["10086", "1", "6688", "10043"]:
response_lst = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.7,
max_tokens=300,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
logprobs=0,
user=user
)
text = [item.text for item in response_lst.choices]
print("=" * 10)
print(text)
def get_response():
openai.api_key = "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"
prompt = "This is an overall sentiment classifier. \nFirst, list clues and explain the reasoning process for determining the sentiment of INPUT sentence.\nNext, based on the clues and the reasoning process, classify the sentiment of the INPUT sentence as Positive or Negative.\n\nINPUT: though the film is well intentioned , one could rent the original and get the same love story and parable \nClues and the reasoning process: Clue 1: The phrase \"well intentioned\" implies that the film is made with good intentions and is likely to be well-received.\nClue 2: The phrase \"same love story and parable\" suggests that the film is faithful to the source material and therefore likely to be enjoyable. \nReasoning: The combination of these two clues suggests that the overall sentiment of the sentence is positive.\nSENTIMENT: Positive\n\nINPUT: shiner can certainly go the distance , but is n't world championship material \nClues and the reasoning process: 1. The sentence contains two positive words \"shiner\" and \"certainly\" which provide a clue that the sentiment of the sentence is likely positive. \n2. The phrase \"go the distance\" is a metaphor commonly used to indicate success and perseverance, which suggests a positive sentiment.\n3. The inclusion of the phrase \"world championship material\" implies that the shiner is seen as capable of achieving a high level of success, which is indicative of a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: it falls far short of poetry , but it 's not bad prose \nClues and the reasoning process: 1. The presence of the word \"but\" indicates a contrast between two ideas and usually implies a positive sentiment. \n2. The word \"not\" is used to negate the following word, \"bad\". This implies a positive sentiment. \n3. The phrase \"far short\" implies that the sentence is comparing the quality of the prose to something of higher quality, such as poetry. This implies that the quality of the prose is still positive, even if it is not as good as poetry. \nOverall, the clues and reasoning support the positive sentiment of the input sentence.\nSENTIMENT: Positive\n\nINPUT: the rare imax movie that you 'll wish was longer than an hour \nClues and the reasoning process: 1. The word \"rare\" suggests that the movie is unique and special in some way. \n2. The use of the word \"wish\" implies that the person has a positive sentiment towards the movie. \n3. The phrase \"longer than an hour\" implies that the person enjoyed the movie and wanted it to last longer. \nOverall, these clues and the reasoning process indicate that the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: watching the powerpuff girls movie , my mind kept returning to one anecdote for comparison the cartoon in japan that gave people seizures \nClues and the reasoning process: The first clue that the sentiment of this sentence is positive is the use of the word \"returning\" which implies that the speaker is fondly remembering the anecdote for comparison. Additionally, the use of the word \"cartoon\" to describe the Japanese media implies that the speaker is viewing it in a positive light. Furthermore, the speaker is making a comparison between the Powerpuff Girls movie and the Japanese media, which suggests that they are both seen as enjoyable forms of entertainment. 
Taken together, these clues point to a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: with an unusual protagonist lrb a kilt wearing jackson rrb and subject matter , the improbable `` formula 51 '' is somewhat entertaining , but it could have been much stronger \nClues and the reasoning process: 1. \"Somewhat entertaining\" - This indicates that the speaker found the movie to be enjoyable, or at least not bad. \n2. \"Could have been much stronger\" - This indicates that the speaker was not fully satisfied with the movie, but still found it to be enjoyable. \nOverall, the sentiment of the sentence is positive, as the speaker found the movie to be entertaining and worthwhile, even if it could have been better.\nSENTIMENT: Positive\n\nINPUT: standing by yourself is haunting lrb it 's rrb what punk rock music used to be , and what the video medium could use more of spirit , perception , conviction \nClues and the reasoning process: 1. The phrase \"what punk rock music used to be\" implies a nostalgia for the genre, suggesting that the speaker is looking at the genre fondly. \n2. The word \"more\" suggests the speaker wants to see more of the qualities mentioned. \n3. The qualities mentioned - spirit, perception, conviction - are all positive attributes. \n4. The phrase \"standing by yourself\" can be interpreted as a sign of strength and independence, which is also a positive sentiment. \nOverall, the sentence conveys a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: juliette binoche 's sand is vivacious , but it 's hard to sense that powerhouse of 19th century prose behind her childlike smile \nClues and the reasoning process: 1. The sentence contains a positive adjective (\"vivacious\") to describe Juliette Binoche's sand. This suggests a positive sentiment towards the sand and by extension, Juliette Binoche.\n2. The sentence also contains a positive phrase (\"childlike smile\") to describe Juliette Binoche. This further reinforces the positive sentiment.\n3. The sentence does not contain any negative words or phrases to suggest a negative sentiment. \nOverall, the clues and reasoning lead to a positive sentiment for the INPUT sentence.\nSENTIMENT: Positive\n\nINPUT: spirit is a visual treat , and it takes chances that are bold by studio standards , but it lacks a strong narrative \nClues and the reasoning process: 1. The sentence contains words with positive sentiment such as \"visual treat\" and \"bold\". \n2. The sentence does not contain any negative sentiment such as \"bad\", \"disappointing\", or \"poorly made\". \n3. The sentence is focused on the positive aspects of the movie and does not dwell on the negative aspects. \n4. The overall sentiment of the sentence is positive despite the lack of a strong narrative. \nThus, the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: although olivier assayas ' elegantly appointed period drama seems , at times , padded with incident in the way of a too conscientious adaptation its three hour running time plays closer to two \nClues and the reasoning process: 1. The word \"elegantly\" is a positive descriptor, suggesting that the movie is well-crafted and aesthetically pleasing.\n2. The phrase \"at times\" implies that the movie does not always have too much incident, meaning that it is not overstuffed with unnecessary details.\n3. The phrase \"too conscientious\" suggests that the movie is well-crafted and not sloppy.\n4. 
The phrase \"plays closer to two\" implies that the three-hour running time does not feel like a burden, but rather, the movie is enjoyable and engaging.\nOverall, the clues and reasoning process suggest that the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: reminiscent of alfred hitchcock 's thrillers , most of the scary parts in ` signs ' occur while waiting for things to happen \nClues and the reasoning process: 1. The sentence has a comparison to Alfred Hitchcock's thrillers which are generally considered to be a positive example of suspenseful storytelling. \n2. The use of the word \"most\" implies that the majority of the scary parts are in a positive light. \n3. The word \"waiting\" implies anticipation and excitement, which are generally associated with positive feelings. \n4. The use of the word \"signs\" implies that the scary parts are a sign of something bigger and more exciting. \nOverall, the clues and reasoning point to a positive sentiment for the input sentence.\nSENTIMENT: Positive\n\nINPUT: often likable , but just as often it 's meandering , low on energy , and too eager to be quirky at moments when a little old fashioned storytelling would come in handy \nClues and the reasoning process: 1. The sentence begins with the phrase \"often likable\", which suggests that the writer has some positive feelings towards the subject.\n2. The sentence continues with the phrase \"but just as often\", which implies that the writer is aware of the good and bad aspects of the subject.\n3. The sentence then mentions traits such as \"low on energy\" and \"too eager to be quirky\", which could be seen as negative, but the writer then goes on to explain that these traits can be addressed with \"a little old fashioned storytelling\".\n4. The final phrase, \"come in handy\", suggests that the writer has some hope that the subject can be improved.\nOverall, the clues and reasoning suggest that the sentiment of the sentence is Positive.\nSENTIMENT: Positive\n\nINPUT: clint eastwood 's blood work is a lot like a well made pb j sandwich familiar , fairly uneventful and boasting no real surprises but still quite tasty and inviting all the same \nClues and the reasoning process: 1. The phrase \"well made\" implies that the sandwich is of a high quality. \n2. The phrase \"no real surprises\" implies that the sandwich is something that can be expected and enjoyed. \n3. The phrase \"quite tasty\" implies that the sandwich is enjoyable. \n4. The phrase \"inviting all the same\" implies that the sandwich is still enjoyable even if it is not surprising. \nAll of these clues point to a positive sentiment about the PB&J sandwich, which can be extended to the film Blood Work.\nSENTIMENT: Positive\n\nINPUT: human nature is a goofball movie , in the way that malkovich was , but it tries too hard \nClues and the reasoning process: 1. The sentence uses words that suggest a positive sentiment such as \u201cgood\u201d and \u201ctries\u201d. \n2. The sentence suggests that the movie is comparable to a highly acclaimed movie, \u201cMalkovich\u201d, which implies a positive sentiment. \n3. The sentence does not contain any words that suggest a negative sentiment. \nTherefore, the sentiment of the sentence can be determined to be positive.\nSENTIMENT: Positive\n\nINPUT: illiterate , often inert sci fi action thriller \nClues and the reasoning process: The words \"illiterate\" and \"inert\" both have negative connotations, suggesting that the movie is of low quality and not engaging. 
Additionally, the use of the word \"often\" implies that the movie is not always engaging, further indicating a negative sentiment. As a result, a negative sentiment is determined for the input sentence.\nSENTIMENT: Negative\n\nINPUT: now as a former gong show addict , i 'll admit it , my only complaint is that we did n't get more re creations of all those famous moments from the show \nClues and the reasoning process: 1. The sentence begins with \"now as a former gong show addict\" which suggests that the speaker has a positive attitude towards the show. \n2. The phrase \"my only complaint is that we did n't get more re creations of all those famous moments from the show\" indicates that the speaker is disappointed in not having more memories of the show, which implies a nostalgia for the show and a positive sentiment towards it. \n3. The use of the phrase \"famous moments\" also suggests that the speaker has fond memories of the show. \nOverall, the clues and reasoning present in the sentence suggest a positive sentiment towards the show.\nSENTIMENT: Positive\n\nINPUT: a strong first quarter, slightly less so second quarter, and average second half\n"
for user in ["10086", "1", "6688", "10043"]:
response_lst = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=300,
frequency_penalty=0,
presence_penalty=0,
logprobs=10
)
text = [item.text for item in response_lst.choices]
log_probs = response_lst.choices[0].logprobs["token_logprobs"]
log_prob_tokens = response_lst.choices[0].logprobs["tokens"]
idx_max = max(enumerate(log_probs), key=lambda x: x[1])[0]
max_token = log_prob_tokens[idx_max]
# token_logprobs
# tokens
print("=" * 10)
print(text)
print(response_lst.choices[0])
# print(max_token)
# print(log_probs[idx_max])
# print(max(log_probs))
exit()
if __name__ == "__main__":
get_response()
print("$" * 40)
# get_response_via_dynamic_user()
| [
"This is an overall sentiment classifier. \nFirst, list clues and explain the reasoning process for determining the sentiment of INPUT sentence.\nNext, based on the clues and the reasoning process, classify the sentiment of the INPUT sentence as Positive or Negative.\n\nINPUT: though the film is well intentioned , one could rent the original and get the same love story and parable \nClues and the reasoning process: Clue 1: The phrase \"well intentioned\" implies that the film is made with good intentions and is likely to be well-received.\nClue 2: The phrase \"same love story and parable\" suggests that the film is faithful to the source material and therefore likely to be enjoyable. \nReasoning: The combination of these two clues suggests that the overall sentiment of the sentence is positive.\nSENTIMENT: Positive\n\nINPUT: shiner can certainly go the distance , but is n't world championship material \nClues and the reasoning process: 1. The sentence contains two positive words \"shiner\" and \"certainly\" which provide a clue that the sentiment of the sentence is likely positive. \n2. The phrase \"go the distance\" is a metaphor commonly used to indicate success and perseverance, which suggests a positive sentiment.\n3. The inclusion of the phrase \"world championship material\" implies that the shiner is seen as capable of achieving a high level of success, which is indicative of a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: it falls far short of poetry , but it 's not bad prose \nClues and the reasoning process: 1. The presence of the word \"but\" indicates a contrast between two ideas and usually implies a positive sentiment. \n2. The word \"not\" is used to negate the following word, \"bad\". This implies a positive sentiment. \n3. The phrase \"far short\" implies that the sentence is comparing the quality of the prose to something of higher quality, such as poetry. This implies that the quality of the prose is still positive, even if it is not as good as poetry. \nOverall, the clues and reasoning support the positive sentiment of the input sentence.\nSENTIMENT: Positive\n\nINPUT: the rare imax movie that you 'll wish was longer than an hour \nClues and the reasoning process: 1. The word \"rare\" suggests that the movie is unique and special in some way. \n2. The use of the word \"wish\" implies that the person has a positive sentiment towards the movie. \n3. The phrase \"longer than an hour\" implies that the person enjoyed the movie and wanted it to last longer. \nOverall, these clues and the reasoning process indicate that the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: watching the powerpuff girls movie , my mind kept returning to one anecdote for comparison the cartoon in japan that gave people seizures \nClues and the reasoning process: The first clue that the sentiment of this sentence is positive is the use of the word \"returning\" which implies that the speaker is fondly remembering the anecdote for comparison. Additionally, the use of the word \"cartoon\" to describe the Japanese media implies that the speaker is viewing it in a positive light. Furthermore, the speaker is making a comparison between the Powerpuff Girls movie and the Japanese media, which suggests that they are both seen as enjoyable forms of entertainment. 
Taken together, these clues point to a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: with an unusual protagonist lrb a kilt wearing jackson rrb and subject matter , the improbable `` formula 51 '' is somewhat entertaining , but it could have been much stronger \nClues and the reasoning process: 1. \"Somewhat entertaining\" - This indicates that the speaker found the movie to be enjoyable, or at least not bad. \n2. \"Could have been much stronger\" - This indicates that the speaker was not fully satisfied with the movie, but still found it to be enjoyable. \nOverall, the sentiment of the sentence is positive, as the speaker found the movie to be entertaining and worthwhile, even if it could have been better.\nSENTIMENT: Positive\n\nINPUT: standing by yourself is haunting lrb it 's rrb what punk rock music used to be , and what the video medium could use more of spirit , perception , conviction \nClues and the reasoning process: 1. The phrase \"what punk rock music used to be\" implies a nostalgia for the genre, suggesting that the speaker is looking at the genre fondly. \n2. The word \"more\" suggests the speaker wants to see more of the qualities mentioned. \n3. The qualities mentioned - spirit, perception, conviction - are all positive attributes. \n4. The phrase \"standing by yourself\" can be interpreted as a sign of strength and independence, which is also a positive sentiment. \nOverall, the sentence conveys a positive sentiment.\nSENTIMENT: Positive\n\nINPUT: juliette binoche 's sand is vivacious , but it 's hard to sense that powerhouse of 19th century prose behind her childlike smile \nClues and the reasoning process: 1. The sentence contains a positive adjective (\"vivacious\") to describe Juliette Binoche's sand. This suggests a positive sentiment towards the sand and by extension, Juliette Binoche.\n2. The sentence also contains a positive phrase (\"childlike smile\") to describe Juliette Binoche. This further reinforces the positive sentiment.\n3. The sentence does not contain any negative words or phrases to suggest a negative sentiment. \nOverall, the clues and reasoning lead to a positive sentiment for the INPUT sentence.\nSENTIMENT: Positive\n\nINPUT: spirit is a visual treat , and it takes chances that are bold by studio standards , but it lacks a strong narrative \nClues and the reasoning process: 1. The sentence contains words with positive sentiment such as \"visual treat\" and \"bold\". \n2. The sentence does not contain any negative sentiment such as \"bad\", \"disappointing\", or \"poorly made\". \n3. The sentence is focused on the positive aspects of the movie and does not dwell on the negative aspects. \n4. The overall sentiment of the sentence is positive despite the lack of a strong narrative. \nThus, the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: although olivier assayas ' elegantly appointed period drama seems , at times , padded with incident in the way of a too conscientious adaptation its three hour running time plays closer to two \nClues and the reasoning process: 1. The word \"elegantly\" is a positive descriptor, suggesting that the movie is well-crafted and aesthetically pleasing.\n2. The phrase \"at times\" implies that the movie does not always have too much incident, meaning that it is not overstuffed with unnecessary details.\n3. The phrase \"too conscientious\" suggests that the movie is well-crafted and not sloppy.\n4. 
The phrase \"plays closer to two\" implies that the three-hour running time does not feel like a burden, but rather, the movie is enjoyable and engaging.\nOverall, the clues and reasoning process suggest that the sentiment of the input sentence is positive.\nSENTIMENT: Positive\n\nINPUT: reminiscent of alfred hitchcock 's thrillers , most of the scary parts in ` signs ' occur while waiting for things to happen \nClues and the reasoning process: 1. The sentence has a comparison to Alfred Hitchcock's thrillers which are generally considered to be a positive example of suspenseful storytelling. \n2. The use of the word \"most\" implies that the majority of the scary parts are in a positive light. \n3. The word \"waiting\" implies anticipation and excitement, which are generally associated with positive feelings. \n4. The use of the word \"signs\" implies that the scary parts are a sign of something bigger and more exciting. \nOverall, the clues and reasoning point to a positive sentiment for the input sentence.\nSENTIMENT: Positive\n\nINPUT: often likable , but just as often it 's meandering , low on energy , and too eager to be quirky at moments when a little old fashioned storytelling would come in handy \nClues and the reasoning process: 1. The sentence begins with the phrase \"often likable\", which suggests that the writer has some positive feelings towards the subject.\n2. The sentence continues with the phrase \"but just as often\", which implies that the writer is aware of the good and bad aspects of the subject.\n3. The sentence then mentions traits such as \"low on energy\" and \"too eager to be quirky\", which could be seen as negative, but the writer then goes on to explain that these traits can be addressed with \"a little old fashioned storytelling\".\n4. The final phrase, \"come in handy\", suggests that the writer has some hope that the subject can be improved.\nOverall, the clues and reasoning suggest that the sentiment of the sentence is Positive.\nSENTIMENT: Positive\n\nINPUT: clint eastwood 's blood work is a lot like a well made pb j sandwich familiar , fairly uneventful and boasting no real surprises but still quite tasty and inviting all the same \nClues and the reasoning process: 1. The phrase \"well made\" implies that the sandwich is of a high quality. \n2. The phrase \"no real surprises\" implies that the sandwich is something that can be expected and enjoyed. \n3. The phrase \"quite tasty\" implies that the sandwich is enjoyable. \n4. The phrase \"inviting all the same\" implies that the sandwich is still enjoyable even if it is not surprising. \nAll of these clues point to a positive sentiment about the PB&J sandwich, which can be extended to the film Blood Work.\nSENTIMENT: Positive\n\nINPUT: human nature is a goofball movie , in the way that malkovich was , but it tries too hard \nClues and the reasoning process: 1. The sentence uses words that suggest a positive sentiment such as “good” and “tries”. \n2. The sentence suggests that the movie is comparable to a highly acclaimed movie, “Malkovich”, which implies a positive sentiment. \n3. The sentence does not contain any words that suggest a negative sentiment. \nTherefore, the sentiment of the sentence can be determined to be positive.\nSENTIMENT: Positive\n\nINPUT: illiterate , often inert sci fi action thriller \nClues and the reasoning process: The words \"illiterate\" and \"inert\" both have negative connotations, suggesting that the movie is of low quality and not engaging. 
Additionally, the use of the word \"often\" implies that the movie is not always engaging, further indicating a negative sentiment. As a result, a negative sentiment is determined for the input sentence.\nSENTIMENT: Negative\n\nINPUT: now as a former gong show addict , i 'll admit it , my only complaint is that we did n't get more re creations of all those famous moments from the show \nClues and the reasoning process: 1. The sentence begins with \"now as a former gong show addict\" which suggests that the speaker has a positive attitude towards the show. \n2. The phrase \"my only complaint is that we did n't get more re creations of all those famous moments from the show\" indicates that the speaker is disappointed in not having more memories of the show, which implies a nostalgia for the show and a positive sentiment towards it. \n3. The use of the phrase \"famous moments\" also suggests that the speaker has fond memories of the show. \nOverall, the clues and reasoning present in the sentence suggest a positive sentiment towards the show.\nSENTIMENT: Positive\n\nINPUT: a strong first quarter, slightly less so second quarter, and average second half\n"
] |
2024-01-10 | ShannonAI/GPT-CLS-CARP | model~gpt_model.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@file: model/gpt_model.py
@time: 2022/12/06 20:03
@desc:
https://beta.openai.com/docs/models/content-filter
"""
import logging
import random
import time
from multiprocessing import Pool
from typing import List, Union, Dict
import numpy as np
import openai
from openai.embeddings_utils import get_embedding
from data.config import GPT3ModelConfig
from utils.get_logger import get_info_logger
class GPT3ModelAPI(object):
def __init__(self, config: GPT3ModelConfig, openai_key_offset_idx: int = 0, logger: logging = None):
self.config = config
self.logger = logger if logger is not None else get_info_logger("GPT-3")
self.openai_key_idx = openai_key_offset_idx
self.openai_key_candidates = self.config.openai_api_key if type(self.config.openai_api_key) is list else [
self.config.openai_api_key]
assert self.config.engine_name in ["text-ada-002", "text-davinci-002", "text-davinci-003",
"text-embedding-ada-002"]
def get_openai_response(self, post_prompt: str, key_idx_offset: int = None):
raise NotImplementedError("get_openai_response")
def request_api_and_handle_errors(self, post_prompt: str, exponential_base: float = 2,
jitter: bool = True, key_idx_offset: int = None,
only_return_text: bool = False) -> Dict:
"""Keep the prediction func's name the same as previous PyTorch models.
https://github.com/openai/openai-cookbook/blob/main/examples/How_to_handle_rate_limits.ipynb 的 Example 3: Manual backoff implementation
"""
# Initialize variables
num_retries = 0
delay = self.config.init_delay
return_sign = 0
# Loop until a successful response or max_retries is hit or an exception is raised
while True:
try:
response_lst = self.get_openai_response(post_prompt, key_idx_offset=key_idx_offset)
if self.config.engine_name.startswith("text-embedding"):
results = response_lst
else:
results = response_lst.choices
if only_return_text:
results = [item.text for item in results]
self.logger.info(msg=f"prompt_and_result",
extra={"prompt_list": post_prompt, "results": response_lst})
time.sleep(delay)
return_sign = 1
except openai.error.RateLimitError as limiterror:
limiterror_info = str(limiterror)
if limiterror_info == "You exceeded your current quota, please check your plan and billing details.":
if self.openai_key_idx < len(self.openai_key_candidates) - 1:
self.logger.info("=" * 40)
self.logger.info(f"WARNING: {self.openai_key_idx} OUT-Of-Quota ...")
self.openai_key_idx += 1
self.logger.info(f"WARNING: RESET OPENAI-KEY TO {self.openai_key_idx} ...")
elif self.openai_key_idx == len(self.openai_key_candidates) - 1:
self.logger.info(f"ReTRY Failed ...")
raise Exception(f"WARNING: Out-Of-Quota ({len(self.openai_key_candidates)} accounts).")
else:
raise ValueError
# Increment retries
num_retries += 1
# Increment the delay
delay *= exponential_base * (1 + jitter * random.random())
self.logger.info("=" * 40)
self.logger.info(f"WARNING: SLEEP {round(delay, 4)}s for RateLimitError")
self.logger.info(f"WARNING: {num_retries} RETRY out of {self.config.max_retries}")
self.logger.info("=" * 40)
# Check if max retries has been reached
if num_retries > self.config.max_retries:
self.logger.info(f"ReTRY Failed ..")
raise Exception(f"WARNING: Maximum number of retries ({self.config.max_retries}) exceeded.")
# Sleep for the delay
time.sleep(delay)
except (openai.error.ServiceUnavailableError, openai.error.APIConnectionError, openai.error.APIError):
num_retries += 1
self.logger.info("=" * 40)
self.logger.info(f"WARNING: SLEEP 70s for ServiceUnavailableError or APIConnectionError")
self.logger.info(f"WARNING: {num_retries} RETRY out of {self.config.max_retries}")
self.logger.info("=" * 40)
time.sleep(70)
if num_retries > self.config.max_retries:
self.logger.info(f"RETRY FAILED")
raise Exception(f"WARNING: Maximum number of retries ({self.config.max_retries}) exceeded.")
# Raise exceptions for any errors not specified
except openai.error.InvalidRequestError:
self.logger.info("=" * 40)
self.logger.info("WARNING: Current Account DO NOT have Quota.")
self.logger.info("=" * 40)
except Exception as e:
self.logger.info(f"OTHER ERRORs")
self.logger.info(e)
raise e
if return_sign == 1:
return results
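    # Illustrative backoff sketch (assumed config values, not part of the original API): with
    # config.init_delay = 1s, exponential_base = 2 and jitter enabled, each RateLimitError retry
    # multiplies the delay by a factor drawn from [2, 4) via exponential_base * (1 + jitter * random()),
    # so successive sleeps grow roughly 1s -> 2-4s -> 4-16s -> ... until config.max_retries is exceeded,
    # while service/connection errors instead sleep a fixed 70s between retries.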
def forward(self, prompt_lst: Union[List[str], str], exponential_base: float = 2,
jitter: bool = True, num_workers: int = 1, key_idx_offset: int = None,
only_return_text: bool = False, update_max_tokens: int = None) -> List[Dict]:
# if num_workers is 1.
if num_workers == 1 or (len(prompt_lst) == 1 and isinstance(prompt_lst, list)):
if isinstance(prompt_lst, list):
prompt_input = prompt_lst[0]
elif isinstance(prompt_lst, str):
prompt_input = prompt_lst
else:
raise TypeError(prompt_lst)
results = self.request_api_and_handle_errors(prompt_input, exponential_base=exponential_base, jitter=jitter,
key_idx_offset=key_idx_offset,
only_return_text=only_return_text)
return results
# if num_workers > 1
assert isinstance(prompt_lst, list)
assert len(prompt_lst) <= num_workers
num_workers = min(num_workers, len(prompt_lst))
pool_results = []
pool = Pool(processes=num_workers)
for worker_id in range(0, num_workers):
result = pool.apply_async(self.request_api_and_handle_errors,
(prompt_lst[worker_id], exponential_base, jitter, worker_id, only_return_text), )
pool_results.append(result)
pool.close()
results = [item.get()[0] for item in pool_results]
return results
class GPT3TextCompletionModel(GPT3ModelAPI):
"""Code for OpenAI's GPT-3 model service."""
def __init__(self, config: GPT3ModelConfig, openai_key_offset_idx: int = 0, logger: logging = None):
super(GPT3TextCompletionModel, self).__init__(config, openai_key_offset_idx, logger)
def get_openai_response(self, post_prompt: str, key_idx_offset: int = None):
# https://beta.openai.com/docs/api-reference/completions/create
openai_key_idx = self.openai_key_idx if key_idx_offset is None else self.openai_key_idx + key_idx_offset
openai.api_key = self.openai_key_candidates[openai_key_idx]
response_lst = openai.Completion.create(
engine=self.config.engine_name,
prompt=post_prompt,
temperature=self.config.temperature,
max_tokens=min(self.config.max_tokens, 3900 - len(post_prompt.split(" "))),
top_p=self.config.top_p,
frequency_penalty=self.config.frequency_penalty,
presence_penalty=self.config.presence_penalty,
logprobs=self.config.logprobs,
)
return response_lst
class GPT3EmbeddingModel(GPT3ModelAPI):
"""Code for OpenAI's GPT-3 model service."""
def __init__(self, config: GPT3ModelConfig, openai_key_offset_idx: int = 0, logger: logging = None):
super(GPT3EmbeddingModel, self).__init__(config, openai_key_offset_idx, logger)
def get_openai_response(self, input_text: str, key_idx_offset: int = None) -> np.array:
openai_key_idx = self.openai_key_idx if key_idx_offset is None else self.openai_key_idx + key_idx_offset
openai.api_key = self.openai_key_candidates[openai_key_idx]
text_embedding = get_embedding(input_text, self.config.engine_name)
text_embedding = np.array(text_embedding)
return text_embedding
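    # Illustrative usage sketch (hypothetical config values; other GPT3ModelConfig fields elided):
    #   model = GPT3EmbeddingModel(GPT3ModelConfig(engine_name="text-embedding-ada-002", ...))
    #   vec = model.forward("hello world")   # numpy array holding the text embedding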
| [] |
2024-01-10 | ShannonAI/GPT-CLS-CARP | utils~check_api_key_access.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@file: utils/check_api_key_access.py
@time: 2022/12/06 20:03
@desc:
"""
from typing import List
import openai
import requests
from tqdm import tqdm
def openai_entry(key_lst: List[str], model: str = "text-davinci-003"):
access_key_lst = []
unaccess_key_lst = []
prompt = "Please write a tagline for an ice-cream shop."
# prompt = "i like cats."
# prompt = "INPUT: A fun family movie that 's suitable for all ages -- a movie that will make you laugh , cry and realize , ` It 's never too late to believe in your dreams . '\nSENTIMENT: Positive\n\nExplain the reasoning process for determining the overall SENTIMENT of the INPUT (limit to 150 tokens).\n\n"
for key_item in tqdm(key_lst):
openai.api_key = key_item
try:
response_lst = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=0,
max_tokens=256,
frequency_penalty=0.0,
presence_penalty=0.0,
logprobs=0
)
access_key_lst.append(key_item)
print(response_lst)
except openai.error.RateLimitError as limiterror:
limiterror_info = str(limiterror)
if limiterror_info == "You exceeded your current quota, please check your plan and billing details.":
unaccess_key_lst.append(key_item)
print(model, "openai.error.RateLimitError", limiterror)
except openai.error.InvalidRequestError as e:
print(model, "openai.error.InvalidRequestError", e)
unaccess_key_lst.append(key_item)
except KeyboardInterrupt:
break
except:
unaccess_key_lst.append(key_item)
print("=" * 30)
print(f"Total len {len(key_lst)}")
print(f"Access key {len(access_key_lst)}")
print(f"Unaccess key {len(unaccess_key_lst)}")
print("=" * 30)
for key in access_key_lst:
print(f'"{key}",')
if __name__ == "__main__":
key_lst = [
"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",
]
openai_entry(key_lst, model="text-davinci-003")
| [
"Please write a tagline for an ice-cream shop."
] |
2024-01-10 | ShannonAI/GPT-CLS-CARP | tests~debug~raise_exception.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@file: tests/debug/raise_exception.py
@time: 2022/12/06 20:03
@desc:
"""
import time
import openai
def raise_service_exception():
raise openai.error.ServiceUnavailableError
def raise_rate_exception():
raise openai.error.RateLimitError
def test_handle_exception_last_return():
try:
raise_service_exception()
except:
print("Sleep")
time.sleep(10)
print(f"112.003")
return "bbba"
def test_handle_exception():
while True:
try:
return raise_rate_exception()
except (openai.error.RateLimitError, openai.error.ServiceUnavailableError):
print("Sleep")
time.sleep(10)
print(f"112.003")
except:
raise ValueError
if __name__ == "__main__":
returned_str = test_handle_exception_last_return()
print(returned_str)
print("=" * 20)
test_handle_exception()
| [] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~models~chat_glm2.py | from typing import Any, List, Optional
from langchain.llms.base import LLM
from langchain.schema import BaseMessage, ChatResult
from transformers import PreTrainedModel, PreTrainedTokenizer
from weaverbird.config_factory import BaseModelConfig, FinetuningConfig, GenerationConfig
from weaverbird.models.llm_loader import load_model_and_tokenizer
from weaverbird.utils import dispatch_model
from weaverbird.utils.misc import torch_gc
class ChatGLM2(LLM):
""" GLM2 from THU """
model: Optional[PreTrainedModel] = None
tokenizer: Optional[PreTrainedTokenizer] = None
generation_config: Optional[GenerationConfig] = None
def __init__(
self,
model_config: BaseModelConfig,
finetuning_config: Optional[FinetuningConfig] = None,
generation_config: Optional[GenerationConfig] = None
) -> None:
super(ChatGLM2, self).__init__()
self.model, self.tokenizer = load_model_and_tokenizer(model_config, finetuning_config)
self.model = dispatch_model(self.model)
self.model = self.model.eval() # enable evaluation mode
self.generation_config = generation_config
@classmethod
def build_from_config(cls, configs):
return cls(model_config=configs['model_config'], finetuning_config=configs['finetuning_config'],
generation_config=configs['generation_config'])
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        history = kwargs.get('history', [])
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=history,
max_length=self.generation_config.max_length,
temperature=self.generation_config.temperature
)
print(f"response:{response}")
return response
def _generate_answer(self, prompt: str, history: List[List[BaseMessage]] = [], streaming: bool = False):
if streaming:
history += [[]]
for inum, (stream_resp, _) in enumerate(self.model.stream_chat(
self.tokenizer,
prompt,
history=history[
-self.generation_config.max_history_message_length:-1] if self.generation_config.max_history_message_length > 1 else [],
max_length=self.generation_config.max_length,
temperature=self.generation_config.temperature
)):
history[-1] = [prompt, stream_resp]
llm_output = {'history': history}
yield ChatResult(generations=stream_resp, llm_output=llm_output)
else:
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=history[
-self.generation_config.max_history_message_length:-1] if self.generation_config.max_history_message_length > 1 else [],
max_length=self.generation_config.max_length,
temperature=self.generation_config.temperature
)
torch_gc()
history += [[prompt, response]]
llm_output = {'history': history}
yield ChatResult(generations=response, llm_output=llm_output)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "chat_glm2"
| [] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~models~chat_llama2.py | from typing import Any, List, Optional, Tuple, Dict
import torch
from langchain.llms.base import LLM
from transformers import GenerationConfig
from transformers import PreTrainedModel, PreTrainedTokenizer
from weaverbird.config_factory import BaseModelConfig, FinetuningConfig
from weaverbird.config_factory import GenerationConfig as WBGenerationConfig
from weaverbird.models.llm_loader import load_model_and_tokenizer
from weaverbird.models.template import get_template_and_fix_tokenizer, Template
from weaverbird.utils import dispatch_model, get_logits_processor
class ChatLlama2(LLM):
"""
LLAMA2 from Meta
Borrowed from https://github.com/hiyouga/LLaMA-Efficient-Tuning/blob/469f859161dec0e34f4cc849f20e43d442680b5c/src/llmtuner/chat/stream_chat.py
"""
model: Optional[PreTrainedModel] = None
tokenizer: Optional[PreTrainedTokenizer] = None
generation_config: Optional[WBGenerationConfig] = None
template: Optional[Template] = None
def __init__(
self,
model_config: BaseModelConfig,
finetuning_config: Optional[FinetuningConfig] = None,
generation_config: Optional[WBGenerationConfig] = None
) -> None:
super(ChatLlama2, self).__init__()
self.model, self.tokenizer = load_model_and_tokenizer(model_config, finetuning_config)
self.model = dispatch_model(self.model)
self.model = self.model.eval() # enable evaluation mode
self.generation_config = generation_config
self.template = get_template_and_fix_tokenizer("llama2", self.tokenizer)
@classmethod
def build_from_config(cls, configs):
return cls(model_config=configs['model_config'], finetuning_config=configs['finetuning_config'],
generation_config=configs['generation_config'])
def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
response, _ = self.chat(
prompt,
history=[]
)
print(f"response:{response}")
print(f"+++++++++++++++++++++++++++++++++++")
return response
def process_args(
self,
query: str,
history: Optional[List[Tuple[str, str]]] = None,
system: Optional[str] = None,
**input_kwargs
) -> Tuple[Dict[str, Any], int]:
system = system or ""
prompt, _ = self.template.encode_oneturn(
tokenizer=self.tokenizer, query=query, resp="", history=history, system=system
)
input_ids = torch.tensor([prompt], device=self.model.device)
prompt_length = len(input_ids[0])
do_sample = input_kwargs.pop("do_sample", None)
temperature = input_kwargs.pop("temperature", None)
top_p = input_kwargs.pop("top_p", None)
top_k = input_kwargs.pop("top_k", None)
repetition_penalty = input_kwargs.pop("repetition_penalty", None)
max_length = input_kwargs.pop("max_length", None)
max_new_tokens = input_kwargs.pop("max_new_tokens", None)
generation_config = self.generation_config.dict()
generation_config.update(dict(
do_sample=do_sample if do_sample is not None else generation_config["do_sample"],
temperature=temperature or generation_config["temperature"],
top_p=top_p or generation_config["top_p"],
top_k=top_k or generation_config["top_k"],
repetition_penalty=repetition_penalty or generation_config["repetition_penalty"],
eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
pad_token_id=self.tokenizer.pad_token_id
))
if max_length:
generation_config.pop("max_new_tokens", None)
generation_config["max_length"] = max_length
if max_new_tokens:
generation_config.pop("max_length", None)
generation_config["max_new_tokens"] = max_new_tokens
gen_kwargs = dict(
inputs=input_ids,
generation_config=GenerationConfig(**generation_config),
logits_processor=get_logits_processor()
)
return gen_kwargs, prompt_length
@torch.inference_mode()
def chat(
self,
prompt: str,
history: Optional[List[Tuple[str, str]]] = None,
system: Optional[str] = None,
**input_kwargs
) -> Tuple[str, Tuple[int, int]]:
gen_kwargs, prompt_length = self.process_args(prompt, history, system, **input_kwargs)
generation_output = self.model.generate(**gen_kwargs)
outputs = generation_output.tolist()[0][prompt_length:]
response = self.tokenizer.decode(outputs, skip_special_tokens=True)
response_length = len(outputs)
return response, (prompt_length, response_length)
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "chat_llama2"
| [
"11",
"None"
] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~models~chat_weaverbird.py | from typing import (
Any,
List,
Optional, )
from langchain import PromptTemplate
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import LLM
from langchain.schema import (
BaseMessage,
ChatResult, Document,
)
from langchain.schema import BaseRetriever
from weaverbird.utils.misc import get_current_time
class ChatWeaverBird(BaseChatModel):
model_name: str = "chat_weaverbird"
"""model name of WeaverBird, default is `chat_weaverbird`"""
llm_model: LLM
"""LLM model to use in weaverbird"""
retriever_model: Optional[BaseRetriever] = None
"""retriever model to use in weaverbird"""
prompt_template: PromptTemplate
"""template to construct the prompt """
streaming: bool = False
"""Whether to stream the results or not."""
    def __init__(self, llm_model, retriever_model, prompt_template):
        # BaseChatModel is a pydantic model, so required fields are passed to the parent
        # constructor rather than assigned afterwards.
        super(ChatWeaverBird, self).__init__(
            llm_model=llm_model,
            retriever_model=retriever_model,
            prompt_template=prompt_template,
        )
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
chat_history = kwargs.get('chat_history', [])
docs = []
if self.retriever_model is not None:
docs = self.retriever_model._get_relevant_documents()
should_stream = stream if stream is not None else self.streaming
if len(docs) > 0:
prompt = self._generate_prompt(docs, messages)
else:
prompt = messages
for answer_result in self.llm_model._generate_answer(prompt=prompt,
history=chat_history,
streaming=should_stream):
            resp = answer_result.generations
history = answer_result.llm_output['history']
history[-1][0] = messages
response = {
"prompt": prompt,
"query": messages,
"result": resp,
"source_documents": docs
}
yield response, history
def _generate_prompt(self,
related_docs: List[Document],
query: List[BaseMessage]):
cur_time = get_current_time()
if len(related_docs):
context = "\n".join(
[f"{doc.metadata.get('date', '')} {doc.metadata.get('title', '')} {doc.page_content}" for doc in
related_docs])
else:
context = ''
        # concatenate the chat messages into a single query string
query = ''.join(query)
kwargs = {'question': query, 'date': cur_time, 'context': context}
return self.prompt_template.format(kwargs)
@property
def _llm_type(self) -> str:
return "chat_weaverbird"
| [] |
2024-01-10 | ant-research/fin_domain_llm | scripts~chat_llm_model.py | from langchain import LLMChain
from weaverbird.chains.chat_retro.prompt import CHAT_RETRO_EN_PROMPT
from weaverbird.models import ChatGLM2
from weaverbird.utils import parse_configs
def main():
model_config_dict = {'model_name_or_path': 'chatglm2-6b'}
configs = parse_configs(model_config_dict)
chat_model = ChatGLM2(configs['model_config'], generation_config=configs['generation_config'])
chat_prompt = CHAT_RETRO_EN_PROMPT
chain = LLMChain(prompt=chat_prompt, llm=chat_model, verbose=True)
print(chain({'context': 'hello', 'date': '20200930', 'question': 'what is nasdaq close price'}))
return
if __name__ == '__main__':
main()
| [] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~document_loaders~local_kb_loader.py | import os
from typing import List, Optional
from langchain.document_loaders import TextLoader, UnstructuredMarkdownLoader, UnstructuredFileLoader
from langchain.document_loaders.base import BaseLoader
from langchain.text_splitter import TextSplitter
from tqdm import tqdm
from weaverbird.cn_text_splitter import ChineseTextSplitter
from weaverbird.utils import logger
def tree(filepath, ignore_dir_names=None, ignore_file_names=None):
"""
    Return two lists: the first contains the full paths of all files under filepath, the second
    the corresponding file names.
borrowed from https://github.com/chatchat-space/Langchain-Chatchat/
"""
if ignore_dir_names is None:
ignore_dir_names = []
if ignore_file_names is None:
ignore_file_names = []
ret_list = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("Directory not existed")
return None, None
elif os.path.isfile(filepath) and os.path.basename(filepath) not in ignore_file_names:
return [filepath], [os.path.basename(filepath)]
elif os.path.isdir(filepath) and os.path.basename(filepath) not in ignore_dir_names:
for file in os.listdir(filepath):
fullfilepath = os.path.join(filepath, file)
if os.path.isfile(fullfilepath) and os.path.basename(fullfilepath) not in ignore_file_names:
ret_list.append(fullfilepath)
if os.path.isdir(fullfilepath) and os.path.basename(fullfilepath) not in ignore_dir_names:
ret_list.extend(tree(fullfilepath, ignore_dir_names, ignore_file_names)[0])
return ret_list, [os.path.basename(p) for p in ret_list]
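# Illustrative sketch (hypothetical directory layout): for a folder containing kb/a.txt and
# kb/sub/b.md, tree("kb") is expected to return, up to listing order,
#   (["kb/a.txt", "kb/sub/b.md"], ["a.txt", "b.md"])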
def load_file(file_dir, text_splitter):
if file_dir.lower().endswith(".md"):
loader = UnstructuredMarkdownLoader(file_dir)
elif file_dir.lower().endswith(".txt"):
loader = TextLoader(file_dir, autodetect_encoding=True)
else:
loader = UnstructuredFileLoader(file_dir, mode="elements")
docs = loader.load_and_split(text_splitter=text_splitter)
return docs
class LocalKnowledgeBaseLoader(BaseLoader):
def __init__(self,
file_dir: str or List[str],
text_splitter: Optional[TextSplitter] = ChineseTextSplitter
):
self.file_dir = file_dir
self.text_splitter = text_splitter
def _load_from_single_dir(self, file_dir):
docs = []
loaded_files = []
failed_files = []
if not os.path.exists(file_dir):
logger.info("Directory not existed")
return None
elif os.path.isfile(file_dir):
file = os.path.split(file_dir)[-1]
try:
docs = load_file(file_dir, self.text_splitter)
logger.info(f"{file} loaded")
loaded_files.append(file_dir)
except Exception as e:
logger.error(e)
logger.info(f"{file} failed to load")
failed_files.append(file)
elif os.path.isdir(file_dir):
docs = []
for single_file_dir, file in tqdm(zip(*tree(file_dir)), desc="loading files"):
try:
docs += load_file(single_file_dir, self.text_splitter)
loaded_files.append(single_file_dir)
except Exception as e:
logger.error(e)
failed_files.append(single_file_dir)
return docs, loaded_files, failed_files
def _load_from_multiple_dir(self, file_dir):
docs = []
loaded_files = []
failed_files = []
for file in file_dir:
docs_, loaded_files_, failed_files_ = self._load_from_single_dir(file)
docs.extend(docs_)
loaded_files.extend(loaded_files_)
failed_files.extend(failed_files_)
return docs, loaded_files, failed_files
def load(self):
if isinstance(self.file_dir, str):
docs, loaded_files, failed_files = self._load_from_single_dir(self.file_dir)
else:
docs, loaded_files, failed_files = self._load_from_multiple_dir(self.file_dir)
return docs
| [] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~retrievers~web_searcher.py | from typing import List
import dateparser
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
from langchain.utilities import SerpAPIWrapper
from weaverbird.config_factory import RetroConfig
from weaverbird.utils import logger
class WebSearcher(BaseRetriever):
"""Simplest WebSearcher
borrowed from
https://github.com/langchain-ai/langchain/blob/ddd07001f354cd09a76a61e1f5c678bf885506d2/libs/langchain/langchain/retrievers/web_research.py
"""
web_searcher: SerpAPIWrapper = Field(..., description="Web Search API Wrapper")
class Config:
"""Configuration for this pydantic object."""
retro_name = 'web_searcher_retro'
@classmethod
def build_from_config(cls, search_config: RetroConfig):
serpapi_api_key = search_config.serp_api_token
search_config_dict = search_config.dict()
search_config_dict.pop('serp_api_token')
return cls(web_searcher=SerpAPIWrapper(serpapi_api_key=serpapi_api_key, params=search_config_dict))
def _search_result2docs(self, search_results):
docs = []
logger.info(f'# search_results {len(search_results)}')
for result in search_results:
doc = Document(page_content=result["snippet"].replace('\n', '') if "snippet" in result.keys() else "",
metadata={"link": result["link"] if "link" in result.keys() else "",
"title": result["title"] if "title" in result.keys() else "",
"source": result["source"] if "source" in result.keys() else "",
"filename": result["title"] if "title" in result.keys() else "",
"date": dateparser.parse(result['date']).strftime(
"%Y-%m-%d") if 'date' in result.keys() else "",
"score": 100}) # for the moment we fix the score
docs.append(doc)
return docs
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Search websites for documents related to the query input.
Args:
query: user query
Returns:
Relevant documents from various urls.
"""
search_results = self.web_searcher.results(self.clean_search_query(query))
return self._search_result2docs(search_results['organic_results'])
def clean_search_query(self, query: str) -> str:
# Some search tools (e.g., Google) will
# fail to return results if query has a
# leading digit: 1. "LangCh..."
# Check if the first character is a digit
if query[0].isdigit():
# Find the position of the first quote
first_quote_pos = query.find('"')
if first_quote_pos != -1:
# Extract the part of the string after the quote
query = query[first_quote_pos + 1:]
# Remove the trailing quote if present
if query.endswith('"'):
query = query[:-1]
return query.strip()
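    # Illustrative example (hypothetical query, not from the original tests): a leading enumeration
    # digit and the surrounding quotes are stripped before the query reaches the search API, e.g.
    #   clean_search_query('1. "nasdaq close price today"')  ->  'nasdaq close price today'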
| [] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~cn_text_splitter.py | import re
from typing import List
from langchain.text_splitter import CharacterTextSplitter
class ChineseTextSplitter(CharacterTextSplitter):
"""
borrowed from https://github.com/chatchat-space/Langchain-Chatchat/blob/
f1f8ab80e4f72156abeb12afd8566ff90beca350/text_splitter/chinese_text_splitter.py
"""
def __init__(self, sentence_size: int, pdf: bool = False, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
    def split_text(self, text: str) -> List[str]:  # NOTE: this splitting logic could be further optimized
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub('\s', " ", text)
text = re.sub("\n\n", "", text)
        text = re.sub(r'([;;.!?。!?\?])([^”’])', r"\1\n\2", text)  # single-character sentence-ending punctuation
        text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)  # English ellipsis (......)
        text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)  # Chinese ellipsis (……)
text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r'\1\n\2', text)
        # If a sentence terminator precedes a closing double quote, the quote marks the true end of the
        # sentence, so the newline separator is placed after the quote; note that the rules above carefully
        # keep the quotes attached to their sentences.
        text = text.rstrip()  # drop any extra trailing \n at the end of the paragraph
        # Many rule sets also treat the semicolon ; as a sentence break, but it is ignored here, as are
        # dashes and English double quotes; add simple tweaks if you need them.
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r'\1\n\2', ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r'\1\n\2', ele_ele1)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub('( ["’”」』]{0,2})([^ ])', r'\1\n\2', ele_ele2)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = ele2_ls[:ele2_id] + [i for i in ele_ele3.split("\n") if i] + ele2_ls[
ele2_id + 1:]
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = ele1_ls[:ele_id] + [i for i in ele2_ls if i] + ele1_ls[ele_id + 1:]
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1:]
return ls
| [] |
2024-01-10 | ant-research/fin_domain_llm | scripts~init_local_kb.py | from typing import List
from langchain import FAISS
from langchain.text_splitter import TextSplitter
from weaverbird.document_loaders import LocalKnowledgeBaseLoader
from weaverbird.embeddings import QueryRefEncoder
class NewFinDocTextSplitter(TextSplitter):
def __init__(self):
super(NewFinDocTextSplitter, self).__init__()
def split_text(self, text: str) -> List[str]:
# split by "Doc" because text has \n
documents = text.split("Doc ")[1:]
return documents
def main():
text_splitter = NewFinDocTextSplitter()
loader = LocalKnowledgeBaseLoader("report_cn_v0724.txt", text_splitter=text_splitter)
docs = loader.load()
print(len(docs))
model_dir = 'encoder'
embeddings = QueryRefEncoder(model_dir=model_dir)
db = FAISS.from_documents(docs, embeddings)
query = "迈瑞医疗(300760)2022 年三季报发布的业绩是多少"
docs = db.similarity_search(query)
print(docs[0].page_content)
return
if __name__ == '__main__':
main()
| [] |
2024-01-10 | ant-research/fin_domain_llm | weaverbird~models~chat_model.py | from typing import (
Any,
List,
Optional,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseMessage, ChatResult
from weaverbird.config_factory import FinetuningConfig, GenerationConfig, BaseModelConfig
from weaverbird.models.llm_model import LLMModel
class WeaverBirdChat(BaseChatModel):
model_name: str = "weaverbird_chat"
"""model name of WeaverBird, default is `weaverbird_chat`"""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
max_retries: int = 6
"""Maximum number of retries to make when generating"""
streaming: Optional[bool] = True
"""streaming mode. not supported yet"""
llm_model: Optional[LLMModel] = None
"""LLM model to use in weaverbird"""
retriever_model: Optional[LLMModel] = None
"""retriever model to use in weaverbird"""
@classmethod
def build_from_config(cls,
llm_model_config: BaseModelConfig,
llm_finetuning_config: Optional[FinetuningConfig] = None,
llm_generation_config: Optional[GenerationConfig] = None):
llm_model = LLMModel(model_config=llm_model_config,
finetuning_config=llm_finetuning_config,
generation_config=llm_generation_config)
return cls(llm_model=llm_model)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
pass
@property
def _llm_type(self) -> str:
return "weaverbird_chat"
| [] |
2024-01-10 | tjuHaoXiaotian/pymarl3 | src~components~segment_tree.py | import operator
# Directly from OpenAI Baseline implementation (https://github.com/openai/baselines)
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient ( O(log segment size) )
`reduce` operation which reduces `operation` over
a contiguous subsequence of items in the array.
        Parameters
        ----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
and operation for combining elements (eg. sum, max)
must form a mathematical group together with the set of
possible values for array elements (i.e. be associative)
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
            end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
            sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
        If array values are probabilities, this function
        allows sampling indexes according to the discrete
        probability distribution efficiently.
Parameters
----------
        prefixsum: float
            upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
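    # Illustrative sampling sketch (hypothetical values): for a SumSegmentTree of capacity 4 holding
    # the unnormalised masses [0.1, 0.3, 0.4, 0.2], the prefix sums are [0.1, 0.4, 0.8, 1.0], so
    # find_prefixsum_idx(0.45) returns 2 because the mass 0.45 falls inside the segment [0.4, 0.8)
    # owned by element 2. Drawing prefixsum uniformly from [0, sum()) therefore samples indices in
    # proportion to their stored values, which is how prioritized replay buffers use this structure.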
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
| [] |
2024-01-10 | PrajitR/fast-pixel-cnn | fast_pixel_cnn_pp~plotting.py | '''
Copied from OpenAI's pixel_cnn_pp/plotting.py
'''
import numpy as np
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
# Plot image examples.
def plot_img(img, title=None):
plt.figure()
plt.imshow(img, interpolation='nearest')
if title is not None:
plt.title(title)
plt.axis('off')
plt.tight_layout()
plt.show(block=False)
def img_stretch(img):
img = img.astype(float)
img -= np.min(img)
img /= np.max(img)+1e-12
return img
def img_tile(imgs, aspect_ratio=1.0, tile_shape=None, border=1,
border_color=0, stretch=False):
''' Tile images in a grid.
If tile_shape is provided only as many images as specified in tile_shape
will be included in the output.
'''
# Prepare images
if stretch:
imgs = img_stretch(imgs)
imgs = np.array(imgs)
if imgs.ndim != 3 and imgs.ndim != 4:
raise ValueError('imgs has wrong number of dimensions.')
n_imgs = imgs.shape[0]
# Grid shape
img_shape = np.array(imgs.shape[1:3])
if tile_shape is None:
img_aspect_ratio = img_shape[1] / float(img_shape[0])
aspect_ratio *= img_aspect_ratio
tile_height = int(np.ceil(np.sqrt(n_imgs * aspect_ratio)))
tile_width = int(np.ceil(np.sqrt(n_imgs / aspect_ratio)))
grid_shape = np.array((tile_height, tile_width))
else:
assert len(tile_shape) == 2
grid_shape = np.array(tile_shape)
# Tile image shape
tile_img_shape = np.array(imgs.shape[1:])
tile_img_shape[:2] = (img_shape[:2] + border) * grid_shape[:2] - border
# Assemble tile image
tile_img = np.empty(tile_img_shape)
tile_img[:] = border_color
for i in range(grid_shape[0]):
for j in range(grid_shape[1]):
img_idx = j + i*grid_shape[1]
if img_idx >= n_imgs:
# No more images - stop filling out the grid.
break
img = imgs[img_idx]
yoff = (img_shape[0] + border) * i
xoff = (img_shape[1] + border) * j
tile_img[yoff:yoff+img_shape[0], xoff:xoff+img_shape[1], ...] = img
return tile_img
def conv_filter_tile(filters):
n_filters, n_channels, height, width = filters.shape
tile_shape = None
if n_channels == 3:
# Interpret 3 color channels as RGB
filters = np.transpose(filters, (0, 2, 3, 1))
else:
# Organize tile such that each row corresponds to a filter and the
# columns are the filter channels
tile_shape = (n_channels, n_filters)
filters = np.transpose(filters, (1, 0, 2, 3))
filters = np.resize(filters, (n_filters*n_channels, height, width))
filters = img_stretch(filters)
return img_tile(filters, tile_shape=tile_shape)
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
    which images are reshaped and laid out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`PIL.Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0] + tile_spacing[0]) * tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1] + tile_spacing[1]) * tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4), dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in range(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
out_array[:, :, i] = np.zeros(out_shape,
dtype='uint8' if output_pixel_vals else out_array.dtype
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(X[i], img_shape, tile_shape, tile_spacing, scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
out_array = np.zeros(out_shape, dtype='uint8' if output_pixel_vals else X.dtype)
for tile_row in range(tile_shape[0]):
for tile_col in range(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(X[tile_row * tile_shape[1] + tile_col].reshape(img_shape))
else:
this_img = X[tile_row * tile_shape[1] + tile_col].reshape(img_shape)
# add the slice to the corresponding position in the
# output array
out_array[
tile_row * (H+Hs): tile_row * (H + Hs) + H,
tile_col * (W+Ws): tile_col * (W + Ws) + W
] \
= this_img * (255 if output_pixel_vals else 1)
return out_array
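# Illustrative usage sketch (hypothetical data, not from the original repo): tiling 100 flattened
# 28x28 images into a 10x10 grid with 1-pixel spacing.
#   X = np.random.rand(100, 784)
#   tiled = tile_raster_images(X, img_shape=(28, 28), tile_shape=(10, 10), tile_spacing=(1, 1))
#   tiled.shape  ->  (289, 289), i.e. (28 + 1) * 10 - 1 pixels per side.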
| [] |
2024-01-10 | christianlandgraf/rl_viewplanning | openai_ros~src~openai_ros~task_envs~task_envs_list.py | #!/usr/bin/env python
from gym.envs.registration import register
from gym import envs
def RegisterOpenAI_Ros_Env(task_env, max_episode_steps=10000):
"""
Registers all the ENVS supported in OpenAI ROS. This way we can load them
with variable limits.
    Here is where you have to PLACE YOUR NEW TASK ENV, to be registered and accessible.
    return: False if the Task_Env wasn't registered, True if it was.
"""
###########################################################################
result = True
if task_env == 'IPA_KIFZ_Viewplanning-v0':
register(
id=task_env,
entry_point=
'openai_ros.task_envs.ipa_kifz_viewplanning:IpaKIFZViewplanningEnv',
max_episode_steps=max_episode_steps,
)
# import our training environment
from openai_ros.task_envs import ipa_kifz_viewplanning
# Add here your Task Envs to be registered
else:
result = False
###########################################################################
if result:
# We check that it was really registered
supported_gym_envs = GetAllRegisteredGymEnvs()
#print("REGISTERED GYM ENVS===>"+str(supported_gym_envs))
assert (task_env in supported_gym_envs), "The Task_Robot_ENV given is not Registered ==>" + \
str(task_env)
return result
def GetAllRegisteredGymEnvs():
"""
Returns a List of all the registered Envs in the system
return EX: ['Copy-v0', 'RepeatCopy-v0', 'ReversedAddition-v0', ... ]
"""
all_envs = envs.registry.all()
env_ids = [env_spec.id for env_spec in all_envs]
return env_ids
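# Illustrative usage sketch (assumes `import gym` and a working ROS/Gazebo setup):
#   if RegisterOpenAI_Ros_Env('IPA_KIFZ_Viewplanning-v0', max_episode_steps=500):
#       env = gym.make('IPA_KIFZ_Viewplanning-v0')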
| [] |
2024-01-10 | christianlandgraf/rl_viewplanning | openai_ros~src~openai_ros~robot_gazebo_env.py | import rospy
import gym
from gym.utils import seeding
from .gazebo_connection import GazeboConnection
from .controllers_connection import ControllersConnection
#https://bitbucket.org/theconstructcore/theconstruct_msgs/src/master/msg/RLExperimentInfo.msg
from openai_ros.msg import RLExperimentInfo
import time
import os
import socket
# https://github.com/openai/gym/blob/master/gym/core.py
class RobotGazeboEnv(gym.Env):
def __init__(self,
robot_name_space,
controllers_list,
reset_controls,
start_init_physics_parameters=True,
reset_world_or_sim="SIMULATION",
**kwargs):
env_id = kwargs.get('env_id', -1)
self.env_id = env_id
#bashCommand ='hostname -I | cut -d' ' -f1)'
ROSIP = socket.gethostbyname(socket.gethostname()) #127.0.1.1
if env_id >= 0:
#os.environ['ROS_PACKAGE_PATH']=ROS_PACKAGE_PATH
#@HACK Setting ros and gazebo masters can be automated
#the gazebo master is different for different environment IDs as they can run on multiple computers
os.environ['ROS_MASTER_URI'] = "http://" + ROSIP + ":1131" + str(
env_id)[0]
if env_id < 3:
GAZEBOIP = ROSIP
os.environ[
'GAZEBO_MASTER_URI'] = "http://" + GAZEBOIP + ":1135" + str(
env_id)[0]
else:
GAZEBOIP = ROSIP
os.environ[
'GAZEBO_MASTER_URI'] = "http://" + GAZEBOIP + ":1135" + str(
env_id)[0]
# os.environ['ROS_IP'] = ROSIP
# os.environ['ROS_HOSTNAME'] = ROSIP
rospy.init_node('ipa_kifz_viewplanning_sb_' + str(env_id)[0],
anonymous=True,
log_level=rospy.WARN)
print("WORKER NODE " + str(env_id)[0])
# To reset Simulations
rospy.logdebug("START init RobotGazeboEnv")
self.gazebo = GazeboConnection(start_init_physics_parameters,
reset_world_or_sim)
self.controllers_object = ControllersConnection(
namespace=robot_name_space, controllers_list=controllers_list)
self.reset_controls = reset_controls
self.seed()
# Set up ROS related variables
self.episode_num = 0
self.cumulated_episode_reward = 0
self.reward_pub = rospy.Publisher('/openai/reward',
RLExperimentInfo,
queue_size=1)
self.step_time = 0
self.step_num = 0
# We Unpause the simulation and reset the controllers if needed
"""
        To check any topic we need to have the simulation running, so we need to do two things:
        1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
        that are paused for whatever reason.
        2) If the simulation was already running for some reason, we need to reset the controllers.
        This has to do with the fact that some plugins with tf don't understand the reset of the simulation
        and need to be reset to work properly.
"""
self.gazebo.unpauseSim()
if self.reset_controls:
self.controllers_object.reset_controllers()
rospy.logdebug("END init RobotGazeboEnv")
# Env methods
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
"""
Function executed each time step.
        Here we get the action, execute it in a time step and retrieve the
observations generated by that action.
:param action:
:return: obs, reward, done, info
"""
"""
Here we should convert the action num to movement action, execute the action in the
simulation and get the observations result of performing that action.
"""
start_time = time.time()
rospy.logdebug("START STEP OpenAIROS")
# self.gazebo.unpauseSim()
self._set_action(action)
# self.gazebo.pauseSim()
obs = self._get_obs()
done = self._is_done(obs)
info = {}
reward = self._compute_reward(obs, done)
self.cumulated_episode_reward += reward
# self._publish_reward_topic(reward,self.episode_num)
rospy.logdebug("END STEP OpenAIROS")
end_time = time.time()
self.step_time += end_time - start_time
self.step_num += 1
# rospy.logerr("Step Time: "+str(self.step_time/self.step_num))
return obs, reward, done, info
def reset(self):
rospy.logdebug("Reseting RobotGazeboEnvironment")
self._reset_sim()
self._init_env_variables()
self._update_episode()
obs = self._get_obs()
rospy.logdebug("END Reseting RobotGazeboEnvironment")
return obs
def close(self):
"""
Function executed when closing the environment.
Use it for closing GUIS and other systems that need closing.
:return:
"""
rospy.logdebug("Closing RobotGazeboEnvironment")
rospy.signal_shutdown("Closing RobotGazeboEnvironment")
def _update_episode(self):
"""
Publishes the cumulated reward of the episode and
increases the episode number by one.
:return:
"""
rospy.logwarn("PUBLISHING REWARD...")
self._publish_reward_topic(self.cumulated_episode_reward,
self.episode_num)
rospy.logwarn("PUBLISHING REWARD...DONE=" +
str(self.cumulated_episode_reward) + ",EP=" +
str(self.episode_num))
self.episode_num += 1
self.cumulated_episode_reward = 0
def _publish_reward_topic(self, reward, episode_number=1):
"""
This function publishes the given reward in the reward topic for
easy access from ROS infrastructure.
:param reward:
:param episode_number:
:return:
"""
reward_msg = RLExperimentInfo()
reward_msg.episode_number = episode_number
reward_msg.episode_reward = reward
self.reward_pub.publish(reward_msg)
# Extension methods
# ----------------------------
def _reset_sim(self):
"""Resets a simulation
"""
rospy.logdebug("RESET SIM START")
if self.reset_controls:
rospy.logdebug("RESET CONTROLLERS")
# self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
self._set_init_pose()
# self.gazebo.pauseSim()
self.gazebo.resetSim()
# self.gazebo.unpauseSim()
self.controllers_object.reset_controllers()
self._check_all_systems_ready()
# self.gazebo.pauseSim()
else:
rospy.logwarn("DONT RESET CONTROLLERS")
# self.gazebo.unpauseSim()
self._check_all_systems_ready()
self._set_init_pose()
# self.gazebo.pauseSim()
self.gazebo.resetSim()
# self.gazebo.unpauseSim()
self._check_all_systems_ready()
# self.gazebo.pauseSim()
rospy.logdebug("RESET SIM END")
return True
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
raise NotImplementedError()
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_done(self, observations):
"""Indicates whether or not the episode is done ( the robot has fallen for example).
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _env_setup(self, initial_qpos):
"""Initial configuration of the environment. Can be used to configure initial state
and extract information from the simulation.
"""
raise NotImplementedError()
| [] |
2024-01-10 | christianlandgraf/rl_viewplanning | ipa_kifz_viewplanning~scripts~viewpoint_sampling~viewpoint_sampler.py | #!/usr/bin/env python
import os.path
import gym
import numpy as np
import pandas as pd
import time
import datetime
from gym import wrappers
import click
import csv
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
if __name__ == '__main__':
rospy.init_node('ipa_kifz_viewpoint_sampling',
anonymous=True,
log_level=rospy.WARN)
# Init OpenAI_ROS Environment
rospy.loginfo("Init Task Environment")
task_and_robot_environment_name = rospy.get_param(
'/ipa_kifz/task_and_robot_environment_name')
rospy.loginfo("Init ROS Environment")
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Viewpoint sampling")
# Create Viewpoint file and add column header
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('ipa_kifz_viewplanning')
viewpoint_filename = os.path.join(pkg_path, "config",
"ipa_kifz_viewpoints.csv")
if not os.path.isfile(viewpoint_filename):
with open(viewpoint_filename, 'w') as f:
writer = csv.writer(f)
writer.writerow(['x', 'y', 'z', 'qx', 'qy', 'qz', 'qw'])
# Start viewpoint generation
for i in range(1):
observation = env.reset()
time.sleep(1)
# Execute the action in the environment and get feedback
for i in range(2000):
observation, reward, done, info = env.step(0)
print(observation[:7])
if click.confirm('Do you want to save this viewpoint?',
default=True):
print("Save viewpoint")
with open(viewpoint_filename, 'a') as f:
writer = csv.writer(f)
writer.writerow([
observation[0], observation[1], observation[2],
observation[3], observation[4], observation[5],
observation[6]
])
else:
print("Go to next viewpoint")
env.close()
| [] |
2024-01-10 | christianlandgraf/rl_viewplanning | openai_ros~src~openai_ros~robot_envs~ipa_kifz_env.py | import numpy as np
import rospy
import time
from openai_ros import robot_gazebo_env
from std_msgs.msg import Float64, Header
from sensor_msgs.msg import JointState, LaserScan, Image, PointCloud2, PointField
from actionlib import GoalStatusArray
from geometry_msgs.msg import Twist, Pose, PoseStamped, Transform, TransformStamped
from openai_ros.openai_ros_common import ROSLauncher
import actionlib
import sys
import moveit_commander
from moveit_msgs.srv import GetPositionIK, GetPositionIKRequest
from controller_manager_msgs.srv import SwitchController
from gazebo_msgs.srv import SetModelConfiguration, GetModelState, GetLinkState
from gazebo_msgs.srv import SetLinkProperties, SetLinkPropertiesRequest
import time
import open3d
from scipy.spatial.transform import Rotation
import sensor_msgs.point_cloud2 as pc2
import tf2_sensor_msgs
import tf2_ros
class IpaKIFZEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all view planning environments.
"""
def __init__(self):
rospy.logdebug("Start IpaKIFZEnv INIT...")
if self.sensor_only:
self.controllers_list = []
reset_controls = False
else:
self.controllers_list = [
"arm_controller", "joint_state_controller"
]
reset_controls = True
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(IpaKIFZEnv,
self).__init__(controllers_list=self.controllers_list,
robot_name_space="",
reset_controls=reset_controls,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
self._init_env()
# self.gazebo.unpauseSim()
#self.controllers_object.reset_controllers()
self._check_all_systems_ready()
# We Start all the ROS related Subscribers and publishers
rospy.Subscriber("/point_cloud", PointCloud2,
self._point_cloud_callback)
# self.gazebo.pauseSim()
rospy.logdebug("Finished IpaKIFZEnv INIT...")
def _init_env(self):
if not self.sensor_only:
# Init MoveIt
moveit_commander.roscpp_initialize(sys.argv)
self.robot = moveit_commander.RobotCommander()
# self.move_group = moveit_commander.MoveGroupCommander("welding_endeffector")
self.move_group = moveit_commander.MoveGroupCommander(
"sensor_endeffector")
self.scene = moveit_commander.PlanningSceneInterface()
# Init TF
self.tfBuffer = tf2_ros.Buffer()
self.listener = tf2_ros.TransformListener(self.tfBuffer)
else:
# Service to set sensor position
self.set_model_configuration = rospy.ServiceProxy(
'gazebo/set_model_configuration', SetModelConfiguration)
self.get_model_state = rospy.ServiceProxy('gazebo/get_model_state',
GetModelState)
self.get_link_state = rospy.ServiceProxy('gazebo/get_link_state',
GetLinkState)
self.planning_time = 0
self.planning_num = 0
self.measurement_time = 0
self.measurement_num = 0
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
self._check_point_cloud_ready()
return True
def _check_point_cloud_ready(self):
self.point_cloud = None
rospy.logdebug("Waiting for /point_cloud to be READY...")
while self.point_cloud is None and not rospy.is_shutdown():
try:
self.point_cloud = rospy.wait_for_message("/point_cloud",
PointCloud2,
timeout=5.0)
rospy.logdebug("Current /point_cloud READY=>")
except:
rospy.logerr(
"Current /point_cloud not ready yet, retrying for getting point_cloud"
)
return self.point_cloud
def _point_cloud_callback(self, data):
# self.point_cloud = data
pass
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def plan_pose(self, pose):
"""
        Moves the robot to the given pose if a motion plan has been found.
:param pose: 6d pose
:return:
"""
start_time = time.time()
# Option 1 Plan and Execute with MoveIt
if not self.sensor_only:
self.move_group.set_pose_target(pose)
success = self.move_group.go(wait=True)
self.move_group.stop()
self.move_group.clear_pose_targets()
# Option 2 Set Model Configuration, i.e. joint states
else:
# Save current pose and set the fake joints
joint_names = [
'joint_trans_x', 'joint_trans_y', 'joint_trans_z',
'joint_rot_x', 'joint_rot_y', 'joint_rot_z'
]
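# In sensor-only mode the 'sensor' model exposes six virtual joints (x/y/z translation and rotation);
# writing the target pose into these joint positions effectively teleports the sensor in Gazebo
# instead of planning a robot motion.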
pose_quaternion = [
pose.orientation.x, pose.orientation.y, pose.orientation.z,
pose.orientation.w
]
pose_euler = Rotation.from_quat(pose_quaternion).as_euler('XYZ')
joint_positions = [
pose.position.x, pose.position.y, pose.position.z,
pose_euler[0], pose_euler[1], pose_euler[2]
]
# Set the Sensor position in Gazebo
set_model_configuration_resp = self.set_model_configuration(
'sensor', 'sensor_description', joint_names, joint_positions)
success = set_model_configuration_resp.success
# get_model_state_resp = self.get_model_state('sensor', '')
time.sleep(0.05) #TODO: Tune physics parameter to avoid shifting!
end_time = time.time()
self.planning_time += end_time - start_time
self.planning_num += 1
# rospy.logdebug("Sensor Positioning Time: "+str(self.planning_time/self.planning_num))
return success
def get_current_pose(self):
if not self.sensor_only:
return self.move_group.get_current_pose().pose
else:
link_state_resp = self.get_link_state("link_6", "sensor")
current_sensor_pose = link_state_resp.link_state.pose
return current_sensor_pose
def get_open3d_point_cloud(self):
start_time = time.time()
# Wait on sensor measurement
self.point_cloud = self._check_point_cloud_ready()
# Transform Point Cloud to world coordinate system
if not self.sensor_only:
trafo_sensor_world = self.tfBuffer.lookup_transform(
'world', 'ensenso_sim_link', rospy.Time(), rospy.Duration(1.0))
else:
trafo_sensor_world = TransformStamped()
current_sensor_pose = self.get_current_pose()
trafo_sensor_world.transform.translation.x = current_sensor_pose.position.x
trafo_sensor_world.transform.translation.y = current_sensor_pose.position.y
trafo_sensor_world.transform.translation.z = current_sensor_pose.position.z
trafo_sensor_world.transform.rotation.x = current_sensor_pose.orientation.x
trafo_sensor_world.transform.rotation.y = current_sensor_pose.orientation.y
trafo_sensor_world.transform.rotation.z = current_sensor_pose.orientation.z
trafo_sensor_world.transform.rotation.w = current_sensor_pose.orientation.w
self.point_cloud = tf2_sensor_msgs.do_transform_cloud(
self.point_cloud, trafo_sensor_world)
self.point_cloud.header.frame_id = "world"
# Convert ROS sensor_msgs::PointCloud2 to Open3d format
open3d_cloud = self.convertROStoOpen3d(self.point_cloud)
end_time = time.time()
self.measurement_time += end_time - start_time
self.measurement_num += 1
# rospy.logdebug("Sensor Positioning Time: "+str(self.measurement_time/self.measurement_num))
return open3d_cloud
def convertROStoOpen3d(self, point_cloud):
field_names = ['x', 'y', 'z']
cloud_data = list(
pc2.read_points(point_cloud,
skip_nans=True,
field_names=field_names))
open3d_cloud = open3d.geometry.PointCloud()
if len(cloud_data) > 0:
xyz = [(x, y, z) for x, y, z in cloud_data]
open3d_cloud.points = open3d.utility.Vector3dVector(np.array(xyz))
return open3d_cloud
def convertOpen3dtoROS(self, open3d_cloud, frame_id='world'):
fields_xyz = [
PointField(name='x',
offset=0,
datatype=PointField.FLOAT32,
count=1),
PointField(name='y',
offset=4,
datatype=PointField.FLOAT32,
count=1),
PointField(name='z',
offset=8,
datatype=PointField.FLOAT32,
count=1),
]
header = Header()
header.stamp = rospy.Time.now()
header.frame_id = frame_id
cloud_out = pc2.create_cloud(header, fields_xyz,
np.asarray(open3d_cloud.points))
return cloud_out
| [] |
2024-01-10 | christianlandgraf/rl_viewplanning | ipa_kifz_viewplanning~scripts~stable_baselines~start_training_sb.py | #!/usr/bin/env python
import gym
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from geometry_msgs.msg import Pose
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment_Parallel
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from openai_ros.msg import RLExperimentInfo
from stable_baselines3 import PPO, DQN
from stable_baselines3.dqn import MlpPolicy as DQNMlpPolicy
from stable_baselines3.ppo import MlpPolicy as PPOMlpPolicy
from stable_baselines3.common.callbacks import CheckpointCallback, StopTrainingOnRewardThreshold
from stable_baselines3.common.cmd_util import make_vec_env
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, register_policy
from stable_baselines3.common.utils import get_schedule_fn
# from stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv
import time
import datetime
import os
import sys
import numpy as np
from typing import Callable
import torch
from tensorboardX import SummaryWriter
if __name__ == '__main__':
#############################
# Initialization
#############################
rospy.init_node('ipa_kifz_viewplanning_sb',
anonymous=True,
log_level=rospy.WARN)
# Init OpenAI_ROS Environment
rospy.loginfo("Init Task Environment")
task_and_robot_environment_name = rospy.get_param(
'/ipa_kifz/task_and_robot_environment_name')
rospy.loginfo("Init ROS Environment")
algorithm = rospy.get_param("/algorithm")
#############################
# Parameter
#############################
# Common parameters
policy = rospy.get_param('/ipa_kifz/policy')
learning_rate = rospy.get_param('/ipa_kifz/learning_rate')
batch_size = rospy.get_param('/ipa_kifz/batch_size')
total_timesteps = rospy.get_param('/ipa_kifz/total_timesteps')
num_envs = rospy.get_param('ipa_kifz/num_envs', 1)
# Algorithm-specific parameters
if algorithm == "dqn":
buffer_size = rospy.get_param('/ipa_kifz/buffer_size')
learning_starts = rospy.get_param('/ipa_kifz/learning_starts')
tau = rospy.get_param('/ipa_kifz/tau')
gamma = rospy.get_param('/ipa_kifz/gamma')
train_freq = rospy.get_param('/ipa_kifz/train_freq')
gradient_steps = rospy.get_param('/ipa_kifz/gradient_steps')
n_episodes_rollout = rospy.get_param('/ipa_kifz/n_episodes_rollout')
target_update_interval = rospy.get_param(
'/ipa_kifz/target_update_interval')
exploration_fraction = rospy.get_param(
'/ipa_kifz/exploration_fraction')
exploration_initial_eps = rospy.get_param(
'/ipa_kifz/exploration_initial_eps')
exploration_final_eps = rospy.get_param(
'/ipa_kifz/exploration_final_eps')
max_grad_norm = rospy.get_param('/ipa_kifz/max_grad_norm')
elif algorithm == "ppo":
n_steps = rospy.get_param("/ipa_kifz/n_steps")
batch_size = rospy.get_param("/ipa_kifz/batch_size")
n_epochs = rospy.get_param("/ipa_kifz/n_epochs")
gamma = rospy.get_param("/ipa_kifz/gamma")
gae_lambda = rospy.get_param("/ipa_kifz/gae_lambda")
clip_range = rospy.get_param("/ipa_kifz/clip_range")
ent_coef = rospy.get_param("/ipa_kifz/ent_coef")
vf_coef = rospy.get_param("/ipa_kifz/vf_coef")
max_grad_norm = rospy.get_param("/ipa_kifz/max_grad_norm")
#############################
# Logging
#############################
logfile_time = datetime.datetime.now()
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('ipa_kifz_viewplanning')
sys.path.append(os.path.join(pkg_path, "scripts"))
from helper import ipa_kifz_logging
sbLogger = ipa_kifz_logging.Logger(logfile_time, log_freq=1)
sbLogger.save_params()
writer = SummaryWriter(sbLogger.log_directory)
def pose_callback(data):
sbLogger.log_pose([
data.position.x, data.position.y, data.position.z,
data.orientation.x, data.orientation.y, data.orientation.z,
data.orientation.w
])
def reward_callback(data):
sbLogger.log_episode(data.episode_reward)
writer.add_scalar('episode_reward', data.episode_reward,
data.episode_number)
pose_sub = rospy.Subscriber("openai/episode_poses", Pose, pose_callback)
reward_sub = rospy.Subscriber("openai/reward", RLExperimentInfo,
reward_callback)
#############################
# Gym Environment Stuff
#############################
def create_parallel_envs(env_id, env_name):
"""
Helper function for creating parallel environments for training
"""
eid = env_id + 1
print("eid: " + str(eid))
env = StartOpenAI_ROS_Environment_Parallel(env_name, eid)
return env
envs = []
def ret_lambda_func(k, name):
return lambda: create_parallel_envs(k, name)
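# The extra indirection avoids Python's late binding of closure variables: each returned
# lambda captures its own value of k instead of the loop variable shared by all iterations.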
for k in range(num_envs):
envs.append(ret_lambda_func(k, task_and_robot_environment_name))
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
# env = make_vec_env('IPA_KIFZ_Viewplanning-v0', n_envs=1)
print("Gym environment done")
#############################
# Learning Rate Schedule
#############################
def linear_schedule(initial_value: float) -> Callable[[float], float]:
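# progress_remaining goes from 1.0 at the start of training to 0.0 at the end, so the
# returned schedule anneals the value linearly from initial_value down to zero.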
def func(progress_remaining: float) -> float:
return progress_remaining * initial_value
return func
#############################
# Setup Model
#############################
if algorithm == "dqn":
# policy = DQNMlpPolicy(
# env.observation_space,
# env.action_space,
# lr_scheduler,
# net_arch=[64, 64],
# optimizer_class = optimizer
# )
model = DQN(
DQNMlpPolicy, #policy #MlpPolicy
env,
learning_rate=linear_schedule(learning_rate),
buffer_size=buffer_size,
learning_starts=learning_starts,
batch_size=batch_size,
tau=tau,
gamma=gamma,
train_freq=train_freq,
gradient_steps=gradient_steps,
n_episodes_rollout=n_episodes_rollout,
target_update_interval=target_update_interval,
exploration_fraction=exploration_fraction,
exploration_initial_eps=exploration_initial_eps,
exploration_final_eps=exploration_final_eps,
max_grad_norm=max_grad_norm,
tensorboard_log=sbLogger.log_directory,
verbose=2)
elif algorithm == "ppo":
# custom_ppo_mlppolicy = PPOMlpPolicy(
# env.observation_space,
# env.action_space,
# linear_schedule(learning_rate),
# )
# Define the RL algorithm and start learning
model = PPO(
PPOMlpPolicy,
env,
learning_rate=learning_rate, #linear_schedule(0.1),
n_steps=n_steps,
batch_size=batch_size,
n_epochs=n_epochs,
gamma=gamma,
gae_lambda=gae_lambda,
clip_range=clip_range,
clip_range_vf=None, # - depends on reward scaling!
ent_coef=ent_coef,
tensorboard_log=sbLogger.log_directory,
verbose=2,
)
sbLogger.model = model
sbLogger.training_env = model.get_env()
#############################
# Learning
#############################
'''Training the model'''
rospy.logwarn("Start training")
model.learn(
total_timesteps=total_timesteps,
log_interval=sbLogger.log_freq * 4,
callback=[CheckpointCallback(1000, sbLogger.log_directory)],
# tb_log_name='DQN',
)
sbLogger.save_model(model)
rospy.logwarn("DONE TRAINING")
# Load saved model and make predictions
# del model
# model = DQN.load("ipa_kifz_viewplanning-v0")
# obs = env.reset()
# while True:
# action, _states = model.predict(obs)
# obs, rewards, dones, info = env.step(action)
# env.render()
writer.close()
env.close() | [] |
2024-01-10 | christianlandgraf/rl_viewplanning | ipa_kifz_viewplanning~scripts~qlearn~start_training_qlearn.py | #!/usr/bin/env python
'''
The Q Learning Training Process is based on
https://bitbucket.org/theconstructcore/openai_examples_projects/src/master/turtle2_openai_ros_example/scripts/start_qlearning.py
'''
import os.path
import sys
import gym
import numpy as np
import pandas as pd
import csv
import time
import datetime
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
import torch
from tensorboardX import SummaryWriter
if __name__ == '__main__':
rospy.init_node('ipa_kifz_viewplanning_qlearn',
anonymous=True,
log_level=rospy.WARN)
# Init OpenAI_ROS Environment
rospy.loginfo("Init Task Environment")
task_and_robot_environment_name = rospy.get_param(
'/ipa_kifz/task_and_robot_environment_name')
rospy.loginfo("Init ROS Environment")
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('ipa_kifz_viewplanning')
from helper import ipa_kifz_logging
logfile_time = datetime.datetime.now()
qLearnLogger = ipa_kifz_logging.Logger(logfile_time, log_freq=2)
qLearnLogger.save_params()
env = wrappers.Monitor(env, qLearnLogger.log_directory, force=True)
rospy.loginfo("Monitor Wrapper started")
writer = SummaryWriter(qLearnLogger.log_directory)
last_time_steps = np.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
alpha = rospy.get_param("/ipa_kifz/alpha")
epsilon = rospy.get_param("/ipa_kifz/epsilon")
gamma = rospy.get_param("/ipa_kifz/gamma")
epsilon_discount = rospy.get_param("/ipa_kifz/epsilon_discount")
nepisodes = rospy.get_param("/ipa_kifz/nepisodes")
max_nsteps = rospy.get_param("/ipa_kifz/max_nsteps")
# running_step = rospy.get_param("/ipa_kifz/running_step")
# Initialises the algorithm that we are going to use for learning
rospy.logdebug("############### ACTION SPACE =>" + str(env.action_space))
# init Q-learning environment
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
epsilon=epsilon,
alpha=alpha,
gamma=gamma)
initial_epsilon = qlearn.epsilon
# Discrete Action Space!
states = []
for action in range(env.action_space.n):
state = ' '.join(
map(str, [
env.discretized_action_space[action][0][0],
env.discretized_action_space[action][0][1],
env.discretized_action_space[action][0][2],
np.around(env.discretized_action_space[action][0][3], 2),
np.around(env.discretized_action_space[action][0][4], 2),
np.around(env.discretized_action_space[action][0][5], 2),
np.around(env.discretized_action_space[action][0][6], 2)
]))
states.append(state)
states.append(' '.join(map(
str, [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]))) # initial state
qlearn.initQ(states, range(env.action_space.n))
start_time = time.time()
highest_reward = 0
reward_list = []
# Initialize Logger
# Start training loop
for x in range(nepisodes):
rospy.logdebug("############### START EPISODE=>" + str(x))
# Episode Init: Reset the environment and get first state of the robot
observation = env.reset()
cumulated_reward = 0
done = False
observation[observation == 0.] = 0.0 #Normalize -0. to 0.
state_0 = ' '.join(
map(str, [
observation[0], observation[1], observation[2], observation[3],
observation[4], observation[5], observation[6]
]))
# state_0 = env.init_state(discretized_actions)
# decrease epsilon in each episode
if qlearn.epsilon > 0.01:
qlearn.epsilon *= epsilon_discount
# Show the actual robot pose on screen
# env.render()
# each episode, the robot does not more than max_nsteps with measurements
previous_actions = []
episode_poses = []
for i in range(max_nsteps):
rospy.logwarn("############### Start Step=>" + str(i))
# Pick an action based on the current state
action, previous_actions = qlearn.chooseAction(
state_0, previous_actions)
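# previous_actions is threaded through chooseAction so that (presumably) already-visited
# viewpoints are excluded from the epsilon-greedy selection within an episode; the qlearn
# module itself is not shown here.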
rospy.logwarn("Next action is:" + str(action))
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
state_1 = ' '.join(
map(str, [
observation[0], observation[1], observation[2],
observation[3], observation[4], observation[5],
observation[6]
]))
# cumulate reward
if reward < 0:
reward = 0
cumulated_reward += reward
writer.add_scalar('episode_reward', cumulated_reward, x)
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state_0))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" +
str(cumulated_reward))
rospy.logwarn("# Next state=>" + str(state_1))
qlearn.learn(state_0, action, reward, state_1)
# Save poses for logging
episode_poses.append(observation[:-1])
# Check if done
if not (done):
rospy.logwarn("NOT DONE")
state_0 = state_1
else:
rospy.logwarn("DONE")
last_time_steps = np.append(last_time_steps, [int(i + 1)])
break
rospy.logwarn("############### END Step=>" + str(i))
# rospy.logwarn("# updated q-table after episode " + str(x) + "=>" + str(qlearn.q))
reward_list.append(cumulated_reward)
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logwarn(
("EP: " + str(x + 1) + " - [alpha: " +
str(round(qlearn.alpha, 2)) + " - gamma: " +
str(round(qlearn.gamma, 2)) + " - epsilon: " +
str(round(qlearn.epsilon, 2)) + "] - Reward: " +
str(cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
qLearnLogger.log_episode(cumulated_reward, episode_poses, qlearn.q)
rospy.loginfo(
("\n|" + str(nepisodes) + "|" + str(qlearn.alpha) + "|" +
str(qlearn.gamma) + "|" + str(initial_epsilon) + "*" +
str(epsilon_discount) + "|" + str(highest_reward) + "| PICTURE |"))
l = last_time_steps.tolist()
l.sort()
qLearnLogger.save_model(qlearn.q)
# # print("Parameters: a="+str)
# rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
# rospy.loginfo("Best 100 score: {:0.2f}".format(
# reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
writer.close()
env.close()
| [] |
2024-01-10 | christianlandgraf/rl_viewplanning | openai_ros~src~openai_ros~task_envs~ipa_kifz_viewplanning.py | import rospy
import rospkg
from math import ceil, sqrt
import numpy as np
import pandas as pd
from gym import spaces
from openai_ros.robot_envs import ipa_kifz_env
from gym.envs.registration import register
from geometry_msgs.msg import Point, Pose, PoseStamped
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
import time
import itertools
import tf_conversions
from scipy.spatial.transform import Rotation
import sys
sys.path.append(".")
import open3d
import stl
import sensor_msgs.point_cloud2 as pc2
from ipa_kifz_viewplanning.srv import GetAreaGain, GetAreaGainRequest, GetAreaGainResponse
class IpaKIFZViewplanningEnv(ipa_kifz_env.IpaKIFZEnv):
def __init__(self):
"""
This Task Env is designed for learning the best sensor measurement poses.
"""
# Load Params from the desired Yaml file
self.algorithm = rospy.get_param('/algorithm')
if self.algorithm == "viewpoint_sampling":
yaml_file_name = "ipa_kifz_viewpoint_sampling.yaml"
elif self.algorithm == "generate_dataset":
yaml_file_name = "ipa_kifz_dataset_generation.yaml"
else:
yaml_file_name = "ipa_kifz_viewplanning.yaml"
LoadYamlFileParamsTest(
rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/config",
yaml_file_name=yaml_file_name)
# Whether to simulate the sensor only or the whole robot
self.sensor_only = rospy.get_param('/sensor_only')
# Workpiece Poses
self.workpiece_name = rospy.get_param(
'/ipa_kifz_viewplanning/workpiece_name')
self.workpiece_pose = rospy.get_param(
'/ipa_kifz_viewplanning/workpiece_pose',
default=[0, 0, 0, 0, 0, 0, 1])
# Number of desired poses and coverage per episode
self.desired_steps = rospy.get_param(
'/ipa_kifz_viewplanning/desired_steps', default=10)
self.desired_coverage = rospy.get_param(
'/ipa_kifz_viewplanning/desired_coverage', default=0.95)
# Whether to return reward at end of episode or iteration
self.use_cumulated_reward = rospy.get_param(
'/ipa_kifz_viewplanning/use_cumulated_reward')
if self.use_cumulated_reward:
self.actions_per_step = self.desired_steps
self.desired_steps = 1
else:
self.actions_per_step = 1
# (Re-)initialize the robot environment
super(IpaKIFZViewplanningEnv, self).__init__()
# Add the workpiece to the environment
self.workpiece_handler()
# Services and Publishers
self.get_area_gain = rospy.ServiceProxy(
'point_cloud_handler/get_area_gain', GetAreaGain)
self.log_pose_pub = rospy.Publisher('/openai/episode_poses',
Pose,
queue_size=10)
# Initial parameters defining the grid extend and location
self.init_pos_z = rospy.get_param('/ipa_kifz_viewplanning/init_pos_z',
default=0)
self.init_rot_qx = rospy.get_param(
'/ipa_kifz_viewplanning/init_rot_qx', default=0)
self.init_rot_qy = rospy.get_param(
'/ipa_kifz_viewplanning/init_rot_qy', default=0)
self.init_rot_qz = rospy.get_param(
'/ipa_kifz_viewplanning/init_rot_qz', default=0)
self.init_rot_qw = rospy.get_param(
'/ipa_kifz_viewplanning/init_rot_qw', default=1)
# Get action space limits
if rospy.has_param('/ipa_kifz_viewplanning/min_range_x'):
self.min_range_x = rospy.get_param(
'/ipa_kifz_viewplanning/min_range_x')
self.min_range_y = rospy.get_param(
'/ipa_kifz_viewplanning/min_range_y')
self.min_range_z = rospy.get_param(
'/ipa_kifz_viewplanning/min_range_z')
self.max_range_x = rospy.get_param(
'/ipa_kifz_viewplanning/max_range_x')
self.max_range_y = rospy.get_param(
'/ipa_kifz_viewplanning/max_range_y')
self.max_range_z = rospy.get_param(
'/ipa_kifz_viewplanning/max_range_z')
else:
self.set_grid_limits()
# For a discretized action space
self.is_discretized = rospy.get_param(
'/ipa_kifz_viewplanning/is_discretized', default=False)
if self.is_discretized:
# Load grid parameters
self.use_grid = rospy.get_param('/ipa_kifz_viewplanning/use_grid',
default=False)
if self.use_grid:
self.triangle_grid = rospy.get_param(
'/ipa_kifz_viewplanning/triangle_grid')
self.steps_yaw = rospy.get_param(
'/ipa_kifz_viewplanning/grid_steps_yaw')
self.step_size_x = rospy.get_param(
'/ipa_kifz_viewplanning/grid_step_size_x')
self.step_size_y = rospy.get_param(
'/ipa_kifz_viewplanning/grid_step_size_y')
self.step_size_z = rospy.get_param(
'/ipa_kifz_viewplanning/grid_step_size_z')
else:
if self.algorithm == "qlearn":
rospy.logerr(
"Q Learning cannot be appied to continuous spaces.")
sys.exit()
pass
# setting up action space for training
# Whether to simultaneously test
self.test_mode = rospy.get_param('/ipa_kifz_viewplanning/test_mode',
default=False)
# For Q Learning -> Discretize the action space (positions the robot can go to) as list of tuples!
if self.is_discretized:
# Use a regular x-y-z grid
if self.use_grid:
discretized_actions = self.get_grid()
self.discretized_action_space = discretized_actions
else:
viewpoint_list = self.get_viewpoints()
self.discretized_action_space = viewpoint_list
if self.use_cumulated_reward:
# Build all combinations of viewpoints and save them as individual lists
self.discretized_action_space = list(
itertools.combinations(self.discretized_action_space,
self.actions_per_step))  # number of poses per combined action ('desired_poses' is not defined in this class)
self.discretized_action_space = [
list(i) for i in self.discretized_action_space
]
else:
# Build arrays of one element to be compatible with the upper case
self.discretized_action_space = [
[i] for i in self.discretized_action_space
]
# Set action space to discrete number of actions
self.action_space = spaces.Discrete(
len(self.discretized_action_space))
else:
# Set continuous box action space
# Consists of poses of form (x,y) (restricted to plane to reduce complexity)
lower_bound = [0] * self.actions_per_step * 2
upper_bound = [0] * self.actions_per_step * 2
for i in range(self.actions_per_step):
lower_bound[i * 2] = self.min_range_x
lower_bound[i * 2 + 1] = self.min_range_y
upper_bound[i * 2] = self.max_range_x
upper_bound[i * 2 + 1] = self.max_range_y
self.action_space = spaces.Box(low=np.array(lower_bound),
high=np.array(upper_bound))
# self.action_space = spaces.Box(low=np.array([self.min_range_x, self.min_range_y, self.min_range_z, 0, 210, 0]),
# high=np.array([self.max_range_x, self.max_range_y, self.max_range_z, 360, 330, 360]))
# Setting up Observation Space for training
# Currently, the observation is the sensor position in x,y and z coordinates,
# its orientation quaternion (four components), and the area gain
self.observation_space = spaces.Box(low=np.array([
self.min_range_x, self.min_range_y, self.min_range_z, 0, 0, 0, 0, 0
]),
high=np.array([
self.max_range_x,
self.max_range_y,
self.max_range_z, 1, 1, 1, 1, 1
]))
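# Resulting observation vector: [x, y, z, qx, qy, qz, qw, normalized area gain]; when
# use_cumulated_reward is set, the seven pose entries are repeated for every pose of the
# step before the single area-gain entry.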
self.episode_steps = 1
self.pc_handling_time = 0
self.pc_handling_num = 0
# Initializing variables for testing and debugging
if self.test_mode:
if self.is_discretized:
self.area_gain_control = [0] * len(
self.discretized_action_space)
sys.exit()
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set to false Done, because its calculated asyncronously
self._episode_done = False
# Reset number of steps
self.episode_steps = 1
# Number of performed steps in total
self.cumulated_steps = -1
# Cumulated Point Cloud
self.cumulated_point_cloud = None
# All Actions which have been chosen in this episode
self.cumulated_actions = []
# View poses per episode
self.current_poses = []
# Cumulated area gain per episode
self.cumulated_area_gain = 0
return True
def _set_init_pose(self):
pass
def _set_action(self, action):
"""
Perform action, i.e. go to the pose
:param action: The next pose of the robot
"""
if self.is_discretized:
rospy.logwarn("Start Set Action ==>" + str(np.around(action, 2)))
else:
rospy.logwarn("Start Set Action ==>" + str(action))
next_pose = Pose()
# In case of using a cumulated reward approach, reset area gain
if self.use_cumulated_reward:
self.area_gain = 0
self.current_poses = [None] * self.actions_per_step
for i in range(self.actions_per_step):
# Option 1: RL Training, set next Action, i.e. next pose
if self.is_discretized:
next_pose.position.x = self.discretized_action_space[action][
i][0]
next_pose.position.y = self.discretized_action_space[action][
i][1]
next_pose.position.z = self.discretized_action_space[action][
i][2]
next_pose.orientation.x = self.discretized_action_space[
action][i][3]
next_pose.orientation.y = self.discretized_action_space[
action][i][4]
next_pose.orientation.z = self.discretized_action_space[
action][i][5]
next_pose.orientation.w = self.discretized_action_space[
action][i][6]
# Option 2: Debugging, Go through the complete action space for debugging purposes
if self.test_mode:
next_pose.position.x = self.discretized_action_space[
self.episode_steps - 1][i][0]
next_pose.position.y = self.discretized_action_space[
self.episode_steps - 1][i][1]
else:
# Option 3: Random sampling, for dataset generation
if self.algorithm == "viewpoint_sampling" or self.algorithm == "dataset_generation":
next_pose.position.x = np.random.uniform(
self.min_range_x, self.max_range_x)
next_pose.position.y = np.random.uniform(
self.min_range_y, self.max_range_y)
next_pose.position.z = np.random.uniform(
self.min_range_z, self.max_range_z)
rotx = np.random.uniform(0, 360)
roty = np.random.uniform(210, 330)
rotz = np.random.uniform(0, 360)
quat = Rotation.from_euler('XYZ', [rotx, roty, rotz],
degrees=True).as_quat()
next_pose.orientation.x = quat[0]
next_pose.orientation.y = quat[1]
next_pose.orientation.z = quat[2]
next_pose.orientation.w = quat[3]
else:
# Option 4 Approach given pose from action (currently only x,y coordinates )
next_pose.position.x = action[i * 2]
next_pose.position.y = action[i * 2 + 1]
next_pose.position.z = self.init_pos_z #action[2]
# quat = Rotation.from_euler('XYZ', [action[3], action[4], action[5]], degrees=True).as_quat()
next_pose.orientation.x = self.init_rot_qx
next_pose.orientation.y = self.init_rot_qy
next_pose.orientation.z = self.init_rot_qz
next_pose.orientation.w = self.init_rot_qw
# Go to pose
success = self.plan_pose(next_pose)
# Publish pose for logger
self.log_pose_pub.publish(next_pose)
# If multiple poses are evaluated, immediately update the area gain
if success and self.use_cumulated_reward:
self.area_gain += self.evaluate_measurement()
self.current_poses[i] = self.get_current_pose()
# If no robot plan is found, go to default pose (reward = zero) and finish episode
if not success and not self.sensor_only:
joint_goal = self.move_group.get_current_joint_values()
joint_goal[0] = -0.85
joint_goal[1] = -2.0
joint_goal[2] = 2.0
joint_goal[3] = 0.67
joint_goal[4] = 0.85
joint_goal[5] = -2.1
self.move_group.go(joint_goal, wait=True)
self._episode_done = True
rospy.logwarn("END Set Action")
def _get_obs(self):
"""
Here we define what sensor data makes up our robot's observations
:return: the stitched observations
"""
rospy.logwarn("Start Get Observation ==>")
# Case 1: Reward is gained for every iteration/step
if not self.use_cumulated_reward:
# Get reward and current measurement
self.area_gain = self.evaluate_measurement()
self.current_poses = [self.get_current_pose()]
# Round to two decimals in case of a discretized action space #TODO parameterize or change?
if self.is_discretized:
self.current_poses[0].position.x = np.around(
self.current_poses[0].position.x, 2)
self.current_poses[0].position.y = np.around(
self.current_poses[0].position.y, 2)
self.current_poses[0].position.z = np.around(
self.current_poses[0].position.z, 2)
self.current_poses[0].orientation.x = np.around(
self.current_poses[0].orientation.x, 2)
self.current_poses[0].orientation.y = np.around(
self.current_poses[0].orientation.y, 2)
self.current_poses[0].orientation.z = np.around(
self.current_poses[0].orientation.z, 2)
self.current_poses[0].orientation.w = np.around(
self.current_poses[0].orientation.w, 2)
if self.cumulated_steps == -1:
self.cumulated_steps += 1
return np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
# Compose observation of seven entries representing pose and the area gain
observations = np.array([
self.current_poses[0].position.x,
self.current_poses[0].position.y,
self.current_poses[0].position.z,
self.current_poses[0].orientation.x,
self.current_poses[0].orientation.y,
self.current_poses[0].orientation.z,
self.current_poses[0].orientation.w, self.area_gain
])
else:
observations = []
if self.current_poses:
# Compose observation of seven entries of all poses during this episode
for i in range(self.actions_per_step):
if self.is_discretized and self.use_grid:
self.current_poses[i].position.x = np.around(
self.current_poses[i].position.x, 2)
self.current_poses[i].position.y = np.around(
self.current_poses[i].position.y, 2)
self.current_poses[i].position.z = np.around(
self.current_poses[i].position.z, 2)
observations.extend([
self.current_poses[i].position.x,
self.current_poses[i].position.y,
self.current_poses[i].position.z,
self.current_poses[i].orientation.x,
self.current_poses[i].orientation.y,
self.current_poses[i].orientation.z,
self.current_poses[i].orientation.w
])
# Append the area gain and convert to numpy array
observations.extend([self.area_gain])
observations = np.array(observations)
else:
self.area_gain = 0
observations = np.array([0, 0, 0, 0, 0, 0, 1, self.area_gain])
self.cumulated_area_gain += self.area_gain
rospy.logwarn("END Get Observation ==> " + str(observations[-1]))
return observations
def _is_done(self, observations):
if not self._episode_done:
# Finish the episode once the desired coverage or the maximum number of allowed steps is reached
if self.cumulated_area_gain >= self.desired_coverage:
self._episode_done = True
elif self.episode_steps >= self.desired_steps:
self._episode_done = True
# Print to console if finished and return
if self._episode_done:
rospy.logwarn("Episode is done ==>")
return self._episode_done
def _compute_reward(self, observations, done):
# rospy.logwarn("Start Get Reward")
self.episode_steps += 1
self.cumulated_steps += 1
reward = observations[-1]
rospy.logwarn("End Get Reward ==> " + str(reward))
return reward
# Internal TaskEnv Methods
def evaluate_measurement(self):
"""Evaluating a measurement and compute
the area gain, which is used to compose the reward
Args:
point_cloud (sensor_msgs/PointCloud2): the simulated measurement
Returns:
float: the absolute area gain of the given point cloud in cm^2
"""
point_cloud = self.get_open3d_point_cloud()
# Remove Ground/Table from point cloud
# point_cloud_array = np.asarray(point_cloud.points)
# point_cloud_array = point_cloud_array[np.where(point_cloud_array[:,2] >= 1.03)]
# point_cloud.points = open3d.utility.Vector3dVector(point_cloud_array)
if point_cloud.is_empty():
normalized_area_gain = 0
rospy.logerr("point cloud is empty")
else:
start_time = time.time()
# Call the Point Cloud Handler to calculate the area gain
if self.cumulated_point_cloud == None:
get_area_gain_req = GetAreaGainRequest()
get_area_gain_req.standalone_pcd = True
get_area_gain_req.new_pcd = self.convertOpen3dtoROS(
point_cloud)
get_area_gain_resp = self.get_area_gain.call(get_area_gain_req)
self.cumulated_point_cloud = self.convertROStoOpen3d(
get_area_gain_resp.cumulated_pcd)
if get_area_gain_resp.area_gain > 0:
normalized_area_gain = get_area_gain_resp.area_gain / self.workpiece_area
else:
normalized_area_gain = 0
else:
get_area_gain_req = GetAreaGainRequest()
get_area_gain_req.standalone_pcd = False
get_area_gain_req.new_pcd = self.convertOpen3dtoROS(
point_cloud)
get_area_gain_req.previous_pcd = self.convertOpen3dtoROS(
self.cumulated_point_cloud)
get_area_gain_resp = self.get_area_gain.call(get_area_gain_req)
self.cumulated_point_cloud = self.convertROStoOpen3d(
get_area_gain_resp.cumulated_pcd)
if get_area_gain_resp.area_gain > 0:
normalized_area_gain = get_area_gain_resp.area_gain / self.workpiece_area
else:
normalized_area_gain = 0
if self.test_mode:
if self.is_discretized and self.episode_num > 1:
self.test_area_gain(normalized_area_gain)
end_time = time.time()
self.pc_handling_time += end_time - start_time
self.pc_handling_num += 1
# rospy.logerr("Handling Time: "+str(self.pc_handling_time/self.pc_handling_num))
return normalized_area_gain
#############################
# View pose sampling
#############################
def get_viewpoints(self):
"""Read pre-sampled viewpoints
Returns:
list: a list of viewpoints consisting of tuples
of size seven representing the pose (x,y,z,qx,qy,qz,qw)
"""
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('ipa_kifz_viewplanning')
viewpoint_filename = os.path.join(
pkg_path, "config",
"ipa_kifz_viewposes_" + self.workpiece_name + ".csv")
df = pd.read_csv(viewpoint_filename)
viewpoint_list = []
nb_viewpoints = len(df)
for i in range(nb_viewpoints):
viewpoint_pose = tuple(
(np.around(df.loc[i]['x'], 2), np.around(df.loc[i]['y'], 2),
np.around(df.loc[i]['z'], 2), df.loc[i]['qx'],
df.loc[i]['qy'], df.loc[i]['qz'], df.loc[i]['qw']))
viewpoint_list.append(viewpoint_pose)
return viewpoint_list
def set_grid_limits(self):
"""Compute start and end boundaries for action space
This is done by getting the workpiece bounding box
and manually chosen thresholds in the x, y and z directions.
"""
wp_mesh = stl.mesh.Mesh.from_file(self.workpiece_path)
minx = maxx = miny = maxy = minz = maxz = None
for p in wp_mesh.points:
# p contains (x, y, z)
if minx is None:
minx = p[stl.Dimension.X]
maxx = p[stl.Dimension.X]
miny = p[stl.Dimension.Y]
maxy = p[stl.Dimension.Y]
minz = p[stl.Dimension.Z]
maxz = p[stl.Dimension.Z]
else:
maxx = max(p[stl.Dimension.X], maxx)
minx = min(p[stl.Dimension.X], minx)
maxy = max(p[stl.Dimension.Y], maxy)
miny = min(p[stl.Dimension.Y], miny)
maxz = max(p[stl.Dimension.Z], maxz)
minz = min(p[stl.Dimension.Z], minz)
# TODO: Add variables to task configuration yaml
epsilon = -0.05
z_init = 0.3
# Set lower and upper boundaries
x0 = minx - epsilon
xlim = maxx + epsilon
y0 = miny - epsilon
ylim = maxy + epsilon
z0 = maxz + z_init
# Pose of the workpiece frame expressed in the world coordinate system
trafo_world_wp = np.identity(4)
trafo_world_wp[:3, 3] = self.workpiece_pose[:3]
trafo_world_wp[:3, :3] = Rotation.from_euler(
'xyz', self.workpiece_pose[3:]).as_matrix()
# Convert limits to homogeneous transforms
trafo_wp_0 = np.identity(4)
trafo_wp_0[:3, 3] = [x0, y0, z0]
trafo_wp_lim = np.identity(4)
trafo_wp_lim[:3, 3] = [xlim, ylim, z0]
# Compute min and max ranges
trafo_world_0 = np.dot(trafo_world_wp, trafo_wp_0)
trafo_world_lim = np.dot(trafo_world_wp, trafo_wp_lim)
self.min_range_x = trafo_world_0[0, 3]
self.min_range_y = trafo_world_0[1, 3]
self.min_range_z = trafo_world_0[2, 3]
self.max_range_x = trafo_world_lim[0, 3]
self.max_range_y = trafo_world_lim[1, 3]
self.max_range_z = trafo_world_lim[2, 3]
return
def get_grid(self):
"""This method samples viewpoints in a regular grid, which is defined by
its type (triangle or square) and step sizes in different directions
Returns:
array: A discrete list of view poses representing the grid
"""
# ...in case of triangle grid
if self.triangle_grid:
self.step_size_x = sqrt(.75 * self.step_size_y**2)
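# sqrt(0.75 * dy^2) = dy * sqrt(3) / 2, i.e. the row spacing of an equilateral-triangle
# lattice with side length step_size_y.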
x_steps = ceil(
(self.max_range_x - self.min_range_x) / self.step_size_x) + 1
y_steps = ceil(
(self.max_range_y - self.min_range_y) / self.step_size_y) + 1
z_steps = ceil(
(self.max_range_z - self.min_range_z) / self.step_size_z) + 1
# define view angles for pitch
pitch = 255.0
# ...and yaw, depending on number of yaws
yaw_step = 360 / self.steps_yaw
yaws = []
for i in range(self.steps_yaw):
yaws.append((i + .5) * yaw_step)
poses = []
for i in range(x_steps):
for j in range(y_steps):
for k in range(z_steps):
for l in range(self.steps_yaw):
quat = Rotation.from_euler('xyz',
[0, pitch, yaws[l]],
degrees=True).as_quat()
if (i % 2) == 0:
pose = tuple(
(round(
self.min_range_x +
(i - 0.5) * self.step_size_x, 2),
round(
self.min_range_y +
(j - 0.75) * self.step_size_y, 2),
round(
self.min_range_z +
k * self.step_size_z, 2), quat[0],
quat[1], quat[2], quat[3]))
poses.append(pose)
else:
pose = tuple(
(round(
self.min_range_x +
(i - 0.5) * self.step_size_x, 2),
round(
self.min_range_y +
(j - 0.25) * self.step_size_y, 2),
round(
self.min_range_z +
k * self.step_size_z, 2), quat[0],
quat[1], quat[2], quat[3]))
poses.append(pose)
# in case of a square grid:
else:
# New approach with angle change
x_steps = ceil(
(self.max_range_x - self.min_range_x) / self.step_size_x) + 1
y_steps = ceil(
(self.max_range_y - self.min_range_y) / self.step_size_y) + 1
z_steps = ceil(
(self.max_range_z - self.min_range_z) / self.step_size_z) + 1
# define view angles for pitch
pitch = 255.0
# ...and yaw, depending on number of yaws
yaw_step = 360 / self.steps_yaw
yaws = []
for i in range(self.steps_yaw):
yaws.append((i + .5) * yaw_step)
poses = []
for i in range(x_steps):
for j in range(y_steps):
for k in range(z_steps):
for l in range(self.steps_yaw):
quat = Rotation.from_euler('XYZ',
[0, pitch, yaws[l]],
degrees=True).as_quat()
pose = tuple(
(round(
self.min_range_x +
(i - .5) * self.step_size_x, 2),
round(
self.min_range_y +
(j - .5) * self.step_size_y, 2),
round(self.min_range_z + k * self.step_size_z,
2), quat[0], quat[1], quat[2], quat[3]))
poses.append(pose)
return poses
#############################
# Workpiece Handling
#############################
def workpiece_handler(self):
"""Add a workpiece from some dataset by its name and initial position
"""
self.add_workpiece("test_dataset", self.workpiece_name,
self.workpiece_pose[0], self.workpiece_pose[1],
self.workpiece_pose[2], self.workpiece_pose[3],
self.workpiece_pose[4], self.workpiece_pose[5])
def add_workpiece(self, dataset, workpiece, x, y, z, roll, pitch, yaw):
"""Spawns the workpiece at the given pose.
If necessary, sample a point cloud from the mesh.
Also add the mesh to the MoveIt planning scene as a collision object for path planning.
"""
# Add Workpiece to Gazebo using a Launch file
ROSLauncher(rospackage_name="ipa_kifz_viewplanning",
launch_file_name="spawn_workpiece.launch",
arguments="dataset:=" + dataset + " " + "object:=" +
workpiece + " " + "x:=" + str(x) + " " + "y:=" + str(y) +
" " + "z:=" + str(z) + " " + "R:=" + str(roll) + " " +
"P:=" + str(pitch) + " " + "Y:=" + str(yaw))
# Load mesh and transform to world coordinate system
rospack = rospkg.RosPack()
dataset_path = rospack.get_path('ipa_kifz_data')
self.workpiece_path = os.path.join(dataset_path, dataset, "meshes",
workpiece + ".STL")
workpiece_pcd_path = os.path.join(dataset_path, dataset, "pointclouds",
workpiece + ".pcd")
self.workpiece_mesh = open3d.io.read_triangle_mesh(self.workpiece_path)
self.workpiece_area = open3d.geometry.TriangleMesh.get_surface_area(
self.workpiece_mesh)
T = np.eye(4)
T[:3, :3] = Rotation.from_euler('xyz', [roll, pitch, yaw]).as_matrix()
T[:3, 3] = [x, y, z]
self.workpiece_mesh = self.workpiece_mesh.transform(T)
# Sample Mesh and save result
if not os.path.exists(workpiece_pcd_path):
print("Sampling workpiece mesh")
self.workpiece_pcd, self.workpiece_voxel_length = self.sample_wp(
self.workpiece_mesh)
open3d.io.write_point_cloud(workpiece_pcd_path, self.workpiece_pcd)
else:
self.workpiece_pcd = open3d.io.read_point_cloud(workpiece_pcd_path)
_, self.workpiece_voxel_length = self.sample_wp(
self.workpiece_mesh)
if not self.sensor_only:
# Add Workpiece to MoveIt Planning Scene Interface
workpiece_pose = PoseStamped()
workpiece_pose.header.frame_id = "world"
workpiece_pose.pose.position.x = x
workpiece_pose.pose.position.y = y
workpiece_pose.pose.position.z = z
orientation_quat = tf_conversions.transformations.quaternion_from_euler(
roll, pitch, yaw)
workpiece_pose.pose.orientation.x = orientation_quat[0]
workpiece_pose.pose.orientation.y = orientation_quat[1]
workpiece_pose.pose.orientation.z = orientation_quat[2]
workpiece_pose.pose.orientation.w = orientation_quat[3]
self.scene.add_mesh(workpiece, workpiece_pose, self.workpiece_path)
return
def sample_wp(self, workpiece):
"""Sample a workpeice
Args:
workpiece (open3d.geometry.TriangleMesh): a open3d Triangle Mesh
Returns:
[open3d.goemetry.PointCloud]: the sampled point cloud
[float]: a fixed voxel grid size
"""
wp_area = open3d.geometry.TriangleMesh.get_surface_area(workpiece)
point_number = int(wp_area * 300000)
voxel_size = sqrt(wp_area / point_number)
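# Roughly 300k sample points per unit of surface area; the voxel edge length is chosen as
# sqrt(area / points) so that on average about one sampled point falls into each surface voxel.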
wp_pcd = workpiece.sample_points_uniformly(
number_of_points=point_number)
return wp_pcd, voxel_size
#############################
# Testing and Debugging
#############################
def test_area_gain(self, area_gain):
"""This function should investigate, if the point cloud handler
function is reliable.
For example, by computing the mean absolute error of rewards
calculated for same states (or rather same view poses)
Args:
area_gain ([type]): [description]
"""
rospack = rospkg.RosPack()
log_path = rospack.get_path('ipa_kifz_viewplanning')
logfilename = os.path.join(log_path, "test_results",
"test_area_gain.txt")
self.area_gain_control[self.episode_steps - 1] += area_gain
if os.path.isfile(logfilename):
with open(logfilename, 'a') as filehandle:
if self.episode_steps == len(self.area_gain_control):
filehandle.write(
'%s %s\n\n' %
(area_gain,
(area_gain -
self.area_gain_control[self.episode_steps - 1] /
(self.episode_num - 1))))
else:
filehandle.write(
'%s %s\n' %
(area_gain,
(area_gain -
self.area_gain_control[self.episode_steps - 1] /
(self.episode_num - 1))))
else:
with open(logfilename, 'w') as filehandle:
filehandle.write(
'%s %s\n' %
(area_gain,
(area_gain -
self.area_gain_control[self.episode_steps - 1] /
(self.episode_num - 1))))
| [] |
2024-01-10 | badrinarayan/ReAgent | ml~rl~simulators~recsim.py | #!/usr/bin/env python3
from typing import List, NamedTuple, Optional, Tuple
import ml.rl.types as rlt
import torch
import torch.nn.functional as F
from ml.rl.test.gym.open_ai_gym_memory_pool import OpenAIGymMemoryPool
_DEFAULT_QUALITY_MEANS = [(-3.0, 0.0)] * 14 + [(0.0, 3.0)] * 6
_DEFAULT_QUALITY_VARIANCES = [1.0] * 20
class DocumentFeature(NamedTuple):
topic: torch.Tensor
length: torch.Tensor
quality: torch.Tensor
def as_vector(self):
"""
Convenience function to get the document features as a single tensor
"""
return torch.cat(
(self.topic, self.length.unsqueeze(dim=2), self.quality.unsqueeze(dim=2)),
dim=2,
)
class RecSim:
"""
An environment described in Section 6 of https://arxiv.org/abs/1905.12767
"""
def __init__(
self,
num_topics: int = 20,
doc_length: float = 4,
quality_means: List[Tuple[float, float]] = _DEFAULT_QUALITY_MEANS,
quality_variances: List[float] = _DEFAULT_QUALITY_VARIANCES,
initial_budget: float = 200,
alpha: float = 1.0,
m: int = 10,
k: int = 3,
num_users: int = 5000,
y: float = 0.3,
device: str = "cpu",
seed: int = 2147483647,
):
self.seed = seed
self.device = torch.device(device)
self.num_users = num_users
self.num_topics = num_topics
self.initial_budget = initial_budget
self.reset()
self.doc_length = doc_length
self.alpha = alpha
self.m = m
self.k = k
self.y = y
self.p_d = torch.ones(self.num_topics, device=self.device) / self.num_topics
assert (
len(quality_variances) == len(quality_means) == num_topics
), f"Expecting {num_topics}: got {quality_means} and {quality_variances}"
mean_ranges = torch.tensor(quality_means, device=self.device)
self.quality_means = (
torch.rand(num_topics, device=self.device, generator=self.generator)
* (mean_ranges[:, 1] - mean_ranges[:, 0])
+ mean_ranges[:, 0]
)
self.quality_variances = torch.tensor(quality_variances, device=self.device)
def reset(self) -> None:
self.generator = torch.Generator(device=self.device)
self.generator.manual_seed(self.seed)
self.users = self.sample_users(self.num_users)
self.active_user_ids = torch.arange(
start=1, end=self.num_users + 1, device=self.device, dtype=torch.long
)
self.user_budgets = torch.full(
(self.num_users,), self.initial_budget, device=self.device
)
self.candidates = None
def obs(self) -> Tuple[torch.Tensor, torch.Tensor, DocumentFeature]:
"""
Agent can observe:
- User interest vector
- Document topic vector
- Document length
- Document quality
"""
if self.candidates is None:
self.candidates = self.sample_documents(len(self.active_user_ids))
return self.active_user_ids, self.users, self.candidates
def step(
self, action: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
assert self.candidates is not None
slate = self.select(self.candidates, action, True)
user_choice, interest = self.compute_user_choice(slate)
selected_choice = self.select(slate, user_choice, False)
self.update_user_interest(selected_choice)
self.update_user_budget(selected_choice)
num_alive_sessions = self.update_active_users()
self.candidates = None
# TODO: Figure out what was the reward in the paper
# Here, the reward is the length of selected video
reward = selected_choice.length * (user_choice != action.shape[1])
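# The appended null (no-click) document sits at index action.shape[1] of the slate,
# so the reward is zero whenever the user selects nothing.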
return reward, user_choice, interest, num_alive_sessions
def select(
self, candidates: DocumentFeature, indices: torch.Tensor, add_null: bool
) -> DocumentFeature:
batch_size = candidates.topic.shape[0]
num_candidate = candidates.topic.shape[1]
num_select = indices.shape[1]
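# Convert per-row indices into indices of the flattened (batch * num_candidate) view:
# each row i gets an offset of i * num_candidate, repeated once per selected item.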
offset = torch.arange(
0,
batch_size * num_candidate,
step=num_candidate,
dtype=torch.long,
device=self.device,
).repeat_interleave(num_select)
select_indices = indices.view(-1) + offset
topic = candidates.topic.view(-1, self.num_topics)[select_indices].view(
batch_size, num_select, self.num_topics
)
length = candidates.length.view(-1)[select_indices].view(batch_size, num_select)
quality = candidates.quality.view(-1)[select_indices].view(
batch_size, num_select
)
if add_null:
length = torch.cat(
(length, length.new_full((batch_size, 1), self.doc_length)), dim=1
)
topic = torch.cat(
(topic, topic.new_zeros(batch_size, 1, self.num_topics)), dim=1
)
quality = torch.cat((quality, quality.new_zeros(batch_size, 1)), dim=1)
return DocumentFeature(topic=topic, length=length, quality=quality)
def compute_user_choice(
self, slate: DocumentFeature
) -> Tuple[torch.Tensor, torch.Tensor]:
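# The user picks one item from the slate (which includes the appended null document)
# with probability proportional to exp(interest), i.e. a softmax choice model.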
interest = self.interest(self.users, slate.topic)
user_choice = torch.multinomial(interest.exp(), 1, generator=self.generator)
return user_choice, interest
def update_user_interest(self, selected_choice):
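# Interest drift after consumption: the user's interest in the consumed topic is perturbed
# by a step of magnitude y * (1 - |I|) * |I|, whose direction is randomized with probability
# (interest + 1) / 2; the (1 - |I|) factor keeps interests bounded in [-1, 1].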
pos_prob = (self.interest(self.users, selected_choice.topic) + 1) / 2
positive_mask = torch.bernoulli(pos_prob, generator=self.generator).bool()
sign = torch.where(
positive_mask,
torch.full_like(pos_prob, 1.0),
torch.full_like(pos_prob, -1.0),
)
interest = self.users * selected_choice.topic.view(
self.users.shape[0], self.num_topics
)
delta_interest = (-self.y * interest.abs() + self.y) * (-interest)
self.users += sign * delta_interest
def update_user_budget(self, selected_choice):
bonus = self.bonus(
self.users,
selected_choice.topic,
selected_choice.length,
selected_choice.quality,
)
self.user_budgets -= (selected_choice.length - bonus).view(-1)
def bonus(self, u, d, length, quality):
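# The constants 0.9 and 3.4 presumably come from the user-budget model in the referenced
# paper (bonus proportional to watch time, scaled by satisfaction).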
assert (
length.shape == quality.shape
), f"Unexpected shape length: {length.shape} quality: {quality}"
return 0.9 / 3.4 * length * self.satisfactory(u, d, quality)
def update_active_users(self) -> int:
alive_indices = (self.user_budgets > 0.0).nonzero().squeeze(1)
if alive_indices.shape[0] < self.user_budgets.shape[0]:
self.user_budgets = self.user_budgets[alive_indices]
self.users = self.users[alive_indices]
self.active_user_ids = self.active_user_ids[alive_indices]
return alive_indices.shape[0]
def interest(self, u, d):
"""
Args:
u: shape [batch, T]
d: shape [batch, k, T]
"""
assert (
u.dim() == 2
and d.dim() == 3
and u.shape[0] == d.shape[0]
and u.shape[1] == d.shape[2]
), f"Shape mismatch u: {u.shape}, d: {d.shape}"
return torch.bmm(u.unsqueeze(1), d.transpose(1, 2)).squeeze(1)
def satisfactory(self, u, d, quality):
assert (
u.dim() == 2
and d.dim() == 3
and quality.dim() == 2
and u.shape[0] == d.shape[0] == quality.shape[0]
and d.shape[1] == quality.shape[1]
), f"Shape mismatch u: {u.shape}, d: {d.shape}, quality: {quality.shape}"
if self.alpha == 1.0:
return quality
return (1 - self.alpha) * self.interest(u, d) + self.alpha * quality
def sample_users(self, n):
"""
User is represented by vector of topic interest, uniformly sampled from [-1, 1]
"""
return torch.rand((n, self.num_topics), generator=self.generator) * 2 - 1
def sample_documents(self, n: int) -> DocumentFeature:
num_docs = n * self.m
topics = torch.multinomial(
self.p_d, num_docs, replacement=True, generator=self.generator
)
means = self.quality_means[topics]
variances = self.quality_variances[topics]
quality = torch.normal(means, variances, generator=self.generator).view(
n, self.m
)
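# Note that torch.normal expects a standard deviation as its second argument; the per-topic
# 'quality_variances' values are used directly as such here.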
embedding = (
F.one_hot(topics, self.num_topics).view(n, self.m, self.num_topics).float()
)
length = torch.full((n, self.m), self.doc_length, device=self.device)
return DocumentFeature(topic=embedding, quality=quality, length=length)
def rollout_policy(
self, policy, memory_pool: Optional[OpenAIGymMemoryPool] = None
) -> float:
prev_obs = None
prev_action = None
prev_user_choice = None
prev_reward = None
prev_interest = None
policy_reward = 0
while True:
obs = self.obs()
active_user_idxs, user_features, candidate_features = obs
item_idxs = policy(obs, self)
reward, user_choice, interest, num_alive = self.step(item_idxs)
policy_reward += reward.sum().item()
action_features = self.select(
candidate_features, item_idxs, True
).as_vector()
if memory_pool is not None and prev_obs is not None:
prev_active_user_idxs, prev_user_features, prev_candidate_features = (
prev_obs
)
i, j = 0, 0
while i < len(prev_active_user_idxs):
mdp_id = prev_active_user_idxs[i]
state = prev_user_features[i]
possible_actions = prev_action[i]
action = possible_actions[prev_user_choice[i]].view(-1)
possible_actions_mask = torch.ones(self.k + 1, dtype=torch.uint8)
# HACK: Since reward is going to be masked, this is OK
item_reward = prev_reward[i].repeat(self.k + 1)
reward_mask = torch.arange(self.k + 1) == prev_user_choice[i]
propensity = F.softmax(prev_interest[i], dim=0)
if j < len(active_user_idxs) and mdp_id == active_user_idxs[j]:
# not terminated
terminal = False
next_state = user_features[j]
possible_next_actions = action_features[j]
next_action = possible_next_actions[user_choice[j]].view(-1)
possible_next_actions_mask = torch.ones(
self.k + 1, dtype=torch.uint8
)
next_propensity = F.softmax(interest[j], dim=0)
j += 1
else:
terminal = True
next_state = torch.zeros_like(state)
possible_next_actions = torch.zeros_like(action)
next_action = possible_next_actions[0].view(-1)
possible_next_actions_mask = torch.zeros(
self.k + 1, dtype=torch.uint8
)
next_propensity = torch.zeros_like(propensity)
memory_pool.insert_into_memory(
state=state,
action=action,
reward=item_reward,
next_state=next_state,
next_action=next_action,
terminal=terminal,
possible_next_actions=possible_next_actions,
possible_next_actions_mask=possible_next_actions_mask,
time_diff=1.0,
possible_actions=possible_actions,
possible_actions_mask=possible_actions_mask,
policy_id=1,
propensity=propensity,
next_propensity=next_propensity,
reward_mask=reward_mask,
)
i += 1
prev_obs = obs
prev_action = action_features
prev_user_choice = user_choice
prev_reward = reward
prev_interest = interest
if num_alive == 0:
break
return policy_reward
def random_policy(
obs: Tuple[torch.Tensor, torch.Tensor, DocumentFeature], recsim: RecSim
):
active_user_idxs, user_features, candidate_features = obs
item_idxs = torch.multinomial(
torch.ones(active_user_idxs.shape[0], recsim.m), recsim.k
)
return item_idxs
def top_k_policy(
q_network, obs: Tuple[torch.Tensor, torch.Tensor, DocumentFeature], recsim: RecSim
):
active_user_idxs, user_features, candidate_features = obs
slate_with_null = recsim.select(
candidate_features,
torch.repeat_interleave(
torch.arange(recsim.m).unsqueeze(dim=0), active_user_idxs.shape[0], dim=0
),
add_null=True,
)
_user_choice, interest = recsim.compute_user_choice(slate_with_null)
propensity = F.softmax(interest, dim=1)[:, : recsim.m]
tiled_user_features = torch.repeat_interleave(user_features, recsim.m, dim=0)
candidate_feature_vector = candidate_features.as_vector()
action_dim = candidate_feature_vector.shape[2]
flatten_candidate_features = candidate_feature_vector.view(-1, action_dim)
q_network_input = rlt.PreprocessedStateAction.from_tensors(
state=tiled_user_features, action=flatten_candidate_features
)
q_values = q_network(q_network_input).q_value.view(-1, recsim.m)
values = q_values * propensity
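# Items are ranked by Q-value weighted by the user's choice probability (propensity),
# which matches the greedy top-k slate construction of SlateQ-style decompositions.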
top_values, item_idxs = torch.topk(values, recsim.k, dim=1)
return item_idxs
| [] |
2024-01-10 | borisdayma/DALLE-pytorch | dalle_pytorch~dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
class always():
def __init__(self, val):
self.val = val
def __call__(self, x, *args, **kwargs):
return self.val
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def top_k(logits, thres = 0.5):
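# Keep only the top (1 - thres) fraction of logits; all other positions are set to -inf
# so that a subsequent softmax/sampling step ignores them.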
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.,
normalization = ((0.5,) * 3, (0.5,) * 3)
):
super().__init__()
assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
img = self.norm(img)
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
# main classes
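# CLIP: paired text and image encoders trained with a contrastive loss; used at sampling time to score generated images against the caption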
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
num_visual_tokens = 512,
visual_enc_depth = 6,
visual_heads = 8,
visual_image_size = 256,
visual_patch_size = 32,
channels = 3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads, rotary_emb = False)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads, rotary_emb = False)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
labels = torch.arange(b, device = device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
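# DALLE: a causal transformer over the concatenated text + image token sequence; the VAE is frozen and only supplies image tokens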
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens = 10000,
text_seq_len = 256,
depth,
heads = 8,
dim_head = 64,
reversible = False,
attn_dropout = 0.,
ff_dropout = 0,
sparse_attn = False,
attn_types = None,
loss_img_weight = 7,
stable = False,
sandwich_norm = False,
shift_tokens = True,
rotary_emb = True
):
super().__init__()
        assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE, OpenAIDiscreteVAE, or VQGanVAE'
image_size = vae.image_size
num_image_tokens = vae.num_tokens
image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
image_seq_len = image_fmap_size ** 2
num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) if not rotary_emb else always(0) # +1 for <bos>
self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size)) if not rotary_emb else always(0)
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
set_requires_grad(self.vae, False) # freeze VAE from being trained
self.transformer = Transformer(
dim = dim,
causal = True,
seq_len = seq_len,
depth = depth,
heads = heads,
dim_head = dim_head,
reversible = reversible,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
attn_types = attn_types,
image_fmap_size = image_fmap_size,
sparse_attn = sparse_attn,
stable = stable,
sandwich_norm = sandwich_norm,
shift_tokens = shift_tokens,
rotary_emb = rotary_emb
)
self.stable = stable
if stable:
self.norm_by_max = DivideMax(dim = -1)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask, persistent=False)
self.loss_img_weight = loss_img_weight
@torch.no_grad()
@eval_decorator
def generate_texts(
self,
tokenizer,
text = None,
*,
filter_thres = 0.5,
temperature = 1.
):
text_seq_len = self.text_seq_len
if text is None or text == "":
text_tokens = torch.tensor([[0]]).cuda()
else:
text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
for _ in range(text_tokens.shape[1], text_seq_len):
device = text_tokens.device
tokens = self.text_emb(text_tokens)
tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device = device))
seq_len = tokens.shape[1]
output_transf = self.transformer(tokens)
if self.stable:
output_transf = self.norm_by_max(output_transf)
logits = self.to_logits(output_transf)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
text_tokens = torch.cat((text_tokens, sample), dim=-1)
padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
return text_tokens, texts
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip = None,
mask = None,
filter_thres = 0.5,
temperature = 1.,
img = None,
num_init_img_tokens = None
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
text = text[:, :text_seq_len] # make sure text is within bounds
out = text
if exists(img):
image_size = vae.image_size
assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[3] == image_size, f'input image must have the correct image size {image_size}'
indices = vae.get_codebook_indices(img)
num_img_tokens = default(num_init_img_tokens, int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
indices = indices[:, :num_img_tokens]
out = torch.cat((out, indices), dim = -1)
for cur_len in range(out.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self(text, image, mask = mask)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample), dim=-1)
if out.shape[1] <= text_seq_len:
mask = F.pad(mask, (0, 1), value = True)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss = False)
return images, scores
return images
def forward(
self,
text,
image = None,
mask = None,
return_loss = False
):
assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
device, total_seq_len = text.device, self.total_seq_len
# make sure padding in text tokens get unique padding token id
text_range = torch.arange(self.text_seq_len, device = device) + (self.num_text_tokens - self.text_seq_len)
text = torch.where(text == 0, text_range, text)
# add <bos>
text = F.pad(text, (1, 0), value = 0)
tokens = self.text_emb(text)
tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 4
if is_raw_image:
image_size = self.vae.image_size
assert tuple(image.shape[1:]) == (3, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = self.vae.get_codebook_indices(image)
image_len = image.shape[1]
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(image_emb)
tokens = torch.cat((tokens, image_emb), dim = 1)
seq_len += image_len
# when training, if the length exceeds the total text + image length
# remove the last token, since it needs not to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
if self.stable:
alpha = 0.1
tokens = tokens * alpha + tokens.detach() * (1 - alpha)
out = self.transformer(tokens)
if self.stable:
out = self.norm_by_max(out)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
logits = rearrange(logits, 'b n c -> b c n')
loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
return loss
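# Minimal usage sketch for the classes above; the hyperparameters and tensor shapes
# below are illustrative assumptions, not values prescribed by this file:
#
#   vae = DiscreteVAE(image_size = 256, num_layers = 3, num_tokens = 8192)
#   dalle = DALLE(dim = 512, vae = vae, num_text_tokens = 10000, text_seq_len = 256, depth = 6)
#   text = torch.randint(0, 10000, (4, 256))          # (batch, text_seq_len) token ids
#   images = torch.randn(4, 3, 256, 256)               # raw images, tokenized internally by the VAE
#   loss = dalle(text, images, return_loss = True)     # one training step
#   loss.backward()
#   generated = dalle.generate_images(text[:1], filter_thres = 0.9)   # sample images for a caption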
| [] |
2024-01-10 | borisdayma/DALLE-pytorch | train_dalle.py | import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
from dalle_pytorch.loader import TextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--vae_path', type=str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type=str,
help='path to your partially trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--image_text_folder', type=str, required=True,
help='path to your folder of images and text for learning the DALL-E')
parser.add_argument(
'--wds',
type = str,
default='',
help = 'Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.'
)
parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
help='Captions passed in which exceed the max token length will be truncated if this is set.')
parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
help='Random resized crop lower ratio')
parser.add_argument('--chinese', dest='chinese', action='store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--hug', dest='hug', action='store_true')
parser.add_argument('--bpe_path', type=str,
help='path to your BPE json file')
parser.add_argument('--dalle_output_file_name', type=str, default = "dalle",
                    help='File name (without extension) for the saved DALL-E checkpoint; ".pt" is appended.')
parser.add_argument('--fp16', action='store_true',
help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')
parser.add_argument('--amp', action='store_true',
help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')
parser.add_argument('--wandb_name', default='dalle_train_transformer',
help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')
parser.add_argument('--wandb_entity', default=None,
help='(optional) Name of W&B team/entity to log to.')
parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--flops_profiler', dest = 'flops_profiler', action='store_true', help = 'Exits after printing detailed flops/runtime analysis of forward/backward')
train_group.add_argument('--epochs', default = 20, type = int, help = 'Number of epochs')
train_group.add_argument('--save_every_n_steps', default = 1000, type = int, help = 'Save a checkpoint every n steps')
train_group.add_argument('--keep_n_checkpoints', default = None, type = int, help = '(Careful) Deletes old deepspeed checkpoints if there are more than n')
train_group.add_argument('--batch_size', default = 4, type = int, help = 'Batch size')
train_group.add_argument('--ga_steps', default = 1, type = int, help = 'Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')
train_group.add_argument('--learning_rate', default = 3e-4, type = float, help = 'Learning rate')
train_group.add_argument('--clip_grad_norm', default = 0.5, type = float, help = 'Clip gradient norm')
train_group.add_argument('--lr_decay', dest = 'lr_decay', action = 'store_true')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--dim', default = 512, type = int, help = 'Model dimension')
model_group.add_argument('--text_seq_len', default = 256, type = int, help = 'Text sequence length')
model_group.add_argument('--depth', default = 2, type = int, help = 'Model depth')
model_group.add_argument('--heads', default = 8, type = int, help = 'Model number of heads')
model_group.add_argument('--dim_head', default = 64, type = int, help = 'Model head dimension')
train_group.add_argument('--ff_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
train_group.add_argument('--attn_dropout', default = 0.0, type = float, help = 'Attention dropout.')
model_group.add_argument('--reversible', dest = 'reversible', action='store_true')
model_group.add_argument('--loss_img_weight', default = 7, type = int, help = 'Image loss weight')
model_group.add_argument('--attn_types', default = 'full', type = str, help = 'comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')
model_group.add_argument('--shift_tokens', help = 'Use the shift tokens feature', action = 'store_true')
model_group.add_argument('--rotary_emb', help = 'Use rotary embeddings', action = 'store_true')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
def get_trainable_params(model):
return [params for params in model.parameters() if params.requires_grad]
def cp_path_to_dir(cp_path, tag):
"""Convert a checkpoint path to a directory with `tag` inserted.
If `cp_path` is already a directory, return it unchanged.
"""
if not isinstance(cp_path, Path):
cp_path = Path(cp_path)
if cp_path.is_dir():
return cp_path
path_sans_extension = cp_path.parent / cp_path.stem
cp_dir = Path(f'{path_sans_extension}-{tag}-cp')
return cp_dir
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = True if len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2 else False
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
SHIFT_TOKENS = args.shift_tokens
ROTARY_EMB = args.rotary_emb
ATTN_TYPES = tuple(args.attn_types.split(','))
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
# quit early if you used the wrong folder name
assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
else:
# quit early if no tar files were found
if Path(args.image_text_folder).is_dir():
DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(args.image_text_folder)
print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
elif ('http://' in args.image_text_folder.lower()) | ('https://' in args.image_text_folder.lower()):
DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
        print('Found http(s) link under given path {}!'.format(args.image_text_folder))
elif 'gs://' in args.image_text_folder.lower():
DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
        print('Found GCS link under given path {}!'.format(args.image_text_folder))
elif '.tar' in args.image_text_folder:
DATASET = args.image_text_folder
print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
else:
raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
dalle_params = dict(
**dalle_params
)
IMAGE_SIZE = vae.image_size
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if distr_backend.is_root_worker():
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
shift_tokens=SHIFT_TOKENS,
rotary_emb=ROTARY_EMB,
)
resume_epoch = 0
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
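# splits transformer parameters into weight-decay and no-decay groups (biases and norms are excluded from decay)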
def group_weight(model):
group_decay, group_no_decay = [], []
for params in model.named_parameters():
if 'transformer' in params[0]:
if 'bias' in params[0] or 'norm' in params[0]:
group_no_decay.append(params[1])
continue
group_decay.append(params[1])
assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert('RGB')
if img.mode != 'RGB' else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
return Image.open(BytesIO(b))
def tokenize(s):
return tokenizer.tokenize(
s.decode('utf-8'),
TEXT_SEQ_LEN,
truncate_text=args.truncate_captions).squeeze(0)
if ENABLE_WEBDATASET:
DATASET_SIZE = int(1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
image_text_mapping = {
myimg: imagetransform,
mycap: tokenize
}
image_mapping = {
myimg: imagepreproc
}
def filter_dataset(item): # For e.g. C@H which (rarely) has no caption available.
if mycap not in item:
return False
if myimg not in item:
return False
return True
w_dataset = wds.WebDataset(DATASET, handler=wds.warn_and_continue)
filtered_dataset = w_dataset.select(filter_dataset)
ds = filtered_dataset.map_dict(**image_text_mapping).map_dict(**image_mapping).to_tuple(mycap, myimg).batched(BATCH_SIZE, partial=True)
else:
ds = TextImageDataset(
args.image_text_folder,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
)
assert len(ds) > 0, 'dataset is empty'
if distr_backend.is_root_worker():
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
else:
data_sampler = None
if ENABLE_WEBDATASET:
# WebLoader for WebDataset and DeepSpeed compatibility
dl = wds.WebLoader(ds, batch_size=None, shuffle=False) # optionally add num_workers=2 (n) argument
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
dl = dl.repeat(2).slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
if RESUME and not using_deepspeed:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
else:
scheduler = None
if distr_backend.is_root_worker():
# experiment tracker
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
    print("See the deepspeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print("If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=(
(None if ENABLE_WEBDATASET else ds)
if using_deepspeed
else dl
),
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
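# save_model: DeepSpeed runs write a sharded checkpoint directory plus an auxiliary file; plain runs write a single .pt with weights, optimizer and scheduler state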
def save_model(path, epoch=0):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'epoch': epoch,
}
if using_deepspeed:
cp_dir = cp_path_to_dir(path, 'ds')
if KEEP_N_CHECKPOINTS is not None and distr_backend.is_root_worker():
checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
shutil.rmtree(checkpoint)
distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)
if not distr_backend.is_root_worker():
return
# Save auxiliary values so we can reuse the standard routine
# for loading.
save_obj = {
**save_obj,
# Save a nonsense value that directs the user to
# further help.
'weights': (
'To get a working standard checkpoint, '
'look into consolidating DeepSpeed checkpoints.'
),
}
torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2: # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
return
if not distr_backend.is_root_worker():
return
save_obj = {
**save_obj,
'weights': dalle.state_dict(),
'opt_state': opt.state_dict(),
}
save_obj['scheduler_state'] = (scheduler.state_dict() if scheduler else None)
torch.save(save_obj, path)
# training
# Saves a checkpoint before training begins to fail early when mis-configured.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
for epoch in range(resume_epoch, EPOCHS):
if data_sampler:
data_sampler.set_epoch(epoch)
for i, (text, images) in enumerate((dl if ENABLE_WEBDATASET else distr_dl)):
if i % 10 == 0 and distr_backend.is_root_worker():
t = time.time()
if args.fp16:
images = images.half()
text, images = map(lambda t: t.cuda(), (text, images))
loss = distr_dalle(text, images, return_loss=True)
if using_deepspeed:
distr_dalle.backward(loss)
distr_dalle.step()
# Gradients are automatically zeroed after the step
else:
loss.backward()
clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
distr_opt.step()
distr_opt.zero_grad()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
log = {}
if i % 10 == 0 and distr_backend.is_root_worker():
print(epoch, i, f'loss - {avg_loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item()
}
if i % SAVE_EVERY_N_STEPS == 0:
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if i % 100 == 0:
if distr_backend.is_root_worker():
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
if not avoid_model_calls:
# CUDA index errors when we don't guard this
image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
log = {
**log,
}
if not avoid_model_calls:
log['image'] = wandb.Image(image, caption=decoded_text)
if i % 10 == 9 and distr_backend.is_root_worker():
sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
log["sample_per_sec"] = sample_per_sec
print(epoch, i, f'sample_per_sec - {sample_per_sec}')
if i == 201 and args.flops_profiler:
raise StopIteration("Profiler has finished running. Stopping training early.")
if distr_backend.is_root_worker():
wandb.log(log)
if LR_DECAY:
distr_scheduler.step(avg_loss)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
# save trained model to wandb as an artifact every epoch's end
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
wandb.save(DALLE_OUTPUT_FILE_NAME)
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
wandb.finish()
| [] |
2024-01-10 | TalkToEve/ai-backend | tests~text_to_speech_test~tts_openai.py.py | import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
from configurations.config import OPENAI_API_KEY
from openai import OpenAI
client = OpenAI(api_key=OPENAI_API_KEY)
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input="Hello world! This is a streaming test.",
)
response.stream_to_file(current_dir + '/' + "output.mp3") | [] |
2024-01-10 | TalkToEve/ai-backend | modules~functions_auxiliary.py | import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
# Obtain the parent directory
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
from langchain.text_splitter import CharacterTextSplitter
from configurations.config import CHUNK_OVERLAP, CHUNK_SIZE
def obtain_number_session(file_name):
parts = file_name.split("_")
return int(parts[2])
def read_txt_files(path):
file_path = path
with open(file_path, 'r') as output_file:
content = output_file.read()
return content
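# splits raw text into overlapping chunks with LangChain's CharacterTextSplitter, using the configured chunk size/overlap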
def split_txt_file(raw_text, separator = "\n#"):
text_splitter = CharacterTextSplitter(
separator=separator,
chunk_size= CHUNK_SIZE,
chunk_overlap= CHUNK_OVERLAP,
length_function=len,
)
chunks = text_splitter.split_text(raw_text)
return chunks
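# renders a list of LangChain AIMessage/HumanMessage objects as a plain-text "Eve:/Patient:" transcript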
def convert_messages_in_txt(conversation_messages):
conversation_txt = ""
for message in conversation_messages:
if message.__class__.__name__ == "AIMessage":
conversation_txt += 'Eve: ' + message.content + '\n'
elif message.__class__.__name__ == "HumanMessage":
conversation_txt += 'Patient: ' + message.content + '\n'
return conversation_txt
def save_as_txt(path, content):
file_path = path
if not os.path.exists(os.path.dirname(file_path)):
#Create the directory
os.makedirs(os.path.dirname(file_path))
with open(file_path, 'w') as output_file:
output_file.write(content)
def read_txt_file(path):
file_path = path
with open(file_path, 'r') as output_file:
content = output_file.read()
return content
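# parses "key: True/False" lines and returns the first key whose value is True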
def convert_state_str_to_variables(state_str):
lineas = state_str.split('\n')
    opciones = {}  # Dictionary to store the parsed options
for linea in lineas:
if ":" in linea:
clave, valor = linea.split(':')
opciones[clave.strip()] = valor.strip() == "True"
    # Find the keys whose value is True
claves_true = [clave for clave, valor in opciones.items() if valor]
return claves_true[0] | [] |
2024-01-10 | TalkToEve/ai-backend | modules~chatbot~surveys_manager.py | from configurations.config_llm import CAMBALACHE_TEMPERATURE
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import os
import sys
import random
import pandas as pd
import json
# Obtain the path to the current directory
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
# Add the path to the sys.path list
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
from configurations.config_surveys import SURVEYS_PATH
class BaseSurveysManager():
def __init__(self,):
pass
def load_survey(self, survey):
raise NotImplementedError
def obtain_questions(self,):
raise NotImplementedError
def obtain_answers(self,):
raise NotImplementedError
def load_responses(self,question_id=None, question = None, answer = None):
raise NotImplementedError
class SurveyManager_v1(BaseSurveysManager):
def __init__(self,):
self.surveys_dataframe = None
self.path_to_lists_surveys = ""
self.survey_name = ""
self.survey_date = ""
self.survey_answered = False
self.survey = {}
self.survey_responses = {}
self.path_to_survey_folder = ""
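    # loads the CSV that lists this user's surveys and whether each one has already been answered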
def load_list_of_surveys(self,path):
# CHeck if the path is a csv file
if not path.endswith(".csv"):
raise Exception("The path must be a csv file")
# Read the path
self.path_to_lists_surveys = path
# Read the csv file
self.surveys_dataframe = pd.read_csv(self.path_to_lists_surveys)
def select_survey_to_answer(self,):
# Obtain the lists of surveys that not was answered
surveys_not_answered = self.surveys_dataframe[self.surveys_dataframe['answered'] == False].values.tolist()
# Generate a random number to select the survey to answer
if len(surveys_not_answered) == 0:
survey_to_answer = None
elif len(surveys_not_answered) == 1:
survey_to_answer = surveys_not_answered[0]
else:
survey_to_answer = random.choice(surveys_not_answered)
if survey_to_answer:
self.survey_name = survey_to_answer[0]
self.survey_date = survey_to_answer[1]
self.survey_answered = survey_to_answer[2]
def check_if_has_survey_to_answer(self,):
# Check if there is a survey to answer
if len(self.surveys_dataframe[self.surveys_dataframe['answered'] == False]):
return True
else:
return False
def load_survey(self, path_to_load_survey=None, survey_id = None):
if survey_id is None:
# Check that self.survey_name and self.survey_date are not empty
if self.survey_name == "" or self.survey_date == "":
raise Exception("The survey must have a name and a date")
# Checj that path_to_load_survey exists
if path_to_load_survey is None:
path_to_load_survey = SURVEYS_PATH
if not os.path.exists(path_to_load_survey):
raise Exception("The path to load the survey does not exist")
if survey_id is None:
survey_id = self.survey_date + '_' + self.survey_name
self.path_to_survey_folder = os.path.join(path_to_load_survey, survey_id)
# Read the json file
with open(os.path.join(self.path_to_survey_folder, survey_id + '.json')) as json_file:
self.survey = json.load(json_file)
def obtain_number_of_questions(self,):
return len(self.survey) - 3
def obtain_question(self, number_question, return_dict = False):
# Check that number_question is a number
if not isinstance(number_question, int):
raise Exception("The number of the question must be a number")
# Check that number_question is a valid number
if number_question >= len(self.survey) - 2:
raise Exception("The number of the question must be a valid number")
if return_dict:
# Obtain the question
return self.survey['question_' + str(number_question)]
else:
# Obtain the question
question = self.survey['question_' + str(number_question)]['question']
answers = self.survey['question_' + str(number_question)]['answers']
return question, answers
def load_response(self, number_question, response):
# Check that number_question is a number
if not isinstance(number_question, int):
raise Exception("The number of the question must be a number")
# Check that number_question is a valid number
if number_question >= len(self.survey) - 2:
raise Exception("The number of the question must be a valid number")
# Check that response is a str
if not isinstance(response, str):
raise Exception("The response must be a string")
# Save the response in the survey_responses
question = self.survey['question_' + str(number_question)]['question']
self.survey_responses[question] = [response]
def save_survey_responses(self,):
# Path to save results in dataframe
survey_name = self.survey_date + '_' + self.survey_name
path_to_result = os.path.join(SURVEYS_PATH, survey_name, 'results' + '_' + survey_name + '.csv')
# Now we need to load the dataframe
df = pd.read_csv(path_to_result)
# Now we need to convert survey_responses in a dataframe
survey_responses_dataframe = pd.DataFrame(self.survey_responses, index=[0])
# Now we need to concatenate the two dataframes
df = pd.concat([df, survey_responses_dataframe], axis=0, ignore_index=True)
# Now we need to randomly the rows of the dataframe
df = df.sample(frac=1).reset_index(drop=True)
# Now we save the dataframe
df.to_csv(path_to_result, index=False)
# Now we need to change the survey dataframe
self.surveys_dataframe.loc[self.surveys_dataframe['survey_name'] == self.survey_name, 'answered'] = True
# Save the new dataframe
self.surveys_dataframe.to_csv(self.path_to_lists_surveys, index=False)
| [] |
2024-01-10 | TalkToEve/ai-backend | tests~speech_to_text_test~stt_openai.py | import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
from configurations.config import OPENAI_API_KEY
from openai import OpenAI
client = OpenAI(api_key=OPENAI_API_KEY)
audio_file = open(current_dir + "/" + "output.mp3", "rb") # "Hello world! This is a streaming test."
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file
)
print(transcript) | [] |
2024-01-10 | TalkToEve/ai-backend | modules~chatbot~personal_information.py | from configurations.config_llm import CAMBALACHE_TEMPERATURE
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
import os
import sys
# Obtain the path to the current directory
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
# Add the path to the sys.path list
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
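# keeps a running natural-language profile of the user and updates it from each conversation through an LLMChain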
class PersonalInformationUpdater_v1():
def __init__(self,):
# Define the personal information
self.personal_information = ""
# Define the updater
self.updater = None
self.prompt = None
# Define the requiered inputs variables
self.required_input_variables = ['conversation', 'previous_information']
pass
def load_personal_information(self, personal_information):
# Check that personal_information is a string
if not isinstance(personal_information, str):
raise TypeError("personal_information must be a string")
self.personal_information = personal_information
def load_prompt(self, prompt, model):
# Check if prompt is a PromptTemplate
if not isinstance(prompt, PromptTemplate):
raise TypeError("prompt must be a PromptTemplate")
#Check that model is in chat_models
if not isinstance(model, ChatOpenAI):
raise TypeError("model must be a ChatOpenAI")
# Check that the prompt has the required input variables
for required_input_variable in self.required_input_variables:
if required_input_variable not in prompt.input_variables:
raise ValueError(f"The prompt must have the input variable {required_input_variable}")
self.prompt = prompt
# Load the prompt
self.updater = LLMChain(llm = model, verbose = False, prompt = prompt)
def update(self, conversation):
# Check that the updater is not None
if self.updater is None:
raise ValueError("The updater has not been loaded")
# Check that the conversation is a string
if not isinstance(conversation, str):
raise TypeError("conversation must be a string")
# Check that the previous_information is a string
if not isinstance(self.personal_information, str):
raise TypeError("previous_information must be a string")
# Obtain the new personal information
self.personal_information = self.updater.predict(conversation = conversation, previous_information = self.personal_information)
def get_information(self):
return self.personal_information
| [] |
2024-01-10 | TalkToEve/ai-backend | modules~chatbot~eves.py | import os
import sys
import time
import datetime
from langchain.chains import LLMChain
import threading
current_dir = os.path.dirname(os.path.abspath(__file__))
# Obtain the parent directory
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
from chatbot.prompt_config import PromptConfig_v1
from chatbot.conversation_manager import ConversationManager_v1
from chatbot.conversation_analysis import ConversationAnalysis_v1, ConversationAnalysis_v2, ConversationAnalysis_v3
from chatbot.chatbot import Chatbot_v1
from chatbot.speech_to_text import S2T_with_whisper, S2T_with_openai
from chatbot.text_to_speech import T2S_with_openai
from users_manager.user_manager import UserManager
from configurations.config_llm import FT_NAME, CHAT_TEMPERATURE, OPENAI_API_KEY
from configurations.config_prompts import SUMMARY_CONVERSATION_v1, PERSONAL_INFORMATION_v1, EVE_PROMPT_v1, SEARCH_IN_VECTORSTORE_DATABASE_v1
from configurations.config_templates import PERSONAL_INFORMATION_TEMPLATE_PATH
from configurations.config_vectordb import VECTOR_DB_PATH
from functions_auxiliary import read_txt_files
# Import the vector database searcher
from vector_database.vector_database import VectorDatabase
class BaseEve():
def __init__(self, llm = None):
self.model = llm
pass
def response(self, message):
pass
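# Eve_v1 wires together the core pipeline: prompt manager, conversation memory, conversation analysis (personal info + summary) and the chatbot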
class Eve_v1(BaseEve):
def __init__(self,):
# Configure the API key
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
# Initialize the prompt manager
self.prompt_manager = PromptConfig_v1()
# Initialie the conversation manager
self.conversation_manager = ConversationManager_v1()
# Initialize the conversation analysis
self.conversation_analysis = ConversationAnalysis_v1()
# Initialize the chatbot
self.chatbot = Chatbot_v1()
pass
def initialize(self,):
# Load prompts
self.prompt_manager.load_prompts(paths = [SUMMARY_CONVERSATION_v1['path'], PERSONAL_INFORMATION_v1['path'], EVE_PROMPT_v1['path']],
keys = ['summary_conversation', 'personal_information', 'eve_message'],
input_variables = [SUMMARY_CONVERSATION_v1['input_variables'], PERSONAL_INFORMATION_v1['input_variables'], EVE_PROMPT_v1['input_variables']])
        # Check here whether previous conversation files already exist
# Load template of personal information
self.conversation_analysis.load_personal_information(personal_information = read_txt_files(PERSONAL_INFORMATION_TEMPLATE_PATH))
# Load prompt for the personal information
self.conversation_analysis.load_prompt_personal_information(prompt = self.prompt_manager.get_prompt('personal_information'))
# Load prompt for the summary conversation
self.conversation_analysis.load_prompt_summary_conversation(prompt = self.prompt_manager.get_prompt('summary_conversation'))
# Load prompt for the chatbot
self.chatbot.load_prompt(prompt = self.prompt_manager.get_prompt('eve_message'))
def response(self, message):
# We obtain the last messages
last_messages = self.conversation_manager.get_n_last_messages()
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Obtain the complete current conversation
current_conversation = self.conversation_manager.get_conversation()
# Process the personal information
self.conversation_analysis.update_personal_information(current_conversation)
# Obtain the new personal information
personal_info = self.conversation_analysis.get_personal_information()
# Process the summary conversation
self.conversation_analysis.update_summary_conversation(last_messages , current_conversation)
# Obtain the new summary conversation
summary_conversation = self.conversation_analysis.get_summary_conversation()
# Obtain the response of the chatbot
eve_message = self.chatbot.response(message = message,
personal_information = personal_info,
previous_conversation_summary = summary_conversation,
last_messages = last_messages)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
return eve_message
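# Eve_v2: same pipeline, but the personal-information and summary updates run in parallel threads to reduce response latency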
class Eve_v2(Eve_v1):
def __init__(self,):
super().__init__()
pass
def response(self, message):
# We obtain the last messages
last_messages = self.conversation_manager.get_n_last_messages()
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Obtain the complete current conversation
current_conversation = self.conversation_manager.get_conversation()
# Init the threads for the personal information and the summary conversation
personal_info_thread = threading.Thread(target=self._process_personal_information, args=(current_conversation,))
summary_thread = threading.Thread(target=self._process_summary_conversation, args=(last_messages , current_conversation))
personal_info_thread.start()
summary_thread.start()
personal_info_thread.join()
summary_thread.join()
# Obtain the new personal information
personal_info = self.conversation_analysis.get_personal_information()
# Obtain the new summary conversation
summary_conversation = self.conversation_analysis.get_summary_conversation()
# Obtain the response of the chatbot
eve_message = self.chatbot.response(message = message,
personal_information = personal_info,
previous_conversation_summary = summary_conversation,
last_messages = last_messages)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
return eve_message
def _process_personal_information(self,current_conversation):
self.conversation_analysis.update_personal_information(current_conversation)
def _process_summary_conversation(self,last_messages , current_conversation):
self.conversation_analysis.update_summary_conversation(last_messages , current_conversation)
# These Eve versions save the current conversation and the personal information in the user folder
class Eve_v3(Eve_v2):
def __init__(self,):
super().__init__()
# User Manager
self.user_manager = UserManager()
pass
def initialize(self,):
# Login process
self.user_manager.user_login()
# Load prompts
self.prompt_manager.load_prompts(paths = [SUMMARY_CONVERSATION_v1['path'], PERSONAL_INFORMATION_v1['path'], EVE_PROMPT_v1['path']],
keys = ['summary_conversation', 'personal_information', 'eve_message'],
input_variables = [SUMMARY_CONVERSATION_v1['input_variables'], PERSONAL_INFORMATION_v1['input_variables'], EVE_PROMPT_v1['input_variables']])
# Load the information requiered for the conversation
self.load_personal_information()
# Load prompt for the summary conversation
self.load_summary_conversation()
# Load prompt for the chatbot
self.chatbot.load_prompt(prompt = self.prompt_manager.get_prompt('eve_message'))
def load_personal_information(self,):
# Obtain the personal information from usermanager
personal_information = self.user_manager.get_user_information(key='personal_information')
#Load the personal information
self.conversation_analysis.load_personal_information(personal_information = personal_information)
# Load prompt for the personal information
self.conversation_analysis.load_prompt_personal_information(prompt = self.prompt_manager.get_prompt('personal_information'))
def load_summary_conversation(self,):
# Load prompt for the summary conversation
self.conversation_analysis.load_prompt_summary_conversation(prompt = self.prompt_manager.get_prompt('summary_conversation'))
def finalize(self,):
# Message of process information and wait a minute
print("Processing the conversation...")
# Update the personal information
self.user_manager.update_user_information(key='personal_information', value=self.conversation_analysis.get_personal_information())
# Save the conversation
self.save_conversation()
# Save the personal information
self.save_personal_information()
def save_conversation(self,):
# Obtain the current conversation
current_conversation = self.conversation_manager.get_conversation()
# Obtain the number of the last session
number_session = self.user_manager.get_user_information(key='last_session_number')
if number_session is None:
number_session = 0
number_session+=1
# The file name is the current date and time (YYYY/MM/DD)
date = datetime.datetime.now().strftime("%Y_%m_%d")
filename = f"{number_session}_session_{date}.txt"
self.user_manager.save_content(content = current_conversation,
folder = 'sessions',
filename = filename)
def save_personal_information(self,):
# Save the personal information
self.user_manager.save_user_information()
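# Eve_v4/v4a add speech-to-text on the user input (local Whisper vs. the OpenAI API); Eve_v4b also adds text-to-speech for the reply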
class Eve_v4(Eve_v3):
def __init__(self,):
super().__init__()
self.audio_transcriber = S2T_with_whisper()
def response_with_audio(self, input_user, audio_flag=False):
# Check if the input is a text or a audio
if audio_flag:
# If the input is a audio, we process the audio
input_user = self.audio_transcriber.transcribe(input_user)
# Obtain the response of Eve
eve_message = self.response(input_user)
return eve_message
class Eve_v4a(Eve_v3):
def __init__(self,):
super().__init__()
self.audio_transcriber = S2T_with_openai()
def response_with_audio(self, input_user, audio_flag=False):
# Check if the input is a text or a audio
if audio_flag:
# If the input is a audio, we process the audio
input_user = self.audio_transcriber.transcribe(input_user)
# Obtain the response of Eve
eve_message = self.response(input_user)
return eve_message
class Eve_v4b(Eve_v3):
def __init__(self,):
        super().__init__()
        "This version already incorporates the TTS/STT part."
self.audio_transcriber = S2T_with_openai()
self.audio_generator = T2S_with_openai()
def response_with_audio(self, input_user, audio_flag=False, audio_response_flag = False):
# Check if the input is a text or a audio
if audio_flag:
# If the input is a audio, we process the audio
input_user = self.audio_transcriber.transcribe(input_user)
# Obtain the response of Eve
eve_message = self.response(input_user)
if audio_response_flag:
# If the response is a audio, we process the audio
audio_path_file = self.audio_generator.create(eve_message)
return eve_message, audio_path_file
else:
return eve_message
class Eve_v5(Eve_v4b):
def __init__(self,):
        super().__init__()
        """This version adds retrieval from the vector database."""
# Initialize the conversation analysis
self.conversation_analysis = ConversationAnalysis_v2()
self.vectordabase_serch = VectorDatabase(db_path=VECTOR_DB_PATH)
def initialize(self,):
# Load prompts
self.prompt_manager.load_prompts(paths = [SUMMARY_CONVERSATION_v1['path'], PERSONAL_INFORMATION_v1['path'], EVE_PROMPT_v1['path'], SEARCH_IN_VECTORSTORE_DATABASE_v1['path']],
keys = ['summary_conversation', 'personal_information', 'eve_message','search_in_database'],
input_variables = [SUMMARY_CONVERSATION_v1['input_variables'], PERSONAL_INFORMATION_v1['input_variables'], EVE_PROMPT_v1['input_variables'], SEARCH_IN_VECTORSTORE_DATABASE_v1['input_variables']])
# Load the information requiered for the conversation
self.load_personal_information()
# Load prompt for the summary conversation
self.load_summary_conversation()
# Load the description of the database
self.load_description_database()
# Load prompt for the chatbot
self.chatbot.load_prompt(prompt = self.prompt_manager.get_prompt('eve_message'))
def load_description_database(self,):
description = "Petroleum Geo-Services (PGS) is a leading provider of seismic and reservoir services for the oil and gas industry. The company offers a wide range of geophysical services, including seismic data acquisition, processing, and interpretation. PGS operates a fleet of seismic vessels and has a global presence, with offices and operations in various countries around the world. The company's services are used by oil and gas companies to identify and evaluate potential hydrocarbon reserves, optimize field development plans, and monitor reservoir performance. PGS is committed to delivering high-quality data and insights to its clients, helping them make informed decisions and maximize the value of their assets. "
self.conversation_analysis.load_description_search_database(description = description)
# Load prompt for the search database
self.conversation_analysis.load_prompt_search_database(prompt = self.prompt_manager.get_prompt('search_in_database'))
def response(self, message):
        # Check whether this message should be answered from the vector database.
search_in_database = self.conversation_analysis.search_database(message)
if search_in_database:
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Search in database
eve_message = self.vectordabase_serch.predict(message)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
else:
# We obtain the last messages
last_messages = self.conversation_manager.get_n_last_messages()
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Obtain the complete current conversation
current_conversation = self.conversation_manager.get_conversation()
# Init the threads for the personal information and the summary conversation
personal_info_thread = threading.Thread(target=self._process_personal_information, args=(current_conversation,))
summary_thread = threading.Thread(target=self._process_summary_conversation, args=(last_messages , current_conversation))
personal_info_thread.start()
summary_thread.start()
personal_info_thread.join()
summary_thread.join()
# Obtain the new personal information
personal_info = self.conversation_analysis.get_personal_information()
# Obtain the new summary conversation
summary_conversation = self.conversation_analysis.get_summary_conversation()
# Obtain the response of the chatbot
eve_message = self.chatbot.response(message = message,
personal_information = personal_info,
previous_conversation_summary = summary_conversation,
last_messages = last_messages)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
return eve_message
class Eve_v5a(Eve_v5):
def __init__(self,):
super().__init__()
""" Aca incorpora la busqueda en la base de datos vectorial"""
# Initialize the conversation analysis
self.conversation_analysis = ConversationAnalysis_v3()
self.n_questions_surveys = 0
def initialize(self,):
# Load prompts
self.prompt_manager.load_prompts(paths = [SUMMARY_CONVERSATION_v1['path'], PERSONAL_INFORMATION_v1['path'], EVE_PROMPT_v1['path'], SEARCH_IN_VECTORSTORE_DATABASE_v1['path']],
keys = ['summary_conversation', 'personal_information', 'eve_message','search_in_database'],
input_variables = [SUMMARY_CONVERSATION_v1['input_variables'], PERSONAL_INFORMATION_v1['input_variables'], EVE_PROMPT_v1['input_variables'], SEARCH_IN_VECTORSTORE_DATABASE_v1['input_variables']])
        # Load the information required for the conversation
self.load_personal_information()
# Load prompt for the summary conversation
self.load_summary_conversation()
# Load the description of the database
self.load_description_database()
# Load prompt for the chatbot
self.chatbot.load_prompt(prompt = self.prompt_manager.get_prompt('eve_message'))
# Load the survey
self.load_surveys()
# Init the survey
self.init_survey()
def init_survey(self,):
# Check if there is a survey to answer
if self.n_questions_surveys > 0:
# Generate the message of the survey
message = "EVE: I have a survey for you. Do you want to answer it?(Y/N): "
# Obtain the response of the user
response = input(message)
            # Check if the response is Y or N
            if response == "Y":
                print("EVE: Thanks, let's start with the survey")
# Create a for loop to load the questions
for i in range(self.n_questions_surveys):
# Obtain the question
question, answers = self.conversation_analysis.obtain_question(i)
complete_answered = question + " (choice: a,b,c,d,...)\n"
abcd = ['a','b','c','d','e','f','g','h','i','j']
for j in range(len(answers)):
complete_answered += f"{abcd[j]}. {answers[j]}\n"
print(complete_answered)
answer = input("Choice:")
# Answer lower case
answer = answer.lower()
# Now we need to match the answer with the index
index_answer = abcd.index(answer)
real_answer = answers[index_answer]
self.conversation_analysis.respond_question( number_question= i, answer=real_answer)
def load_surveys(self,):
user_path = self.user_manager.get_user_folder(self.user_manager.username, self.user_manager.password)
# Survey path
survey_path = os.path.join(user_path, 'surveys', 'surveys.csv')
# Load the surveys
self.n_questions_surveys = self.conversation_analysis.load_survey(path = survey_path)
def load_description_database(self,):
description = "Petroleum Geo-Services (PGS) is a leading provider of seismic and reservoir services for the oil and gas industry. The company offers a wide range of geophysical services, including seismic data acquisition, processing, and interpretation. PGS operates a fleet of seismic vessels and has a global presence, with offices and operations in various countries around the world. The company's services are used by oil and gas companies to identify and evaluate potential hydrocarbon reserves, optimize field development plans, and monitor reservoir performance. PGS is committed to delivering high-quality data and insights to its clients, helping them make informed decisions and maximize the value of their assets. "
self.conversation_analysis.load_description_search_database(description = description)
# Load prompt for the search database
self.conversation_analysis.load_prompt_search_database(prompt = self.prompt_manager.get_prompt('search_in_database'))
def response_with_audio(self, input_user, audio_flag=False, audio_response_flag = False):
        # Check whether the input is text or audio
if audio_flag:
            # If the input is audio, transcribe it to text first
input_user = self.audio_transcriber.transcribe(input_user)
# Obtain the response of Eve
eve_message = self.response(input_user)
if audio_response_flag:
            # If an audio response is requested, synthesize speech from the reply
audio_path_file = self.audio_generator.create(eve_message)
return eve_message, audio_path_file
else:
return eve_message
def response(self, message):
        ## Decide whether this message should trigger a search in the vector database.
search_in_database = self.conversation_analysis.search_database(message)
if search_in_database:
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Search in database
eve_message = self.vectordabase_serch.predict(message)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
else:
# We obtain the last messages
last_messages = self.conversation_manager.get_n_last_messages()
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Obtain the complete current conversation
current_conversation = self.conversation_manager.get_conversation()
# Init the threads for the personal information and the summary conversation
personal_info_thread = threading.Thread(target=self._process_personal_information, args=(current_conversation,))
summary_thread = threading.Thread(target=self._process_summary_conversation, args=(last_messages , current_conversation))
personal_info_thread.start()
summary_thread.start()
personal_info_thread.join()
summary_thread.join()
# Obtain the new personal information
personal_info = self.conversation_analysis.get_personal_information()
# Obtain the new summary conversation
summary_conversation = self.conversation_analysis.get_summary_conversation()
# Obtain the response of the chatbot
eve_message = self.chatbot.response(message = message,
personal_information = personal_info,
previous_conversation_summary = summary_conversation,
last_messages = last_messages)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
return eve_message
class Eve_v5b(Eve_v5):
def __init__(self,):
super().__init__()
"""Aca se cambio la forma en la cual se inicializa Eve"""
# Initialize the conversation analysis
self.conversation_analysis = ConversationAnalysis_v3()
        # Attributes for surveys
self.n_questions_surveys = 0
self.n_questions_surveys_answered = 0
self.do_survey = False
self.survey_answered = False
self.temp_path = None
self.username = None
def initialize(self,username = None, password = None):
# Login process
self.user_manager.user_login(username = username, password = password)
self.username = self.user_manager.get_username()
# Load temp path
self.load_tmp_file()
# Load prompts
self.prompt_manager.load_prompts(paths = [SUMMARY_CONVERSATION_v1['path'], PERSONAL_INFORMATION_v1['path'], EVE_PROMPT_v1['path'], SEARCH_IN_VECTORSTORE_DATABASE_v1['path']],
keys = ['summary_conversation', 'personal_information', 'eve_message','search_in_database'],
input_variables = [SUMMARY_CONVERSATION_v1['input_variables'], PERSONAL_INFORMATION_v1['input_variables'], EVE_PROMPT_v1['input_variables'], SEARCH_IN_VECTORSTORE_DATABASE_v1['input_variables']])
        # Load the information required for the conversation
self.load_personal_information()
# Load prompt for the summary conversation
self.load_summary_conversation()
# Load the description of the database
self.load_description_database()
# Load prompt for the chatbot
self.chatbot.load_prompt(prompt = self.prompt_manager.get_prompt('eve_message'))
# Load the survey
self.load_surveys()
def load_tmp_file(self,):
self.temp_path = os.path.join('app','TEMP',self.username)
# Check if path exists and if not create it
if not os.path.exists(self.temp_path):
os.makedirs(self.temp_path)
# Folders to create
folders_create = ['audio_responses']
for folder in folders_create:
# Check if path exists and if not create the folder
if not os.path.exists(os.path.join(self.temp_path, folder)):
os.mkdir(os.path.join(self.temp_path, folder))
def ask_to_do_survey(self,):
# Check if there is a survey to answer
if self.n_questions_surveys > 0:
# Generate the message of the survey
message = "I have a survey for you. Do you want to answer it?(Y/N): "
return message
def obtain_response_to_do_survey(self,response):
response = response.upper()
if response == "Y":
self.do_survey = True
self.question_survey_id = 0
else:
self.do_survey = False
def get_survey(self, return_dict = False):
# Check if there is a survey to answer
if self.do_survey:
# Check if question_survey_id < n_questions_surveys
if self.question_survey_id < self.n_questions_surveys -1:
if return_dict:
# Obtain the question
return self.conversation_analysis.obtain_question(self.question_survey_id, return_dict=return_dict)
else:
question, answers = self.conversation_analysis.obtain_question(self.question_survey_id)
self.answers = answers
complete_question = question + " (choice: a,b,c,d,...)\n"
abcd = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',]
for j in range(len(answers)):
complete_question += f"{abcd[j]}. {answers[j]}\n"
return complete_question, self.question_survey_id
elif self.question_survey_id == self.n_questions_surveys -1:
self.do_survey = False
self.survey_answered = True
else:
return None
else:
return None
def response_survey(self, answer_id, survey_id):
self.question_survey_id = survey_id
question, answers = self.conversation_analysis.obtain_question(self.question_survey_id)
answer = answers[answer_id]
# Save the response
        self.conversation_analysis.respond_question(number_question=self.question_survey_id, answer=answer)
        # Update the number of questions answered
self.question_survey_id += 1
def load_surveys(self,):
user_path = self.user_manager.get_user_folder(self.user_manager.username, self.user_manager.password)
# Survey path
survey_path = os.path.join(user_path, 'surveys', 'surveys.csv')
# Load the surveys
self.n_questions_surveys = self.conversation_analysis.load_survey(path = survey_path)
def save_surveys_responses(self,):
self.conversation_analysis.save_survey_responses()
def load_description_database(self,):
description = "Petroleum Geo-Services (PGS) is a leading provider of seismic and reservoir services for the oil and gas industry. The company offers a wide range of geophysical services, including seismic data acquisition, processing, and interpretation. PGS operates a fleet of seismic vessels and has a global presence, with offices and operations in various countries around the world. The company's services are used by oil and gas companies to identify and evaluate potential hydrocarbon reserves, optimize field development plans, and monitor reservoir performance. PGS is committed to delivering high-quality data and insights to its clients, helping them make informed decisions and maximize the value of their assets. "
self.conversation_analysis.load_description_search_database(description = description)
# Load prompt for the search database
self.conversation_analysis.load_prompt_search_database(prompt = self.prompt_manager.get_prompt('search_in_database'))
def response_with_audio(self, input_user, audio_flag=False, audio_response_flag = False):
        # Check whether the input is text or audio
if audio_flag:
            # If the input is audio, transcribe it to text first
input_user = self.audio_transcriber.transcribe(input_user)
# Obtain the response of Eve
eve_message = self.response(input_user)
if audio_response_flag:
            # If an audio response is requested, synthesize speech from the reply
audio_path = os.path.join(self.temp_path, 'audio_responses')
audio_path_file = self.audio_generator.create(eve_message, path_to_save = audio_path)
return eve_message, audio_path_file
else:
return eve_message
def response(self, message):
        ## Decide whether this message should trigger a search in the vector database.
search_in_database = self.conversation_analysis.search_database(message)
if search_in_database:
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Search in database
eve_message = self.vectordabase_serch.predict(message)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
else:
# We obtain the last messages
last_messages = self.conversation_manager.get_n_last_messages()
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Obtain the complete current conversation
current_conversation = self.conversation_manager.get_conversation()
# Init the threads for the personal information and the summary conversation
personal_info_thread = threading.Thread(target=self._process_personal_information, args=(current_conversation,))
summary_thread = threading.Thread(target=self._process_summary_conversation, args=(last_messages , current_conversation))
personal_info_thread.start()
summary_thread.start()
personal_info_thread.join()
summary_thread.join()
# Obtain the new personal information
personal_info = self.conversation_analysis.get_personal_information()
# Obtain the new summary conversation
summary_conversation = self.conversation_analysis.get_summary_conversation()
# Obtain the response of the chatbot
eve_message = self.chatbot.response(message = message,
personal_information = personal_info,
previous_conversation_summary = summary_conversation,
last_messages = last_messages)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
return eve_message
def finalize(self,):
# Update the personal information
self.user_manager.update_user_information(key='personal_information', value=self.conversation_analysis.get_personal_information())
# Save the conversation
self.save_conversation()
# Save the personal information
self.save_personal_information()
# Save the surveys responses
if self.survey_answered :
self.save_surveys_responses()
class Eve_v6(Eve_v5):
def __init__(self,):
super().__init__()
"""Aca se cambio la forma en la cual se inicializa Eve"""
# Initialize the conversation analysis
self.conversation_analysis = ConversationAnalysis_v3()
        # Attributes for surveys
self.n_questions_surveys = 0
self.n_questions_surveys_answered = 0
self.do_survey = False
self.survey_answered = False
self.temp_path = None
self.username = None
def initialize(self,username = None, password = None):
# Login process
self.user_manager.user_login(username = username, password = password)
self.username = self.user_manager.get_username()
# Load temp path
self.load_tmp_file()
# Load prompts
self.prompt_manager.load_prompts(paths = [SUMMARY_CONVERSATION_v1['path'], PERSONAL_INFORMATION_v1['path'], EVE_PROMPT_v1['path'], SEARCH_IN_VECTORSTORE_DATABASE_v1['path']],
keys = ['summary_conversation', 'personal_information', 'eve_message','search_in_database'],
input_variables = [SUMMARY_CONVERSATION_v1['input_variables'], PERSONAL_INFORMATION_v1['input_variables'], EVE_PROMPT_v1['input_variables'], SEARCH_IN_VECTORSTORE_DATABASE_v1['input_variables']])
        # Load the information required for the conversation
self.load_personal_information()
# Load prompt for the summary conversation
self.load_summary_conversation()
# Load the description of the database
self.load_description_database()
# Load prompt for the chatbot
self.chatbot.load_prompt(prompt = self.prompt_manager.get_prompt('eve_message'))
def load_tmp_file(self,):
self.temp_path = os.path.join('app','TEMP',self.username)
# Check if path exists and if not create it
if not os.path.exists(self.temp_path):
os.makedirs(self.temp_path)
# Folders to create
folders_create = ['audio_responses']
for folder in folders_create:
# Check if path exists and if not create the folder
if not os.path.exists(os.path.join(self.temp_path, folder)):
os.mkdir(os.path.join(self.temp_path, folder))
def ask_to_do_survey(self,):
# Check if there is a survey to answer
if self.n_questions_surveys > 0:
# Generate the message of the survey
message = "I have a survey for you. Do you want to answer it?(Y/N): "
return message
def obtain_response_to_do_survey(self,response):
response = response.upper()
if response == "Y":
self.do_survey = True
self.question_survey_id = 0
else:
self.do_survey = False
def get_survey(self, return_dict = False):
# Check if there is a survey to answer
if self.do_survey:
# Check if question_survey_id < n_questions_surveys
if self.question_survey_id < self.n_questions_surveys-1:
if return_dict:
# Obtain the question
return self.conversation_analysis.obtain_question(self.question_survey_id, return_dict=return_dict)
else:
question, answers = self.conversation_analysis.obtain_question(self.question_survey_id)
self.answers = answers
complete_question = question + " (choice: a,b,c,d,...)\n"
abcd = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o',]
for j in range(len(answers)):
complete_question += f"{abcd[j]}. {answers[j]}\n"
return complete_question, self.question_survey_id
            elif self.question_survey_id >= self.n_questions_surveys - 1:
self.do_survey = False
self.survey_answered = True
else:
return "None", "None"
else:
return "None", "None"
def response_survey(self, answer_id, survey_id):
self.question_survey_id = survey_id
question, answers = self.conversation_analysis.obtain_question(self.question_survey_id)
answer = answers[answer_id]
# Save the response
self.conversation_analysis.respond_question( number_question = self.question_survey_id, answer=answer)
        # Update the number of questions answered
self.question_survey_id += 1
def load_surveys(self, survey_id = None):
user_path = self.user_manager.get_user_folder(self.user_manager.username, self.user_manager.password)
# Survey path
survey_path = os.path.join(user_path, 'surveys', 'surveys.csv')
# Load the surveys
self.n_questions_surveys = self.conversation_analysis.load_survey(path = survey_path, survey_id = survey_id)
self.do_survey = True
self.question_survey_id = 0
def save_surveys_responses(self,):
self.conversation_analysis.save_survey_responses()
def load_description_database(self,):
description = "Petroleum Geo-Services (PGS) is a leading provider of seismic and reservoir services for the oil and gas industry. The company offers a wide range of geophysical services, including seismic data acquisition, processing, and interpretation. PGS operates a fleet of seismic vessels and has a global presence, with offices and operations in various countries around the world. The company's services are used by oil and gas companies to identify and evaluate potential hydrocarbon reserves, optimize field development plans, and monitor reservoir performance. PGS is committed to delivering high-quality data and insights to its clients, helping them make informed decisions and maximize the value of their assets. "
self.conversation_analysis.load_description_search_database(description = description)
# Load prompt for the search database
self.conversation_analysis.load_prompt_search_database(prompt = self.prompt_manager.get_prompt('search_in_database'))
def response_with_audio(self, input_user, audio_flag=False, audio_response_flag = False):
        # Check whether the input is text or audio
if audio_flag:
            # If the input is audio, transcribe it to text first
input_user = self.audio_transcriber.transcribe(input_user)
# Obtain the response of Eve
eve_message = self.response(input_user)
if audio_response_flag:
            # If an audio response is requested, synthesize speech from the reply
audio_path = os.path.join(self.temp_path, 'audio_responses')
audio_path_file = self.audio_generator.create(eve_message, path_to_save = audio_path)
return eve_message, audio_path_file
else:
return eve_message
def response(self, message):
        ## Decide whether this message should trigger a search in the vector database.
search_in_database = self.conversation_analysis.search_database(message)
if search_in_database:
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Search in database
eve_message = self.vectordabase_serch.predict(message)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
else:
# We obtain the last messages
last_messages = self.conversation_manager.get_n_last_messages()
# Add patient message to memory
self.conversation_manager.add_message(message, is_ai_message = False)
# Obtain the complete current conversation
current_conversation = self.conversation_manager.get_conversation()
# Init the threads for the personal information and the summary conversation
personal_info_thread = threading.Thread(target=self._process_personal_information, args=(current_conversation,))
summary_thread = threading.Thread(target=self._process_summary_conversation, args=(last_messages , current_conversation))
personal_info_thread.start()
summary_thread.start()
personal_info_thread.join()
summary_thread.join()
# Obtain the new personal information
personal_info = self.conversation_analysis.get_personal_information()
# Obtain the new summary conversation
summary_conversation = self.conversation_analysis.get_summary_conversation()
# Obtain the response of the chatbot
eve_message = self.chatbot.response(message = message,
personal_information = personal_info,
previous_conversation_summary = summary_conversation,
last_messages = last_messages)
# Add AI assistant message to memory
self.conversation_manager.add_message(eve_message, is_ai_message = True)
return eve_message
def finalize(self,):
# Update the personal information
self.user_manager.update_user_information(key='personal_information', value=self.conversation_analysis.get_personal_information())
# Save the conversation
self.save_conversation()
# Save the personal information
self.save_personal_information()
# Save the surveys responses
if self.survey_answered :
self.save_surveys_responses() | [] |
2024-01-10 | TalkToEve/ai-backend | modules~vector_database~embed_v2.py | import os
import json
from langchain.document_loaders import (
BSHTMLLoader,
DirectoryLoader,
)
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from dotenv import load_dotenv
root_path = os.path.sep.join(os.path.dirname(os.path.realpath(__file__)).split(os.path.sep)[:-2])
import os, sys
sys.path.append(root_path)
from configurations.config_llm import OPENAI_API_KEY
def embed(files_path, db_path, web = False):
load_dotenv()
if web:
loader = DirectoryLoader(
files_path,
glob="*.html",
loader_cls=BSHTMLLoader,
show_progress=True,
loader_kwargs={"get_text_separator": " ", "open_encoding": "latin-1"},
)
else:
loader = DirectoryLoader(
files_path,
glob="*.pdf",
show_progress=True,
loader_kwargs={"get_text_separator": " ", "open_encoding": "latin-1"},
)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
)
data = loader.load()
documents = text_splitter.split_documents(data)
if web:
# map sources from file directory to web source
with open(f"{files_path}/sitemap.json", "r") as f:
sitemap = json.loads(f.read())
for document in documents:
document.metadata["source"] = sitemap[document.metadata["source"].replace(".html", "").replace(f"{files_path}/", "")]
else:
for document in documents:
document.metadata["source"] = document.metadata["source"].replace(".pdf", "").replace(f"{files_path}/", "")
embedding_model = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_key=OPENAI_API_KEY)
if os.path.exists(db_path):
db = Chroma(persist_directory=db_path, embedding_function=embedding_model)
db.add_documents(documents)
else:
db = Chroma.from_documents(documents, embedding_model, persist_directory=db_path)
db.persist() | [] |
2024-01-10 | TalkToEve/ai-backend | modules~chatbot~text_to_speech.py | import os
import sys
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
grandparent_dir = os.path.dirname(parent_dir)
sys.path.append(parent_dir)
sys.path.append(grandparent_dir)
from configurations.config import OPENAI_API_KEY
from configurations.config_tts_stt import PATH_TO_SAVE
from openai import OpenAI
class T2S_with_openai():
def __init__(self):
self.client = OpenAI(api_key=OPENAI_API_KEY)
self.text_to_speech_model = self.client.audio.speech
self.model = "tts-1"
self.voice = "shimmer"
def create(self, input_, path_to_save = PATH_TO_SAVE):
response = self.text_to_speech_model.create(
model=self.model,
voice=self.voice,
input=input_)
path_file = os.path.join(path_to_save , "output.mp3")
response.stream_to_file(path_file)
return path_file
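
# Minimal usage sketch (an illustration, not part of the original module); assumes a valid
# OPENAI_API_KEY in configurations.config and that PATH_TO_SAVE points to an existing folder.
if __name__ == "__main__":
    tts = T2S_with_openai()
    audio_file = tts.create("Hello, this is Eve speaking.")
    print(f"Audio written to: {audio_file}")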
| [] |
2024-01-10 | donaldmo/files | mian.py | from dotenv import load_dotenv
from langchain.llms import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
load_dotenv()
llm = HuggingFaceHub(
repo_id='mrm8488/t5-base-finetuned-wikiSQL',
# model_kwargs={"temperature": 0.5, "max_length": 64}
)
template = 'Translate English to SQL: {question}'
prompt = PromptTemplate(
template=template,
input_variables=["question"]
)
llm_chain = LLMChain(prompt=prompt, llm=llm, verbose=True)
question = "What is the average of the respondents using a mobile device?"
print(llm_chain.run(question)) | [
"Translate English to SQL: {question}",
"question"
] |
2024-01-10 | yogeshhk/Sarvadnya | src~agents~autogen_groupchat_viz_open_source.py | # https://github.com/microsoft/autogen/blob/osllm/notebook/open_source_language_model_example.ipynb
# Following ways failed to start local llm server
# >> modelz-llm -m bigscience/bloomz-560m --device auto [NOT FOR WINDOWS]
# >> python -m llama_cpp.server --model <model path>.gguf
# Worked with LMStudio. You can download models from UI or if you have them already, keep them in
# C:\Users\yoges\.cache\lm-studio\models\yogeshhk\Sarvadnya , 'llama-7b.ggmlv3.q4_0.bin' was recognized
# Check using CHAT if it responds well.
# Start server, take the base_path URL and set it as below, at both places.
# Then run this file
# Setup autogen with the correct API
import autogen
from autogen import AssistantAgent, UserProxyAgent
import openai
openai.api_type = "openai"
openai.api_key = "..."
openai.api_base = "http://localhost:1234/v1"
openai.api_version = "2023-05-15"
autogen.oai.ChatCompletion.start_logging()
local_config_list = [
{
'model': 'llama 7B q4_0 ggml',
'api_key': 'any string here is fine',
'api_type': 'openai',
'api_base': "http://localhost:1234/v1",
'api_version': '2023-05-15'
}
]
llm_config = {"config_list": local_config_list, "seed": 42}
user_proxy = autogen.UserProxyAgent(
name="User_proxy",
system_message="A human admin.",
code_execution_config={"last_n_messages": 3, "work_dir": "groupchat"},
human_input_mode="NEVER",
)
coder = autogen.AssistantAgent(
name="Coder", # the default assistant agent is capable of solving problems with code
llm_config=llm_config,
)
critic = autogen.AssistantAgent(
name="Critic",
system_message="""Critic. You are a helpful assistant highly skilled in evaluating the quality of a given visualization code by providing a score from 1 (bad) - 10 (good) while providing clear rationale. YOU MUST CONSIDER VISUALIZATION BEST PRACTICES for each evaluation. Specifically, you can carefully evaluate the code across the following dimensions
- bugs (bugs): are there bugs, logic errors, syntax error or typos? Are there any reasons why the code may fail to compile? How should it be fixed? If ANY bug exists, the bug score MUST be less than 5.
- Data transformation (transformation): Is the data transformed appropriately for the visualization type? E.g., is the dataset appropriated filtered, aggregated, or grouped if needed? If a date field is used, is the date field first converted to a date object etc?
- Goal compliance (compliance): how well the code meets the specified visualization goals?
- Visualization type (type): CONSIDERING BEST PRACTICES, is the visualization type appropriate for the data and intent? Is there a visualization type that would be more effective in conveying insights? If a different visualization type is more appropriate, the score MUST BE LESS THAN 5.
- Data encoding (encoding): Is the data encoded appropriately for the visualization type?
- aesthetics (aesthetics): Are the aesthetics of the visualization appropriate for the visualization type and the data?
YOU MUST PROVIDE A SCORE for each of the above dimensions.
{bugs: 0, transformation: 0, compliance: 0, type: 0, encoding: 0, aesthetics: 0}
Do not suggest code.
Finally, based on the critique above, suggest a concrete list of actions that the coder should take to improve the code.
""",
llm_config=llm_config,
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder, critic], messages=[], max_round=20)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)
user_proxy.initiate_chat(manager, message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.")
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~orgpedia~questgen_huggingface.py | # https://gpt-index.readthedocs.io/en/latest/examples/query_engine/sub_question_query_engine.html
# Using LlamaIndex as a Callable Tool
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain import HuggingFaceHub
from llama_index import LangchainEmbedding, ServiceContext
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.tools import QueryEngineTool, ToolMetadata
from llama_index import VectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.query_engine import SubQuestionQueryEngine
documents = SimpleDirectoryReader('data/experiment').load_data()
repo_id = "tiiuae/falcon-7b"
embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0.1, 'truncation': 'only_first',
"max_length": 1024})
llm_predictor = LLMPredictor(llm=llm)
service_context = ServiceContext.from_defaults(chunk_size=512, llm_predictor=llm_predictor, embed_model=embed_model)
index = VectorStoreIndex.from_documents(documents=documents, service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=3)
# setup base query engine as tool
query_engine_tools = [
QueryEngineTool(
query_engine=query_engine,
metadata=ToolMetadata(name='pg_essay', description='Paul Graham essay on What I Worked On')
)
]
query_engine = SubQuestionQueryEngine.from_defaults(query_engine_tools=query_engine_tools)
# response = s_engine.query('Explain childhood')
response = query_engine.query('How was Paul Grahams life different before and after YC?')
print(response)
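
# Optional follow-up to the "Callable Tool" note at the top (a sketch; the tool name, the
# RUN_AGENT_DEMO flag and the wiring below are illustrative, not part of the original
# script). It wraps the sub-question engine as a LangChain Tool so an agent can call it.
RUN_AGENT_DEMO = False
if RUN_AGENT_DEMO:
    qa_tool = Tool(name="pg_essay_qa",
                   func=lambda q: str(query_engine.query(q)),
                   description="Answers questions about Paul Graham's essay 'What I Worked On'")
    agent = initialize_agent([qa_tool], llm, agent="zero-shot-react-description", verbose=True)
    print(agent.run("How was Paul Graham's life different before and after YC?"))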
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~agents~autogen_two_players_open_source.py | # https://github.com/microsoft/autogen/blob/osllm/notebook/open_source_language_model_example.ipynb
# Following ways failed to start local llm server
# >> modelz-llm -m bigscience/bloomz-560m --device auto [NOT FOR WINDOWS]
# >> python -m llama_cpp.server --model <model path>.gguf
# Worked with LMStudio. You can download models from UI or if you have them already, keep them in
# C:\Users\yoges\.cache\lm-studio\models\yogeshhk\Sarvadnya , 'llama-7b.ggmlv3.q4_0.bin' was recognized
# Check using CHAT if it responds well.
# Start server, take the base_path URL and set it as below, at both places.
# Then run this file
# Setup autogen with the correct API
import autogen
from autogen import AssistantAgent, UserProxyAgent
import openai
openai.api_type = "openai"
openai.api_key = "..."
openai.api_base = "http://localhost:1234/v1"
openai.api_version = "2023-05-15"
autogen.oai.ChatCompletion.start_logging()
local_config_list = [
{
'model': 'Mistral 7B Instruct v01 Q2', # 'llama 7B q4_0 ggml'
'api_key': 'any string here is fine',
'api_type': 'openai',
'api_base': "http://localhost:1234/v1",
'api_version': '2023-05-15'
}
]
# # Perform Completion
# question = "Who are you? Tell it in 2 lines only."
# response = autogen.oai.Completion.create(config_list=local_config_list, prompt=question, temperature=0)
# ans = autogen.oai.Completion.extract_text(response)[0]
#
# print("Answer is:", ans)
#
# # Student Teacher
#
# small = AssistantAgent(name="small model",
# max_consecutive_auto_reply=2,
# system_message="You should act as a student! Give response in 2 lines only.",
# llm_config={
# "config_list": local_config_list,
# "temperature": 0.5,
# })
#
# big = AssistantAgent(name="big model",
# max_consecutive_auto_reply=2,
# system_message="Act as a teacher.Give response in 2 lines only.",
# llm_config={
# "config_list": local_config_list,
# "temperature": 0.5,
# })
#
# big.initiate_chat(small, message="Who are you?")
# Entrepreneur - Accountant
ennreprenuer = AssistantAgent(name="Entrepreneur",
max_consecutive_auto_reply=2,
system_message="Act as a Entrepreneur! You want to get the task done from the Accountant",
llm_config={
"config_list": local_config_list,
"temperature": 0.5,
})
accountant = AssistantAgent(name="Accountant",
max_consecutive_auto_reply=2,
system_message="Act as a Accountant. You want to help the Entrepreneur to get the task done",
llm_config={
"config_list": local_config_list,
"temperature": 0.5,
})
accountant.initiate_chat(ennreprenuer, message="I want to help prepare and file the taxes.")
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~ask_almanack~cmd_main.py | """Ask a question to the notion database."""
import faiss
from langchain import OpenAI
from langchain.chains import VectorDBQAWithSourcesChain
import pickle
import argparse
from config import *
import pathlib
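# Presumably the pickled FAISS store was built on a POSIX machine; remapping PosixPath to
# WindowsPath below lets that pickle load on Windows (an assumption about this shim's intent).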
temp = pathlib.PosixPath
pathlib.PosixPath = pathlib.WindowsPath
parser = argparse.ArgumentParser(description='Ask a question to the notion DB.')
parser.add_argument('question', type=str, help='The question to ask the notion DB')
args = parser.parse_args()
# Load the LangChain.
index = faiss.read_index(DOCS_INDEX)
with open(FAISS_STORE_PKL, "rb") as f:
store = pickle.load(f)
store.index = index
chain = VectorDBQAWithSourcesChain.from_llm(llm=OpenAI(temperature=0), vectorstore=store)
result = chain({"question": args.question})
print(f"Answer: {result['answer']}")
print(f"Sources: {result['sources']}")
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~ask_gst~streamlit_main.py | import streamlit as st
from langchain.llms import VertexAI # Need to set GCP Credentials first
# https://ai.gopubby.com/get-started-with-google-vertex-ai-api-and-langchain-integration-360262d05216
# https://python.langchain.com/docs/integrations/llms/google_vertex_ai_palm
# from langchain import PromptTemplate, LLMChain
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.document_loaders import UnstructuredHTMLLoader
from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceHubEmbeddings
from langchain.chains import RetrievalQA
## DO NOT RUN THIS IN ANY IDE but on command line `streamlit run streamlit_main.py`
template = """
You are a Goods and Services Tax (GST) Expert. Give accurate answer to the following question.
Under no circumstances do you give any answer outside of GST.
### QUESTION
{question}
### END OF QUESTION
Answer:
"""
st.title('GST FAQs')
#
# def generate_response(question):
# prompt = PromptTemplate(template=template, input_variables=["question"])
# llm = VertexAI()
# llm_chain = LLMChain(prompt=prompt, llm=llm)
# response = llm_chain.run({'question': question})
# st.info(response)
def build_QnA_db():
loader = CSVLoader(file_path='./data/nlp_faq_engine_faqs.csv')
docs = loader.load()
# loader = PyPDFLoader("./data/Final-GST-FAQ-edition.pdf")
# docs = loader.load_and_split()
loader = UnstructuredHTMLLoader("data/cbic-gst_gov_in_fgaq.html")
docs += loader.load()
embeddings = HuggingFaceHubEmbeddings()
db = FAISS.from_documents(docs, embeddings)
    retriever = db.as_retriever()
    llm = VertexAI()  # model_name="gemini-pro", default=
    chain = RetrievalQA.from_chain_type(llm=llm, retriever=retriever, verbose=False, chain_type="stuff")
return chain
if "chain" not in st.session_state:
st.session_state["chain"] = build_QnA_db()
def generate_response_from_db(question):
chain = st.session_state["chain"]
response = chain.run(question)
st.info(response)
with st.form('my_form'):
text = st.text_area('Ask Question:', '... about GST')
submitted = st.form_submit_button('Submit')
if submitted:
generate_response_from_db(text)
| [
"\n You are a Goods and Services Tax (GST) Expert. Give accurate answer to the following question.\n Under no circumstances do you give any answer outside of GST.\n \n ### QUESTION\n {question}\n ### END OF QUESTION\n \n Answer:\n "
] |
2024-01-10 | yogeshhk/Sarvadnya | src~ask_suntzu~cmd_main.py | # Ref https://github.com/amrrs/QABot-LangChain/blob/main/Q%26A_Bot_with_Llama_Index_and_LangChain.ipynb
#from gpt_index import SimpleDirectoryReader, GPTListIndex, GPTSimpleVectorIndex, LLMPredictor, PromptHelper
from langchain import OpenAI
from llama_index import SimpleDirectoryReader, LangchainEmbedding, GPTListIndex,GPTSimpleVectorIndex, PromptHelper
from llama_index import LLMPredictor, ServiceContext
import sys
import os
def construct_index(directory_path):
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_outputs = 256
# set maximum chunk overlap
max_chunk_overlap = 20
# set chunk size limit
chunk_size_limit = 600
prompt_helper = PromptHelper(max_input_size, num_outputs, max_chunk_overlap, chunk_size_limit=chunk_size_limit)
# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-002", max_tokens=num_outputs))
documents = SimpleDirectoryReader(directory_path).load_data()
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
index_obj = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
index_obj.save_to_disk('model/index.json')
return index_obj
def ask_bot(input_index='model/index.json'):
index_obj = GPTSimpleVectorIndex.load_from_disk(input_index)
while True:
query = input('What do you want to ask the bot? \n')
if query == "nothing":
return
response = index_obj.query(query, response_mode="compact")
print("\nBot says: \n\n" + response.response + "\n\n\n")
index = construct_index("data/")
ask_bot('model/index.json')
| [] |
2024-01-10 | yogeshhk/Sarvadnya | CodeByOthers~langchain-chatbot-shashankdeshpande~pages~3_%F0%9F%93%84_chat_with_your_documents.py | import os
import utils
import streamlit as st
from streaming import StreamHandler
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.memory import ConversationBufferMemory
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.text_splitter import RecursiveCharacterTextSplitter
st.set_page_config(page_title="ChatPDF", page_icon="📄")
st.header('Chat with your documents')
st.write('Has access to custom documents and can respond to user queries by referring to the content within those documents')
with st.expander("Implementation details"):
st.markdown("""
- LLM - [OpenAI](https://python.langchain.com/docs/ecosystem/integrations/openai#llm)
- Document Loader - [PyPDFLoader](https://python.langchain.com/docs/modules/data_connection/document_loaders/how_to/pdf#using-pypdf)
- Document Splitter - [RecursiveCharacterTextSplitter](https://python.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter)
- Embeddings - [HuggingFaceEmbeddings](https://python.langchain.com/docs/modules/data_connection/text_embedding/integrations/huggingfacehub)
- Vector store - [DocArrayInMemorySearch](https://python.langchain.com/docs/modules/data_connection/vectorstores/integrations/docarray_in_memory)
- Document Retriever - [Vector store-backed retriever: Maximum Marginal Relevance](https://python.langchain.com/docs/modules/data_connection/retrievers/how_to/vectorstore#maximum-marginal-relevance-retrieval)
- Memory - [ConversationBufferMemory](https://python.langchain.com/docs/modules/memory/how_to/buffer)
- Chain - [ConversationalRetrievalChain](https://python.langchain.com/docs/modules/agents/agent_types/react)
""")
class CustomDataChatbot:
def __init__(self):
utils.configure_openai_api_key()
self.openai_model = "gpt-3.5-turbo"
def save_file(self, file):
folder = 'tmp'
if not os.path.exists(folder):
os.makedirs(folder)
file_path = f'./{folder}/{file.name}'
with open(file_path, 'wb') as f:
f.write(file.getvalue())
return file_path
@st.spinner('Analyzing documents..')
def setup_qa_chain(self, uploaded_files):
# Load documents
docs = []
for file in uploaded_files:
file_path = self.save_file(file)
loader = PyPDFLoader(file_path)
docs.extend(loader.load())
# Split documents
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1500,
chunk_overlap=200
)
splits = text_splitter.split_documents(docs)
# Create embeddings and store in vectordb
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
vectordb = DocArrayInMemorySearch.from_documents(splits, embeddings)
# Define retriever
retriever = vectordb.as_retriever(
search_type='mmr',
search_kwargs={'k':2, 'fetch_k':4}
)
# Setup memory for contextual conversation
memory = ConversationBufferMemory(
memory_key='chat_history',
return_messages=True
)
# Setup LLM and QA chain
llm = ChatOpenAI(model_name=self.openai_model, temperature=0, streaming=True)
qa_chain = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory, verbose=True)
return qa_chain
@utils.enable_chat_history
def main(self):
# User Inputs
uploaded_files = st.sidebar.file_uploader(label='Upload PDF files', type=['pdf'], accept_multiple_files=True)
if not uploaded_files:
st.error("Please upload PDF documents to continue!")
st.stop()
user_query = st.chat_input(placeholder="Ask me anything!")
if uploaded_files and user_query:
qa_chain = self.setup_qa_chain(uploaded_files)
utils.display_msg(user_query, 'user')
with st.chat_message("assistant"):
st_cb = StreamHandler(st.empty())
response = qa_chain.run(user_query, callbacks=[st_cb])
st.session_state.messages.append({"role": "assistant", "content": response})
if __name__ == "__main__":
obj = CustomDataChatbot()
obj.main() | [] |
2024-01-10 | yogeshhk/Sarvadnya | src~generative-ai~intro_palm_langchain.py | # Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Prerequisites
# - Select or create a Google Cloud project.
# - Make sure that billing is enabled for your project.
# - Enable the Vertex AI API
# - Create credentials json (Ref https://www.youtube.com/watch?v=rWcLDax-VmM)
# - Set Environment variable GOOGLE_APPLICATION_CREDENTIALS as the above created json
# - Create conda environment with python=3.10 (?)
# - Activating that environment, pip install google-cloud-aiplatform==1.27
# - Install langchain
# ----------------------------------------
# Langchain Usage
# Ref https://python.langchain.com/docs/modules/model_io/models/llms/integrations/google_vertex_ai_palm
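
# A minimal, optional sketch of the credentials prerequisite above (the hint path is a
# placeholder, not a real file): the prerequisite sets GOOGLE_APPLICATION_CREDENTIALS to the
# service-account JSON; this just warns if it is missing before Vertex AI is first used.
import os
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS"):
    print("Hint: set GOOGLE_APPLICATION_CREDENTIALS, e.g. to C:/path/to/service-account.json")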
from langchain.llms import VertexAI
from langchain import PromptTemplate, LLMChain
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm = VertexAI()
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "What NFL team won the Super Bowl in the year Justin Beiber was born?"
response = llm_chain.run(question)
print(response)
llm = VertexAI(model_name="code-bison")
llm_chain = LLMChain(prompt=prompt, llm=llm)
question = "Write a python function that identifies if the number is a prime number?"
response = llm_chain.run(question)
print(response)
| [
"question",
"Question: {question}\nAnswer: Let's think step by step."
] |
2024-01-10 | yogeshhk/Sarvadnya | src~bAbi_tasks~bAbi_cognition_meter.py | # Reference paper:
# "Towards AI-Complete Question Answering: A Set of Prerequisite Toy Tasks" https://arxiv.org/abs/1502.05698
# Reference blog:
# https://medium.com/technology-hits/does-chatgpt-really-understand-the-language-4855683b0143
# Assuming OPENAI_API_KEY set in Environment variables
from langchain.llms import OpenAI, HuggingFaceHub
from langchain import PromptTemplate
from langchain import LLMChain
import pandas as pd
from colorama import Fore
bool_score = True
total_score = 0
count = 0
template = "{context} {query}"
prompt = PromptTemplate(template=template, input_variables=['context', 'query'])
llms = [{'name': 'Flan', 'model': HuggingFaceHub(repo_id="google/flan-t5-small", model_kwargs={"temperature": 1e-10})},
{'name': 'OpenAI', 'model': OpenAI(temperature=0)}]
df = pd.read_excel(r'data/Test2.xlsx')
for llm_dict in llms:
llm_name = llm_dict['name']
llm_model = llm_dict['model']
chain = LLMChain(llm=llm_model, prompt=prompt)
df.reset_index()
for index, row in df.iterrows():
context = (row['Context']).replace("\n", " ")
queries = (row['Queries']).split("\n")
labels = (row['Labels']).split("\n")
for query, label in zip(queries, labels):
print(Fore.BLUE + f"Context: {context}")
print(Fore.YELLOW + f"Query:{query}")
print(Fore.GREEN + f"Label: {label}")
keywords = {'context': context, 'query': query}
print(Fore.MAGENTA + f"{llm_name} Response: {chain.run(keywords).strip()}")
if bool_score:
str_score = input(Fore.RED + 'Score? 0 for Wrong, 1 for Perfect : ')
total_score += float(str_score)
count += 1
print(Fore.CYAN + "---")
if count:
print(f"Overall score for {llm_name}: {(total_score / count):.2f}")
print(Fore.CYAN + "===========================")
| [
"{context} {query}",
"context"
] |
2024-01-10 | yogeshhk/Sarvadnya | src~ask_paulgraham~main_driver_langchain_only.py | # https://github.com/kylesteckler/generative-ai/blob/main/notebooks/knowledge_based_system.ipynb
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import VertexAIEmbeddings
from langchain.llms import VertexAI
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
documents = PyPDFLoader(file_path='../data/On-Paul-Graham-2.pdf').load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
chunk_overlap=400,
length_function=len,
)
chunks = text_splitter.split_documents(documents)
embedding = VertexAIEmbeddings() # PaLM embedding API
# set persist directory so the vector store is saved to disk
db = Chroma.from_documents(chunks, embedding, persist_directory="./vectorstore")
# vector store
retriever = db.as_retriever(
search_type="similarity",
search_kwargs={"k": 5} # number of nearest neighbors to retrieve
)
# PaLM API
# You can also set temperature, top_p, top_k
llm = VertexAI(
model_name="text-bison",
max_output_tokens=1024
)
# q/a chain
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True
)
def ask_question(question: str):
response = qa({"query": question})
print(f"Response: {response['result']}\n")
citations = {doc.metadata['source'] for doc in response['source_documents']}
print(f"Citations: {citations}\n")
# uncomment below to print source chunks used
print(f"Source Chunks Used: {response['source_documents']}")
ask_question("What is the theme of the documents?")
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~ask_paulgraham~main_driver_langchain_llamaindex.py | ## LlamaIndex is not working well
# https://github.com/PradipNichite/Youtube-Tutorials/blob/main/LlamaIndex_Tutorial.ipynb
# https://github.com/jerryjliu/llama_index/issues/544
# https://colab.research.google.com/drive/16QMQePkONNlDpgiltOi7oRQgmB8dU5fl?usp=sharing#scrollTo=3323ec57
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms import HuggingFaceHub
from langchain.embeddings import VertexAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.llms import GPT4All
from langchain.llms.base import LLM
import torch
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index import (
GPTVectorStoreIndex,
LangchainEmbedding,
LLMPredictor,
ServiceContext,
StorageContext,
download_loader,
PromptHelper
)
from transformers import pipeline
PyMuPDFReader = download_loader("PyMuPDFReader")
documents = PyMuPDFReader().load(file_path='../data/On-Paul-Graham-2.pdf', metadata=True)
# ensure document texts are not bytes objects
for doc in documents:
doc.text = doc.text.decode()
# GPT4All running locally does not give any results
local_llm_path = "../models/ggml-gpt4all-j-v1.3-groovy.bin"
llm = GPT4All(model=local_llm_path, streaming=True) # , backend='gptj', streaming=True, n_ctx=512)
llm_predictor = LLMPredictor(llm=llm)
class FlanLLM(LLM):
model_name = "google/flan-t5-xl"
pipeline = pipeline("text2text-generation", model=model_name, device="cpu",
model_kwargs={"torch_dtype": torch.bfloat16})
def _call(self, prompt, stop=None):
return self.pipeline(prompt, max_length=9999)[0]["generated_text"]
def _identifying_params(self):
return {"name_of_model": self.model_name}
def _llm_type(self):
return "custom"
huggingFace_llm = HuggingFaceHub(repo_id="tiiuae/falcon-7b", model_kwargs={"temperature": 1e-10}) # "google/flan-t5-xl"
llm_predictor = LLMPredictor(llm=huggingFace_llm) # FlanLLM()
prompt_helper = PromptHelper(max_input_size=512, num_output=256, max_chunk_overlap=-1000)
huggingFace_embed = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
embed_model = LangchainEmbedding(huggingFace_embed)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=SimpleNodeParser(text_splitter=TokenTextSplitter(chunk_size=300, chunk_overlap=20))
)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
query_engine = index.as_query_engine()
response = query_engine.query("What is this text about?")
print(response)
response = query_engine.query("who is this text about?")
print(response)
index.storage_context.persist(persist_dir="./storage")
from llama_index import load_index_from_storage
storage_context = StorageContext.from_defaults(persist_dir="./storage")
new_index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = new_index.as_query_engine(similarity_top_k=1, service_context=service_context)
# response_stream = query_engine.query("who is this text about?")
# response_stream.print_response_stream()
response = query_engine.query("list 5 important points from this book")
print(response)
response = query_engine.query("what naval says about wealth creation")
print(response)
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~wikipedia_langchain_palm~main_driver.py | # https://github.com/kylesteckler/generative-ai/blob/main/notebooks/knowledge_based_system.ipynb
from langchain.document_loaders import WikipediaLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import VertexAIEmbeddings
from langchain.llms import VertexAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
docs = WikipediaLoader(query="Machine Learning", load_max_docs=10).load()
docs += WikipediaLoader(query="Deep Learning", load_max_docs=10).load()
docs += WikipediaLoader(query="Neural Networks", load_max_docs=10).load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
chunk_overlap=400,
length_function=len,
)
chunks = text_splitter.split_documents(docs)
# Look at the first two chunks
print(chunks[0:2])
print(f'Number of documents: {len(docs)}')
print(f'Number of chunks: {len(chunks)}')
embedding = VertexAIEmbeddings() # PaLM embedding API
# set persist directory so the vector store is saved to disk
db = Chroma.from_documents(chunks, embedding, persist_directory="./vectorstore")
# vector store
retriever = db.as_retriever(
search_type="similarity",
search_kwargs={"k": 5} # number of nearest neighbors to retrieve
)
# PaLM API
# You can also set temperature, top_p, top_k
llm = VertexAI(
model_name="text-bison",
max_output_tokens=1024
)
# q/a chain
qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever,
return_source_documents=True
)
def ask_question(question: str):
response = qa({"query": question})
print(f"Response: {response['result']}\n")
citations = {doc.metadata['source'] for doc in response['source_documents']}
print(f"Citations: {citations}\n")
# uncomment below to print source chunks used
print(f"Source Chunks Used: {response['source_documents']}")
ask_question("What is a gradient boosted tree?")
ask_question("When was the transformer invented?")
ask_question("What technology underpins large language models?")
# preserve chat history in memory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chat_session = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=retriever,
memory=memory
)
chat_session({'question': 'What technology underpins large language models?'})
# With chat history it will understand that "they" refers to transformers
chat_session({'question': 'When were they invented?'}) | [] |
2024-01-10 | yogeshhk/Sarvadnya | src~orgpedia~questgen_llamaindex_example.py | # https://gpt-index.readthedocs.io/en/latest/examples/evaluation/QuestionGeneration.html
from llama_index import KeywordTableIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
from llama_index.evaluation import DatasetGenerator
from langchain import HuggingFaceHub
documents = SimpleDirectoryReader('data/experiment').load_data()
repo_id = "tiiuae/falcon-7b"
llm_predictor = LLMPredictor(llm=HuggingFaceHub(repo_id=repo_id,
model_kwargs={"temperature": 0.1, 'truncation': 'only_first',
"max_length": 512}))
service_context = ServiceContext.from_defaults(chunk_size=64, llm_predictor=llm_predictor)
data_generator = DatasetGenerator.from_documents(documents, service_context=service_context)
eval_questions = data_generator.generate_questions_from_nodes()
print(eval_questions)
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~career_transition~midcareer_transition.py | #
# Assuming OPENAI_API_KEY set in Environment variables
from langchain.llms import OpenAI, HuggingFaceHub
from langchain import PromptTemplate
from langchain import LLMChain
from colorama import Fore
def ask_inputs():
    edu_phases = {0: "< 18 (School)", 1: "18-21 (College)", 2: "22-30 (Early Job)", 3: "30-50(Mid Career)",
4: "50-60(Giving back?)"}
edu_phases_str = " choose career phase: age (phase): \n"
for i, ph in edu_phases.items():
edu_phases_str += f"[{i}] {ph}\n"
edu_phases_str += "=> "
phase_input = int(input(Fore.MAGENTA + edu_phases_str))
phase = "If my current career phase is shown in the format as 'Age range in years (phase)' then I am in '" + \
edu_phases[phase_input] + "' of my life."
domains = input(Fore.YELLOW + "What are your current domains, such as project management, testing, etc: ")
ratings = {0: "No", 1: "Can try", 2: "Absolutely"}
expertize = {0: "a novice", 1: "an intermediate", 2: "an expert"}
ratings_str = "\nChoose level: \n"
for i, rt in ratings.items():
ratings_str += f"[{i}] {rt}\n"
math_question = "Can you write gradient descent equation in two variables? "
maths_input = int(input(Fore.RED + math_question + ratings_str + "\n=> "))
maths = "In mathematics, I am " + expertize[maths_input]
programming_question = "Can you code matrix multiplication, now? "
programming_input = int(input(Fore.BLUE + programming_question + ratings_str + "\n=> "))
programming = "In programming, I am " + expertize[programming_input]
machinelearning_question = "Can you explain Confusion Matrix? "
ml_input = int(input(Fore.MAGENTA + machinelearning_question + ratings_str + "\n=> "))
ml = "In machine learning, I am " + expertize[ml_input]
    prep = int(input(Fore.YELLOW + "In how many months do you plan to switch? "))
return phase, domains, maths, programming, ml, prep
def main():
phase, domains, maths, programming, ml, prep = ask_inputs()
prompt_str = "You are an expert career counsellor specializing in guiding career transitions to data science. " \
"{phase}. So far I have been working in domains of {domains}. {maths}. {programming}. {ml}. I wish " \
"to change my career to data science in coming {prep} months, so I have only that much time to " \
"prepare. With the above background suggest a detailed month-wise plan for preparation, including " \
"articles to read, YouTube videos to watch, courses to take, certifications to do, etc. \n Plan: \n"
prompt_template = PromptTemplate(template=prompt_str, input_variables=['phase', 'domains', 'maths',
'programming', 'ml', 'prep'])
# prompt_template.format()
models_list = [
# {'name': 'Vicuna', 'model': HuggingFaceHub(repo_id="jeffwan/vicuna-13b")},
{'name': 'OpenAI', 'model': OpenAI(temperature=0)}]
for llm_dict in models_list:
print(Fore.CYAN + "===========================")
llm_name = llm_dict['name']
print(Fore.RED + llm_name)
llm_model = llm_dict['model']
chain = LLMChain(llm=llm_model, prompt=prompt_template, verbose=False)
response = chain.run(phase=phase, domains=domains, maths=maths, programming=programming, ml=ml, prep=prep)
print(Fore.GREEN + response)
print(Fore.CYAN + "===========================")
if __name__ == "__main__":
main()
| [
"programming",
"You are an expert career counsellor specializing in guiding career transitions to data science. {phase}. So far I have been working in domains of {domains}. {maths}. {programming}. {ml}. I wish to change my career to data science in coming {prep} months, so I have only that much time to prepare. With the above background suggest a detailed month-wise plan for preparation, including articles to read, YouTube videos to watch, courses to take, certifications to do, etc. \n Plan: \n",
"domains"
] |
2024-01-10 | yogeshhk/Sarvadnya | src~agents~autogen_functioncall_open_source.py | # https://github.com/microsoft/autogen/blob/osllm/notebook/open_source_language_model_example.ipynb
# Following ways failed to start local llm server
# >> modelz-llm -m bigscience/bloomz-560m --device auto [NOT FOR WINDOWS]
# >> python -m llama_cpp.server --model <model path>.gguf
# Worked with LMStudio. You can download models from UI or if you have them already, keep them in
# C:\Users\yoges\.cache\lm-studio\models\yogeshhk\Sarvadnya , 'llama-7b.ggmlv3.q4_0.bin' was recognized
# Check using CHAT if it responds well.
# Start server, take the base_path URL and set it as below, at both places.
# Then run this file
# Setup autogen with the correct API
import autogen
from autogen import AssistantAgent, UserProxyAgent
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent
import openai
openai.api_type = "openai"
openai.api_key = "..."
openai.api_base = "http://localhost:1234/v1"
openai.api_version = "2023-05-15"
autogen.oai.ChatCompletion.start_logging()
local_config_list = [
{
'model': 'Mistral 7B Instruct v01 Q2', # 'llama 7B q4_0 ggml'
'api_key': 'any string here is fine',
'api_type': 'openai',
'api_base': "http://localhost:1234/v1",
'api_version': '2023-05-15'
}
]
llm_config = {
"functions": [
{
"name": "python",
"description": "run cell in ipython and return the execution result.",
"parameters": {
"type": "object",
"properties": {
"cell": {
"type": "string",
"description": "Valid Python cell to execute.",
}
},
"required": ["cell"],
},
},
{
"name": "sh",
"description": "run a shell script and return the execution result.",
"parameters": {
"type": "object",
"properties": {
"script": {
"type": "string",
"description": "Valid shell script to execute.",
}
},
"required": ["script"],
},
},
],
"config_list": local_config_list,
"request_timeout": 120,
}
chatbot = autogen.AssistantAgent(
name="chatbot",
system_message="For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the " +
"task is done.",
llm_config=llm_config,
)
# create a UserProxyAgent instance named "user_proxy"
user_proxy = autogen.UserProxyAgent(
name="user_proxy",
is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
human_input_mode="NEVER",
max_consecutive_auto_reply=10,
code_execution_config={"work_dir": "coding"},
)
# define functions according to the function description
from IPython import get_ipython
def exec_python(cell):
ipython = get_ipython()
result = ipython.run_cell(cell)
log = str(result.result)
if result.error_before_exec is not None:
log += f"\n{result.error_before_exec}"
if result.error_in_exec is not None:
log += f"\n{result.error_in_exec}"
return log
def exec_sh(script):
return user_proxy.execute_code_blocks([("sh", script)])
# register the functions
user_proxy.register_function(
function_map={
"python": exec_python,
"sh": exec_sh,
}
)
# start the conversation
user_proxy.initiate_chat(
chatbot,
message="Draw two agents chatting with each other with an example dialog. Don't add plt.show().",
)
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~agents~autogen_groupchat_research_open_source.py | # https://github.com/microsoft/autogen/blob/osllm/notebook/open_source_language_model_example.ipynb
# Following ways failed to start local llm server
# >> modelz-llm -m bigscience/bloomz-560m --device auto [NOT FOR WINDOWS]
# >> python -m llama_cpp.server --model <model path>.gguf
# Worked with LMStudio. You can download models from UI or if you have them already, keep them in
# C:\Users\yoges\.cache\lm-studio\models\yogeshhk\Sarvadnya , 'llama-7b.ggmlv3.q4_0.bin' was recognized
# Check using CHAT if it responds well.
# Start server, take the base_path URL and set it as below, at both places.
# Then run this file
# Setup autogen with the correct API
import autogen
from autogen import AssistantAgent, UserProxyAgent
import openai
openai.api_type = "openai"
openai.api_key = "..."
openai.api_base = "http://localhost:1234/v1"
openai.api_version = "2023-05-15"
autogen.oai.ChatCompletion.start_logging()
local_config_list = [
{
'model': 'llama 7B q4_0 ggml',
'api_key': 'any string here is fine',
'api_type': 'openai',
'api_base': "http://localhost:1234/v1",
'api_version': '2023-05-15'
}
]
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message="A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin.",
code_execution_config=False,
)
engineer = autogen.AssistantAgent(
name="Engineer",
    llm_config={"config_list": local_config_list},
system_message='''Engineer. You follow an approved plan. You write python/shell code to solve tasks. Wrap the code in a code block that specifies the script type. The user can't modify your code. So do not suggest incomplete code which requires others to modify. Don't use a code block if it's not intended to be executed by the executor.
Don't include multiple code blocks in one response. Do not ask others to copy and paste the result. Check the execution result returned by the executor.
If the result indicates there is an error, fix the error and output the code again. Suggest the full code instead of partial code or code changes. If the error can't be fixed or if the task is not solved even after the code is executed successfully, analyze the problem, revisit your assumption, collect additional info you need, and think of a different approach to try.
''',
)
scientist = autogen.AssistantAgent(
name="Scientist",
    llm_config={"config_list": local_config_list},
system_message="""Scientist. You follow an approved plan. You are able to categorize papers after seeing their abstracts printed. You don't write code."""
)
planner = autogen.AssistantAgent(
name="Planner",
system_message='''Planner. Suggest a plan. Revise the plan based on feedback from admin and critic, until admin approval.
The plan may involve an engineer who can write code and a scientist who doesn't write code.
Explain the plan first. Be clear which step is performed by an engineer, and which step is performed by a scientist.
''',
    llm_config={"config_list": local_config_list},
)
executor = autogen.UserProxyAgent(
name="Executor",
system_message="Executor. Execute the code written by the engineer and report the result.",
human_input_mode="NEVER",
code_execution_config={"last_n_messages": 3, "work_dir": "paper"},
)
critic = autogen.AssistantAgent(
name="Critic",
system_message="Critic. Double check plan, claims, code from other agents and provide feedback. Check whether the plan includes adding verifiable info such as source URL.",
    llm_config={"config_list": local_config_list},
)
groupchat = autogen.GroupChat(agents=[user_proxy, engineer, scientist, planner, executor, critic], messages=[], max_round=50)
manager = autogen.GroupChatManager(groupchat=groupchat, llm_config={"config_list": local_config_list})
user_proxy.initiate_chat(
manager,
message="""
find papers on LLM applications from arxiv in the last week, create a markdown table of different domains.
""",
) | [] |
2024-01-10 | yogeshhk/Sarvadnya | src~agents~autogen_mathchat_open_source.py | # https://github.com/microsoft/autogen/blob/osllm/notebook/open_source_language_model_example.ipynb
# Following ways failed to start local llm server
# >> modelz-llm -m bigscience/bloomz-560m --device auto [NOT FOR WINDOWS]
# >> python -m llama_cpp.server --model <model path>.gguf
# Worked with LMStudio. You can download models from UI or if you have them already, keep them in
# C:\Users\yoges\.cache\lm-studio\models\yogeshhk\Sarvadnya , 'llama-7b.ggmlv3.q4_0.bin' was recognized
# Check using CHAT if it responds well.
# Start server, take the base_path URL and set it as below, at both places.
# Then run this file
# Setup autogen with the correct API
import autogen
from autogen import AssistantAgent, UserProxyAgent
from autogen.agentchat.contrib.math_user_proxy_agent import MathUserProxyAgent
import openai
openai.api_type = "openai"
openai.api_key = "..."
openai.api_base = "http://localhost:1234/v1"
openai.api_version = "2023-05-15"
autogen.oai.ChatCompletion.start_logging()
local_config_list = [
{
'model': 'llama 7B q4_0 ggml',
'api_key': 'any string here is fine',
'api_type': 'openai',
'api_base': "http://localhost:1234/v1",
'api_version': '2023-05-15'
}
]
# 1. create an AssistantAgent instance named "assistant"
assistant = autogen.AssistantAgent(
name="assistant",
system_message="You are a helpful assistant.",
llm_config={
"request_timeout": 600,
"seed": 42,
"config_list": local_config_list,
}
)
# 2. create the MathUserProxyAgent instance named "mathproxyagent"
# By default, the human_input_mode is "NEVER", which means the agent will not ask for human input.
mathproxyagent = MathUserProxyAgent(
name="mathproxyagent",
human_input_mode="NEVER",
code_execution_config={"use_docker": False},
)
# given a math problem, we use the mathproxyagent to generate a prompt to be sent to the assistant as the initial
# message. the assistant receives the message and generates a response. The response will be sent back to the
# mathproxyagent for processing. The conversation continues until the termination condition is met, in MathChat,
# the termination condition is the detect of "\boxed{}" in the response.
math_problem = "Find all $x$ that satisfy the inequality $(2x+10)(x+3)<(3x+9)(x+8)$. Express your answer in interval " \
"notation."
mathproxyagent.initiate_chat(assistant, problem=math_problem)
| [] |
2024-01-10 | yogeshhk/Sarvadnya | src~midcurve_llm~midcurve_generator.py | # Assuming OPENAI_API_KEY set in Environment variables
import matplotlib.pyplot as plt
from langchain.llms import OpenAI, HuggingFaceHub
from langchain import PromptTemplate
from langchain import LLMChain
prompt = """
You are a geometric transformation program that transforms input 2D polygonal profile to output 1D
polyline profile.
Input 2D polygonal profile is defined by set of connected lines with the format as:
input : [line_1, line_2, line_3,....] where lines are defined by two points, where each point is defined by x and y
coordinates. So line_1 is defined as ((x_1, y_1), (x_2,y_2)) and similarly the other lines.
Output is also defined similar to the input as a set of connected lines where lines are defined by two points,
where each point is defined by x and y coordinates. So, output : [line_1, line_2, line_3,....]
Below are some example transformations, specified as pairs of 'input' and the corresponding 'output'.
After learning from these examples, predict the 'output' of the last 'input' specified. Do not write code or
explain the logic but just give the list of lines with coordinates as specified in the 'output' format.
input:[((5.0,5.0), (10.0,5.0)), ((10.0,5.0), (10.0,30.0)), ((10.0,30.0), (35.0,30.0)), ((35.0,30.0), (35.0, 35.0)),
((35.0, 35.0), (5.0,35.0)), ((5.0,35.0), (5.0,5.0))]
output: [((7.5,5.0), (7.5, 32.5)), ((7.5, 32.5), (35.0, 32.5)), ((35.0, 32.5) (7.5, 32.5))]
input: [((5,5), (10, 5)), ((10, 5), (10, 20)), ((10, 20), (5, 20)), ((5, 20),(5,5))]
output: [((7.5, 5), (7.5, 20))]
input: [((0,25.0), (10.0,25.0)), ((10.0,25.0),(10.0, 45.0)), ((10.0, 45.0),(15.0,45.0)), ((15.0,45.0), (15.0,25.0)),
((15.0,25.0),(25.0,25.0)), ((25.0,25.0),(25.0,20.0)), ((25.0,20.0),(15.0,20.0)), ((15.0,20.0),(15.0,0)),
((15.0,0),(10.0,0)), ((10.0,0),(10.0,20.0)), ((10.0,20.0),(0,20.0)), ((0,20.0),(0,25.0))]
output: [((12.5,0), (12.5, 22.5)), ((12.5, 22.5),(12.5,45.0)), ((12.5, 22.5), (0,22.5)), ((12.5, 22.5), (25.0,22.5))]
input:[((0, 25.0), (25.0,25.0)),((25.0,25.0),(25.0,20.0)), ((25.0,20.0),(15.0, 20.0)), ((15.0, 20.0),(15.0,0)),
((15.0,0),(10.0,0)), ((10.0,0),(10.0,20.0)), ((10.0,20.0),(0,20.0)), ((0,20.0),(0, 25.0))]
output:
"""
# llms = [{'name': 'Flan', 'model': HuggingFaceHub(repo_id="google/flan-t5-small", model_kwargs={"temperature": 1e-10})},
# # {'name': 'OpenAI', 'model': OpenAI(temperature=0)},
# {'name': 'Bloom', 'model': HuggingFaceHub(repo_id="bigscience/bloom", model_kwargs={"temperature": 1e-10})}
# ]
#
# for llm_dict in llms:
# llm_name = llm_dict['name']
# llm_model = llm_dict['model']
# result = llm_model(prompt).strip()
# llm_dict['result'] = result
# print(f"model: {llm_name}, result: {result}")
fig, ax = plt.subplots(figsize=(12, 6))
def plot_polyline(lines):
for line in lines:
point1 = line[0]
point2 = line[1]
xs = [point1[0], point2[0]]
ys = [point1[1], point2[1]]
plt.plot(xs, ys)
tshape = [((0, 25.0), (25.0,25.0)),((25.0,25.0),(25.0,20.0)), ((25.0,20.0),(15.0, 20.0)), ((15.0, 20.0),(15.0,0)), ((15.0,0),(10.0,0)), ((10.0,0),(10.0,20.0)), ((10.0,20.0),(0,20.0)), ((0,20.0),(0, 25.0))]
actual = [((12.5,0), (12.5,22.5)), ((12.5,22.5),(25.0,22.5)), ((12.5,22.5),(0,22.5))]
chatgpt = [((2.5, 0), (2.5, 22.5)), ((2.5, 22.5), (2.5, 45.0)), ((2.5, 22.5), (25.0, 22.5)), ((2.5, 22.5), (12.5, 22.5)), ((2.5, 22.5), (0, 22.5)), ((2.5, 22.5), (25.0, 22.5))]
perplexity = [((12.5,0), (12.5, 22.5)), ((12.5, 22.5),(12.5,45.0)), ((12.5, 22.5), (0,22.5)), ((12.5, 22.5), (25.0,22.5))]
bard = [((12.5, 0), (12.5, 25.0)), ((12.5, 25.0), (25.0, 25.0)), ((25.0, 25.0), (25.0, 0))]
plot_polyline(bard)
plt.xlim([-5, 30])
plt.ylim([-5, 30])
plt.show()
| [
"\nYou are a geometric transformation program that transforms input 2D polygonal profile to output 1D \npolyline profile. \n\nInput 2D polygonal profile is defined by set of connected lines with the format as: \ninput : [line_1, line_2, line_3,....] where lines are defined by two points, where each point is defined by x and y \ncoordinates. So line_1 is defined as ((x_1, y_1), (x_2,y_2)) and similarly the other lines. \n\nOutput is also defined similar to the input as a set of connected lines where lines are defined by two points, \nwhere each point is defined by x and y coordinates. So, output : [line_1, line_2, line_3,....]\n\nBelow are some example transformations, specified as pairs of 'input' and the corresponding 'output'. \n\nAfter learning from these examples, predict the 'output' of the last 'input' specified. Do not write code or \nexplain the logic but just give the list of lines with coordinates as specified in the 'output' format.\n\ninput:[((5.0,5.0), (10.0,5.0)), ((10.0,5.0), (10.0,30.0)), ((10.0,30.0), (35.0,30.0)), ((35.0,30.0), (35.0, 35.0)), \n((35.0, 35.0), (5.0,35.0)), ((5.0,35.0), (5.0,5.0))]\noutput: [((7.5,5.0), (7.5, 32.5)), ((7.5, 32.5), (35.0, 32.5)), ((35.0, 32.5) (7.5, 32.5))]\n\ninput: [((5,5), (10, 5)), ((10, 5), (10, 20)), ((10, 20), (5, 20)), ((5, 20),(5,5))]\noutput: [((7.5, 5), (7.5, 20))]\n\ninput: [((0,25.0), (10.0,25.0)), ((10.0,25.0),(10.0, 45.0)), ((10.0, 45.0),(15.0,45.0)), ((15.0,45.0), (15.0,25.0)), \n((15.0,25.0),(25.0,25.0)), ((25.0,25.0),(25.0,20.0)), ((25.0,20.0),(15.0,20.0)), ((15.0,20.0),(15.0,0)), \n((15.0,0),(10.0,0)), ((10.0,0),(10.0,20.0)), ((10.0,20.0),(0,20.0)), ((0,20.0),(0,25.0))]\noutput: [((12.5,0), (12.5, 22.5)), ((12.5, 22.5),(12.5,45.0)), ((12.5, 22.5), (0,22.5)), ((12.5, 22.5), (25.0,22.5))]\n\ninput:[((0, 25.0), (25.0,25.0)),((25.0,25.0),(25.0,20.0)), ((25.0,20.0),(15.0, 20.0)), ((15.0, 20.0),(15.0,0)), \n((15.0,0),(10.0,0)), ((10.0,0),(10.0,20.0)), ((10.0,20.0),(0,20.0)), ((0,20.0),(0, 25.0))]\noutput: \n\n"
] |
2024-01-10 | lostella/gluonts | src~gluonts~nursery~temporal_hierarchical_forecasting~model~cop_deepar~_network.py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Dict, List, Optional, Tuple
import mxnet as mx
import numpy as np
from gluonts.core.component import Type, validated
from gluonts.itertools import prod
from gluonts.mx.model.deepar import DeepAREstimator
from gluonts.mx.model.deepar._network import DeepARPredictionNetwork
from gluonts.mx.model.deepvar_hierarchical._estimator import projection_mat
from gluonts.mx.model.deepvar_hierarchical._network import coherency_error
from gluonts.mx.distribution import Distribution, EmpiricalDistribution
from gluonts.mx import Tensor
from gluonts.mx.distribution import TransformedPiecewiseLinear
from gluonts.nursery.temporal_hierarchical_forecasting.utils import utils
from gluonts.nursery.temporal_hierarchical_forecasting.model.cop_deepar import (
gluonts_fixes,
gnn,
)
def reconcile_samples(
reconciliation_mat: Tensor,
samples: Tensor,
non_negative: bool = False,
num_iters: int = 10,
) -> Tensor:
if not non_negative:
return mx.nd.dot(samples, reconciliation_mat, transpose_b=True)
else:
# Dykstra's projection method: Projection onto the intersection of convex sets.
x = samples
p = mx.nd.zeros_like(x)
q = mx.nd.zeros_like(x)
for _ in range(num_iters):
# Projection onto the non-negative orthant.
y = mx.nd.relu(x + p)
p = x + p - y
# Projection onto the null space.
x = mx.nd.dot(y + q, reconciliation_mat, transpose_b=True)
q = y + q - x
return x
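# Illustrative sketch (not part of the original module): calling `reconcile_samples`
# on a toy two-level hierarchy (one total node over two leaves). The projection matrix
# is built inline only for demonstration; in this model it normally comes from
# projection_mat(S=temporal_hierarchy.agg_mat). Assumes S has full column rank, so
# S (S^T S)^-1 S^T is the orthogonal projection onto the coherent subspace.
def _example_reconcile_samples() -> Tensor:
    S = np.array([[1.0, 1.0], [1.0, 0.0], [0.0, 1.0]])  # rows: [total, leaf_1, leaf_2]
    M = S @ np.linalg.inv(S.T @ S) @ S.T
    # (batch, time, total_num_nodes); 3.5 != 1.0 + 2.0, so this sample is incoherent.
    samples = mx.nd.array([[[3.5, 1.0, 2.0]]])
    return reconcile_samples(reconciliation_mat=mx.nd.array(M), samples=samples)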
class COPNetwork(mx.gluon.HybridBlock):
@validated()
def __init__(
self,
estimators: List[DeepAREstimator],
prediction_length: int,
temporal_hierarchy: utils.TemporalHierarchy,
do_reconciliation: bool,
dtype: Type,
use_gnn: bool,
use_mlp: bool,
adj_mat_option: str,
non_negative: bool = False,
naive_reconciliation: bool = False,
prediction: bool = False,
loss_function: str = "crps_univariate",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.prediction_length = prediction_length
self.temporal_hierarchy = temporal_hierarchy
self.use_gnn = use_gnn
self.use_mlp = use_mlp
self.adj_mat_option = adj_mat_option
self.do_reconciliation = do_reconciliation
self.non_negative = non_negative
self.loss_function = loss_function
self.dtype = dtype
if naive_reconciliation:
M = utils.naive_reconcilation_mat(
self.temporal_hierarchy.agg_mat, self.temporal_hierarchy.nodes
)
else:
M = projection_mat(S=self.temporal_hierarchy.agg_mat)
self.M = mx.nd.array(M)
self.estimators = estimators
self.models = []
with self.name_scope():
for estimator in estimators:
if not prediction:
self.network = estimator.create_training_network()
else:
self.network = gluonts_fixes.create_prediction_network(
estimator
)
self.register_child(self.network)
self.models.append(self.network)
if self.use_gnn:
# GNN Layer: Do message passing for `L-1` times, where `L` is the number of levels of the hierarchy.
self.gnn = gnn.GNN(
units=self.estimators[0].num_cells,
num_layers=len(self.temporal_hierarchy.agg_multiples) - 1,
adj_matrix=mx.nd.array(
self.temporal_hierarchy.adj_mat(
option=self.adj_mat_option
)
),
use_mlp=self.use_mlp,
)
def get_target_related_feat_at_agg_level(
self,
agg_level: int,
past_target: Tensor,
past_observed_values: Tensor,
past_is_pad: Tensor,
future_target: Optional[Tensor] = None,
future_observed_values: Optional[Tensor] = None,
) -> Dict:
"""
Aggregate target at the given aggregate level along with updating observed value and pad indicators.
:param agg_level:
:param past_target:
:param past_observed_values:
:param past_is_pad:
:param future_target:
:param future_observed_values:
:return:
"""
agg_multiple = self.temporal_hierarchy.agg_multiples[agg_level]
# Truncating the history length of the base time series to the nearest multiple.
base_history_length = (
past_target.shape[1] // agg_multiple
) * agg_multiple
past_target_agg = (
utils.agg_series(
past_target.slice_axis(
axis=1, begin=-base_history_length, end=None
),
agg_multiple=agg_multiple,
)
.squeeze(axis=-1)
.slice_axis(
axis=1, begin=-self.models[agg_level].history_length, end=None
)
)
past_is_pad_agg = (
utils.agg_series(
past_is_pad.slice_axis(
axis=1, begin=-base_history_length, end=None
),
agg_multiple=agg_multiple,
)
.squeeze(axis=-1)
.slice_axis(
axis=1, begin=-self.models[agg_level].history_length, end=None
)
)
past_is_pad_agg = mx.nd.where(
past_is_pad_agg == 0.0,
mx.nd.zeros_like(past_is_pad_agg),
mx.nd.ones_like(past_is_pad_agg),
)
past_observed_values_agg = (
utils.agg_series(
past_observed_values.slice_axis(
axis=1, begin=-base_history_length, end=None
),
agg_multiple=agg_multiple,
)
.squeeze(axis=-1)
.slice_axis(
axis=1, begin=-self.models[agg_level].history_length, end=None
)
)
past_observed_values_agg = mx.nd.where(
# We sum observed values of base time series at `agg_multiple` time steps;
# if all of them are 1, then the observed value for the aggregated time series is 1 and 0 otherwise.
# We could redefine agg_series to actually compute mean, but overloading that term might cause other
# problems later.
past_observed_values_agg == agg_multiple,
mx.nd.ones_like(past_observed_values_agg),
mx.nd.zeros_like(past_observed_values_agg),
)
target_related_feat_agg = {
"past_target": past_target_agg,
"past_is_pad": past_is_pad_agg,
"past_observed_values": past_observed_values_agg,
}
if future_target is not None:
future_target_agg = utils.agg_series(
future_target, agg_multiple=agg_multiple
).squeeze(axis=-1)
future_observed_values_agg = utils.agg_series(
future_observed_values, agg_multiple=agg_multiple
).squeeze(axis=-1)
future_observed_values_agg = mx.nd.where(
future_observed_values_agg == agg_multiple,
mx.nd.ones_like(future_observed_values_agg),
mx.nd.zeros_like(future_observed_values_agg),
)
target_related_feat_agg.update(
{
"future_target": future_target_agg,
"future_observed_values": future_observed_values_agg,
}
)
return target_related_feat_agg
def _embeddings_to_distr(
self,
F,
embeddings_at_all_levels: Tensor,
scales: List,
) -> Distribution:
distr_output = self.models[0].distr_output
distr_args_at_all_levels: Dict = {
arg_name: [] for arg_name in distr_output.args_dim.keys()
}
scales_ls = []
start_ix = 0
for i, num_nodes in enumerate(
self.temporal_hierarchy.num_nodes_per_level
):
end_ix = start_ix + num_nodes
distr_args = self.models[i].proj_distr_args(
embeddings_at_all_levels[..., start_ix:end_ix, :]
)
for j, arg_ls in enumerate(distr_args_at_all_levels.values()):
arg_ls.append(distr_args[j])
scales_ls.append(scales[i].broadcast_like(distr_args[0]))
start_ix = end_ix
# Last dimension contains parameters at all time-levels and aggregation can be done on it.
distr_args_at_all_levels = {
arg_name: F.concat(*arg_ls, dim=-1)
for arg_name, arg_ls in distr_args_at_all_levels.items()
}
scale_at_all_levels = F.concat(*scales_ls, dim=-1)
distr_at_all_levels = distr_output.distribution(
distr_args=distr_args_at_all_levels.values(),
scale=scale_at_all_levels,
)
if isinstance(distr_at_all_levels, TransformedPiecewiseLinear):
distr_at_all_levels = TransformedPiecewiseLinear(
base_distribution=gluonts_fixes.PiecewiseLinearWithSampling(
gamma=distr_at_all_levels.base_distribution.gamma,
slopes=distr_at_all_levels.base_distribution.slopes,
knot_spacings=distr_at_all_levels.base_distribution.knot_spacings,
),
transforms=distr_at_all_levels.transforms,
)
return distr_at_all_levels
def _distr_to_samples(
self,
distr_at_all_levels: Distribution,
num_samples: int,
):
if num_samples == 1:
samples_at_all_levels = distr_at_all_levels.sample(
num_samples=num_samples, dtype=self.dtype
)
# get rid of the redundant axis introduced by `sample`.
samples_at_all_levels = samples_at_all_levels.squeeze(axis=0)
else:
samples_at_all_levels = distr_at_all_levels.sample_rep(
num_samples=num_samples, dtype=self.dtype
)
return samples_at_all_levels
class COPDeepARTrainingNetwork(COPNetwork):
@validated()
def __init__(
self,
num_batches_per_epoch: int,
epochs: int,
warmstart_epoch_frac: float,
num_samples_for_loss: int = 200,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.warmstart_epoch_frac = warmstart_epoch_frac
self.epochs = epochs
self.num_batches_per_epoch = num_batches_per_epoch
self.batch_no = 0
self.num_samples_for_loss = num_samples_for_loss
# noinspection PyMethodOverriding,PyPep8Naming
def hybrid_forward(
self,
F,
feat_static_cat: Tensor,
feat_static_real: Tensor,
past_time_feat: Tensor,
past_target: Tensor,
past_observed_values: Tensor,
past_is_pad: Optional[Tensor],
future_time_feat: Tensor,
future_target: Tensor,
future_observed_values: Tensor,
agg_features_dict: Dict,
) -> Tensor:
"""
Computes the loss for training COPDeepAR, all inputs tensors representing
time series have NTC layout.
Parameters
----------
F
feat_static_cat : (batch_size, num_features)
feat_static_real : (batch_size, num_features)
past_time_feat : (batch_size, history_length, num_features)
past_target : (batch_size, history_length, *target_shape)
past_observed_values : (batch_size, history_length, *target_shape, seq_len)
future_time_feat : (batch_size, prediction_length, num_features)
future_target : (batch_size, prediction_length, *target_shape)
future_observed_values : (batch_size, prediction_length, *target_shape)
agg_features_dict: Dictionary of features for aggregated levels
Returns loss with shape (batch_size, context + prediction_length, 1)
-------
"""
embeddings_at_all_levels_ls = []
target_at_all_levels_ls = []
scale_ls = []
for i, agg_multiple in enumerate(
self.temporal_hierarchy.agg_multiples
):
if agg_multiple != 1:
past_time_feat_agg = agg_features_dict[f"level_{i}"][
"past_time_feat_agg"
]
future_time_feat_agg = agg_features_dict[f"level_{i}"][
"future_time_feat_agg"
]
else:
past_time_feat_agg = past_time_feat
future_time_feat_agg = future_time_feat
target_related_feat_agg = (
self.get_target_related_feat_at_agg_level(
agg_level=i,
past_target=past_target,
past_is_pad=past_is_pad,
past_observed_values=past_observed_values,
future_target=future_target,
future_observed_values=future_observed_values,
)
)
rnn_outputs, _, scale, _, _ = self.models[i].unroll_encoder(
F=F,
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_time_feat=past_time_feat_agg,
future_time_feat=future_time_feat_agg,
**target_related_feat_agg,
)
scale_ls.append(scale.expand_dims(axis=-1))
# put together target sequence
# (batch_size, seq_len, *target_shape)
target = F.concat(
target_related_feat_agg["past_target"].slice_axis(
axis=1,
begin=self.models[i].history_length
- self.models[i].context_length,
end=None,
),
target_related_feat_agg["future_target"],
dim=1,
)
# We reconcile blocks/windows of time steps: e.g., if we have 28 values of daily data, then we
# reconcile 4 windows where each window has a length of 7 if number of leaves in the hierarchy is 7.
window_size = self.temporal_hierarchy.num_leaves // agg_multiple
num_windows = (
self.models[i].context_length
+ self.models[i].prediction_length
) // window_size
embeddings_at_all_levels_ls.append(
rnn_outputs.reshape(
(
rnn_outputs.shape[0],
num_windows,
-1,
rnn_outputs.shape[-1],
)
)
)
target_at_all_levels_ls.append(
target.reshape((target.shape[0], num_windows, -1))
)
# Last dimension contains embeddings at all time-levels and message passing/aggregation can be done on it.
# Shape: (bs, num_windows, total_num_time_steps_of_hierarchy, embedding_dim)
embeddings_at_all_levels = F.concat(
*embeddings_at_all_levels_ls, dim=-2
)
if self.use_gnn:
embeddings_at_all_levels = self.gnn(embeddings_at_all_levels)
distr_at_all_levels = self._embeddings_to_distr(
F,
embeddings_at_all_levels,
scale_ls,
)
target_at_all_levels = F.concat(*target_at_all_levels_ls, dim=-1)
if self.loss_function == "nll":
loss = distr_at_all_levels.loss(x=target_at_all_levels)
# Determine which epoch we are currently in.
self.batch_no += 1
epoch_no = self.batch_no // self.num_batches_per_epoch + 1
epoch_frac = epoch_no / self.epochs
if epoch_frac > self.warmstart_epoch_frac:
print(
f"epoch_frac: {epoch_frac}. Switching the loss function to CRPS"
)
self.loss_function = "crps_univariate"
else:
samples_at_all_levels = self._distr_to_samples(
distr_at_all_levels,
num_samples=self.num_samples_for_loss,
)
if self.do_reconciliation:
reconciled_samples_at_all_levels = reconcile_samples(
reconciliation_mat=self.M,
samples=samples_at_all_levels,
non_negative=self.non_negative,
)
else:
reconciled_samples_at_all_levels = samples_at_all_levels
loss = (
EmpiricalDistribution(
samples=reconciled_samples_at_all_levels, event_dim=1
)
.loss(x=target_at_all_levels)
.expand_dims(axis=-1)
)
return loss
class COPDeepARPredictionNetwork(COPNetwork):
@validated()
def __init__(
self,
return_forecasts_at_all_levels: bool = False,
num_parallel_samples: int = 100,
**kwargs,
) -> None:
super().__init__(prediction=True, **kwargs)
self.return_forecasts_at_all_levels = return_forecasts_at_all_levels
self.num_parallel_samples = num_parallel_samples
def _decode_one_window(
self,
F,
model: DeepARPredictionNetwork,
window_size: int,
offset: int,
static_feat: Tensor,
past_target: Tensor,
time_feat: Tensor,
scale: Tensor,
begin_states: List,
) -> Tuple[Tensor, Tensor]:
"""
Computes RNN outputs by unrolling the LSTM starting with a initial
input and state.
Parameters
----------
static_feat : Tensor
static features. Shape: (batch_size, num_static_features).
past_target : Tensor
target history. Shape: (batch_size, history_length).
time_feat : Tensor
time features. Shape: (batch_size, prediction_length,
num_time_features).
Note: They still need to be for all `prediction_length` time steps.
This function will slice the features it needs.
scale : Tensor
tensor containing the scale of each element in the batch.
Shape: (batch_size, 1, 1).
begin_states : List
list of initial states for the LSTM layers. The shape of each
tensor of the list should be (batch_size, num_cells)
Returns
--------
        Tuple
            RNN outputs for the decoded window, with shape
            (batch_size * num_samples, window_size, num_cells), together with the
            updated RNN states.
"""
rnn_outputs_ls = []
# for each future time-units we draw new samples for this time-unit and
# update the state
for k in range(offset, offset + window_size):
# (batch_size * num_samples, 1, *target_shape, num_lags)
lags = model.get_lagged_subsequences(
F=F,
sequence=past_target,
sequence_length=model.history_length + k,
indices=model.shifted_lags,
subsequences_length=1,
)
# (batch_size * num_samples, 1, *target_shape, num_lags)
lags_scaled = F.broadcast_div(lags, scale.expand_dims(axis=-1))
# from (batch_size * num_samples, 1, *target_shape, num_lags)
# to (batch_size * num_samples, 1, prod(target_shape) * num_lags)
input_lags = F.reshape(
data=lags_scaled,
shape=(-1, 1, prod(model.target_shape) * len(model.lags_seq)),
)
# (batch_size * num_samples, 1, prod(target_shape) * num_lags +
# num_time_features + num_static_features)
decoder_input = F.concat(
input_lags,
time_feat.slice_axis(axis=1, begin=k, end=k + 1),
# observed_values.expand_dims(axis=1),
static_feat,
dim=-1,
)
# output shape: (batch_size * num_samples, 1, num_cells)
# state shape: (batch_size * num_samples, num_cells)
rnn_outputs, begin_states = model.rnn.unroll(
inputs=decoder_input,
length=1,
begin_state=begin_states,
layout="NTC",
merge_outputs=True,
)
rnn_outputs_ls.append(rnn_outputs)
distr_args = model.proj_distr_args(rnn_outputs)
# compute likelihood of target given the predicted parameters
distr = model.distr_output.distribution(distr_args, scale=scale)
# (batch_size * num_samples, 1, *target_shape)
new_samples = distr.sample(dtype=self.dtype)
# (batch_size * num_samples, seq_len, *target_shape)
past_target = F.concat(past_target, new_samples, dim=1)
# (batch_size * num_samples, prediction_length, *target_shape)
rnn_outputs = F.concat(*rnn_outputs_ls, dim=1)
return rnn_outputs, begin_states
def sampling_decoder(
self,
F,
state_ls,
scale_ls,
static_feat_ls,
past_target_ls,
future_time_feat_agg_ls,
):
num_windows = (
self.prediction_length // self.temporal_hierarchy.num_leaves
)
num_nodes_per_level = self.temporal_hierarchy.num_nodes_per_level
reconciled_samples_at_all_levels_ls = []
for j in range(num_windows):
embeddings_at_all_levels_ls = []
for i, agg_multiple in enumerate(
self.temporal_hierarchy.agg_multiples
):
rnn_outputs, states = self._decode_one_window(
F=F,
model=self.models[i],
window_size=num_nodes_per_level[i],
offset=j * num_nodes_per_level[i],
past_target=past_target_ls[i],
time_feat=future_time_feat_agg_ls[i],
static_feat=static_feat_ls[i],
scale=scale_ls[i],
begin_states=state_ls[i],
)
state_ls[i] = states
embeddings_at_all_levels_ls.append(
rnn_outputs.reshape(
(rnn_outputs.shape[0], -1, rnn_outputs.shape[-1])
)
)
# Last dimension contains embeddings at all time-levels and message passing/aggregation can be done on it.
# Shape: (bs, total_num_time_steps_of_hierarchy, embedding_dim)
embeddings_at_all_levels = F.concat(
*embeddings_at_all_levels_ls, dim=-2
)
if self.use_gnn:
embeddings_at_all_levels = self.gnn(embeddings_at_all_levels)
distr_at_all_levels = self._embeddings_to_distr(
F,
embeddings_at_all_levels,
scale_ls,
)
samples_at_all_levels = self._distr_to_samples(
distr_at_all_levels,
num_samples=1,
)
if self.do_reconciliation:
reconciled_samples_at_all_levels = reconcile_samples(
reconciliation_mat=self.M,
samples=samples_at_all_levels,
non_negative=self.non_negative,
)
else:
reconciled_samples_at_all_levels = samples_at_all_levels
rec_err = coherency_error(
S=self.temporal_hierarchy.agg_mat,
samples=reconciled_samples_at_all_levels.asnumpy(),
)
print(f"Reconciliation error: {rec_err}")
cumsum_nodes_per_level = np.cumsum([0] + num_nodes_per_level)
for i in range(len(self.temporal_hierarchy.agg_multiples)):
# (batch_size * num_samples, seq_len, *target_shape)
reconciled_samples = (
reconciled_samples_at_all_levels.slice_axis(
axis=-1,
begin=cumsum_nodes_per_level[i],
end=cumsum_nodes_per_level[i + 1],
)
)
past_target_ls[i] = F.concat(
past_target_ls[i], reconciled_samples, dim=1
)
reconciled_samples_at_all_levels_ls.append(
reconciled_samples_at_all_levels.reshape(
shape=(
-1,
self.num_parallel_samples,
reconciled_samples_at_all_levels.shape[-1],
)
).expand_dims(axis=-2)
)
reconciled_samples_at_all_levels = F.concat(
*reconciled_samples_at_all_levels_ls, dim=-2
)
print(reconciled_samples_at_all_levels.shape)
return reconciled_samples_at_all_levels
# noinspection PyMethodOverriding,PyPep8Naming
def hybrid_forward(
self,
F,
feat_static_cat: Tensor, # (batch_size, num_features)
feat_static_real: Tensor, # (batch_size, num_features)
past_time_feat: Tensor, # (batch_size, history_length, num_features)
past_target: Tensor, # (batch_size, history_length, *target_shape)
past_observed_values: Tensor, # (batch_size, history_length, *target_shape)
future_time_feat: Tensor, # (batch_size, prediction_length, num_features)
past_is_pad: Tensor,
agg_features_dict: Dict,
) -> Tensor:
"""
Predicts samples, all tensors should have NTC layout.
Parameters
----------
F
feat_static_cat : (batch_size, num_features)
feat_static_real : (batch_size, num_features)
past_time_feat : (batch_size, history_length, num_features)
past_target : (batch_size, history_length, *target_shape)
past_observed_values : (batch_size, history_length, *target_shape)
future_time_feat : (batch_size, prediction_length, num_features)
agg_features_dict: Dictionary of features for aggregated levels
Returns
-------
Tensor
Predicted samples
"""
(
state_ls,
scale_ls,
static_feat_ls,
past_target_ls,
future_time_feat_agg_ls,
) = ([], [], [], [], [])
for i, agg_multiple in enumerate(
self.temporal_hierarchy.agg_multiples
):
if agg_multiple != 1:
past_time_feat_agg = agg_features_dict[f"level_{i}"][
"past_time_feat_agg"
]
future_time_feat_agg = agg_features_dict[f"level_{i}"][
"future_time_feat_agg"
]
else:
past_time_feat_agg = past_time_feat
future_time_feat_agg = future_time_feat
target_related_feat_agg = (
self.get_target_related_feat_at_agg_level(
agg_level=i,
past_target=past_target,
past_is_pad=past_is_pad,
past_observed_values=past_observed_values,
)
)
# unroll the decoder in "prediction mode", i.e. with past data only
_, states, scale, static_feat, imputed_sequence = self.models[
i
].unroll_encoder(
F=F,
feat_static_cat=feat_static_cat,
feat_static_real=feat_static_real,
past_time_feat=past_time_feat_agg,
future_observed_values=None,
future_time_feat=None,
future_target=None,
**target_related_feat_agg,
)
# blows-up the dimension of each tensor to batch_size *
# self.num_parallel_samples for increasing parallelism
repeated_past_target = imputed_sequence.repeat(
repeats=self.num_parallel_samples, axis=0
)
repeated_states = [
s.repeat(repeats=self.num_parallel_samples, axis=0)
for s in states
]
repeated_time_feat = future_time_feat_agg.repeat(
repeats=self.num_parallel_samples, axis=0
)
repeated_static_feat = static_feat.repeat(
repeats=self.num_parallel_samples, axis=0
).expand_dims(axis=1)
repeated_scale = scale.repeat(
repeats=self.num_parallel_samples, axis=0
)
state_ls.append(repeated_states)
scale_ls.append(repeated_scale)
static_feat_ls.append(repeated_static_feat)
past_target_ls.append(repeated_past_target)
future_time_feat_agg_ls.append(repeated_time_feat)
reconciled_samples_at_all_levels = self.sampling_decoder(
F,
state_ls=state_ls,
scale_ls=scale_ls,
static_feat_ls=static_feat_ls,
past_target_ls=past_target_ls,
future_time_feat_agg_ls=future_time_feat_agg_ls,
)
if self.return_forecasts_at_all_levels:
return reconciled_samples_at_all_levels
else:
reconciled_samples_at_bottom_level = (
reconciled_samples_at_all_levels.slice_axis(
axis=-1,
begin=-self.temporal_hierarchy.num_leaves,
end=None,
)
)
reconciled_samples_at_bottom_level = (
reconciled_samples_at_bottom_level.reshape(
(
reconciled_samples_at_bottom_level.shape[0],
reconciled_samples_at_bottom_level.shape[1],
-1,
)
)
)
return reconciled_samples_at_bottom_level
| [] |
2024-01-10 | nan-wang/langchain | tests~integration_tests~vectorstores~test_docarray.py | from langchain.docstore.document import Document
from langchain.vectorstores.docarray import DocArray
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_docarray() -> None:
texts = ['foo', 'bar', 'baz']
docsearch = DocArray.from_texts(texts, FakeEmbeddings())
output = docsearch.similarity_search('foo', k=1)
assert output == [Document(page_content='foo')]
def test_docarray_with_scores() -> None:
texts = ['foo', 'bar', 'baz']
metadatas = [{'page': i} for i in range(len(texts))]
docsearch = DocArray.from_texts(texts, FakeEmbeddings(), metadatas=metadatas)
output = docsearch.similarity_search_with_score('foo', k=3)
docs = [o[0] for o in output]
scores = [o[1] for o in output]
    assert docs == [Document(page_content=t, metadata={'page': idx}) for idx, t in enumerate(texts)]
assert scores[0] > scores[1] > scores[2]
| [] |
2024-01-10 | nan-wang/langchain | langchain~vectorstores~docarray.py | from __future__ import annotations
import uuid
from typing import Any, Callable, Iterable, List, Optional, Tuple
import numpy as np
from docarray import DocumentArray
from docarray import Document as DDocument
from langchain.embeddings.base import Embeddings
from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore
class DocArray(VectorStore):
def __init__(
self,
index: DocumentArray,
embedding_function: Callable):
if not isinstance(index, DocumentArray):
            raise ValueError('index should be an instance of docarray.DocumentArray')
self._index = index
self._embedding_function = embedding_function
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> DocArray:
embeddings = embedding.embed_documents(texts)
        docs = DocumentArray.empty(len(texts))
        docs.texts = texts
        docs.embeddings = np.array(embeddings)
        # propagate per-text metadata into the underlying Documents' tags
        if metadatas is not None:
            for doc, metadata in zip(docs, metadatas):
                doc.tags = metadata
        return cls(docs, embedding.embed_query)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
docs = DocumentArray()
ids = ids or [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
embedding = self._embedding_function(text)
metadata = metadatas[i] if metadatas else {}
            docs.append(DDocument(id=ids[i], text=text, embedding=np.array(embedding), tags=metadata))
self._index.extend(docs)
return ids
def similarity_search_with_score(
self,
query: str,
k: int = 4
) -> List[Tuple[Document, float]]:
embedding = self._embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k)
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
docs = self.similarity_search_with_score_by_vector(embedding, k)
return [d for d, _ in docs]
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
docs = self.similarity_search_with_score(query, k)
return [d for d, _ in docs]
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4
) -> List[Tuple[Document, float]]:
q = DocumentArray([DDocument(embedding=np.array(embedding))])
q.match(self._index, metric='cosine', limit=k)
docs = []
for m in q[0].matches:
docs.append((Document(page_content=m.text, metadata=m.tags), m.scores['cosine'].value))
return docs
@classmethod
def from_da(cls, index: DocumentArray, embedding_function: Callable) -> DocArray:
return cls(index=index, embedding_function=embedding_function)
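# Illustrative sketch (not part of the original module): building a DocArray store,
# adding more documents, and querying it. The texts and metadata below are assumptions
# chosen only for demonstration; any Embeddings implementation can be passed in.
def _example_docarray_usage(embedding: Embeddings) -> List[Tuple[Document, float]]:
    store = DocArray.from_texts(['foo', 'bar'], embedding, metadatas=[{'page': 0}, {'page': 1}])
    store.add_texts(['baz'], metadatas=[{'page': 2}])
    return store.similarity_search_with_score('foo', k=2)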
| [] |
2024-01-10 | nan-wang/langchain | langchain~document_loaders~html.py | """Loader that uses unstructured to load HTML files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load HTML files."""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path)
| [] |
2024-01-10 | huangym1950/Auto-GPT-ZH | autogpt~llm_utils.py | from __future__ import annotations
import time
import openai
from colorama import Fore, Style
from openai.error import APIError, RateLimitError
from autogpt.config import Config
from autogpt.logs import logger
CFG = Config()
openai.api_key = CFG.openai_api_key
def call_ai_function(
function: str, args: list, description: str, model: str | None = None
) -> str:
"""Call an AI function
This is a magic function that can do anything with no-code. See
https://github.com/Torantulino/AI-Functions for more info.
Args:
function (str): The function to call
args (list): The arguments to pass to the function
description (str): The description of the function
model (str, optional): The model to use. Defaults to None.
Returns:
str: The response from the function
"""
if model is None:
model = CFG.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
f"\n{function}```\n\nOnly respond with your `return` value.",
},
{"role": "user", "content": args},
]
return create_chat_completion(model=model, messages=messages, temperature=0)
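# Illustrative sketch (not part of the original module): one way `call_ai_function`
# might be used. The function stub, argument, and description below are assumptions for
# demonstration; the model is asked to act as the function and reply only with its
# literal `return` value.
def _example_call_ai_function() -> str:
    return call_ai_function(
        function="def is_palindrome(text: str) -> bool:",
        args=["'racecar'"],
        description="Checks whether the given string reads the same forwards and backwards.",
    )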
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
messages: list, # type: ignore
model: str | None = None,
temperature: float = CFG.temperature,
max_tokens: int | None = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
messages (list[dict[str, str]]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
        temperature (float, optional): The temperature to use. Defaults to CFG.temperature.
max_tokens (int, optional): The max tokens to use. Defaults to None.
Returns:
str: The response from the chat completion
"""
response = None
num_retries = 10
warned_user = False
if CFG.debug_mode:
print(
Fore.GREEN
+ f"Creating chat completion with model {model}, temperature {temperature},"
f" max_tokens {max_tokens}" + Fore.RESET
)
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
if CFG.use_azure:
response = openai.ChatCompletion.create(
deployment_id=CFG.get_azure_deployment_id_for_model(model),
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
break
except RateLimitError:
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"Reached rate limit, passing..." + Fore.RESET,
)
if not warned_user:
logger.double_check(
f"已到达请求频率限制,将在稍后自动重试。请设置一个{Fore.CYAN + Style.BRIGHT}已付费{Style.RESET_ALL}的OpenAI API账户来绕过此限制。"
+ f"您可以在此处阅读更多信息:{Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
)
warned_user = True
except APIError as e:
if e.http_status == 502:
pass
else:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
)
time.sleep(backoff)
if response is None:
logger.typewriter_log(
"FAILED TO GET RESPONSE FROM OPENAI",
Fore.RED,
"Auto-GPT has failed to get a response from OpenAI's services. "
+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
)
logger.double_check()
if CFG.debug_mode:
raise RuntimeError(f"Failed to get response after {num_retries} retries")
else:
quit(1)
return response.choices[0].message["content"]
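# Illustrative sketch (not part of the original module): a minimal call to
# `create_chat_completion`. The prompt content is an assumption for demonstration;
# CFG.smart_llm_model is simply one of the models already configured in Auto-GPT's Config.
def _example_create_chat_completion() -> str:
    example_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Summarize what Auto-GPT does in one sentence."},
    ]
    return create_chat_completion(messages=example_messages, model=CFG.smart_llm_model, temperature=0)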
def create_embedding_with_ada(text) -> list:
"""Create an embedding with text-ada-002 using the OpenAI SDK"""
num_retries = 10
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
if CFG.use_azure:
return openai.Embedding.create(
input=[text],
engine=CFG.get_azure_deployment_id_for_model(
"text-embedding-ada-002"
),
)["data"][0]["embedding"]
else:
return openai.Embedding.create(
input=[text], model="text-embedding-ada-002"
)["data"][0]["embedding"]
except RateLimitError:
pass
except APIError as e:
if e.http_status == 502:
pass
else:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
)
time.sleep(backoff)
| [
"You are now the following python function: ```# PLACEHOLDER\nPLACEHOLDER```\n\nOnly respond with your `return` value."
] |
2024-01-10 | nasingfaund/e2b | api-service~models~base.py | from typing import TypedDict, Dict, Any
from enum import Enum
from langchain.chat_models import ChatOpenAI
from langchain.llms import Anthropic, HuggingFaceEndpoint
from langchain.schema import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from .providers.replicate import ReplicateFix
from .providers.hugging_face import HuggingFaceHubFix, HuggingFaceEndpointFix
class ModelProvider(Enum):
OpenAI = "OpenAI"
Replicate = "Replicate"
Anthropic = "Anthropic"
HuggingFace = "HuggingFace"
class ModelConfig(TypedDict):
# Provider is string and not ModelProvider because we deserialize it form request's JSON body
provider: str
args: Dict[str, Any]
def get_model(
config: ModelConfig,
callback_manager: BaseCallbackManager,
) -> BaseLanguageModel:
match config["provider"]:
case ModelProvider.Anthropic.value:
return Anthropic(
**config["args"],
verbose=True,
streaming=True,
callback_manager=callback_manager,
)
case ModelProvider.OpenAI.value:
return ChatOpenAI(
**config["args"],
request_timeout=3600,
verbose=True,
streaming=True,
callback_manager=callback_manager,
)
case ModelProvider.Replicate.value:
return ReplicateFix(
model=config["args"]["model"],
replicate_api_token=config["args"]["replicate_api_token"],
model_kwargs=config["args"],
verbose=True,
callback_manager=callback_manager,
)
case ModelProvider.HuggingFace.value:
if config["args"].get("endpoint_url"):
return HuggingFaceEndpointFix(
huggingfacehub_api_token=config["args"]["huggingfacehub_api_token"],
endpoint_url=config["args"]["endpoint_url"],
verbose=True,
model_kwargs={
**config["args"],
"huggingfacehub_api_token": None,
"endpoint_url": None,
},
callback_manager=callback_manager,
task="text-generation",
)
elif config["args"].get("repo_id"):
return HuggingFaceHubFix(
huggingfacehub_api_token=config["args"]["huggingfacehub_api_token"],
repo_id=config["args"]["repo_id"],
model_kwargs={
**config["args"],
"huggingfacehub_api_token": None,
"repo_id": None,
},
verbose=True,
callback_manager=callback_manager,
) # type: ignore
raise ValueError(
f"Missing endpoint_url or repo_id for the HuggingFace integration."
)
case _:
raise ValueError(f"Provider {config['provider']} no found.")
| [] |
2024-01-10 | BaeSuyoung/MovING_sum | ImportantSceneSelection~customLDA_class.py | # util
import pandas as pd
import urllib.request
import os
import re
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
# additional nltk downloads
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('omw-1.4')
# LDA
import gensim
from gensim import corpora
from gensim.models.coherencemodel import CoherenceModel
import pyLDAvis.gensim_models # visualization
# custom LDA model
class LDAmodel:
def __init__(self, dataset, print=True, target_col='content'):
self.dataset = pd.DataFrame({'Movie_name':dataset['Movie_name'],
'content':dataset[target_col],
'content_token':dataset[target_col]
})
self.model = None
self.topic_table = None
self.print = print # flag to print
def train(self, additional_stop_words=[], num_topics=70, passes=60, iterations=200, random_state=3569):
self.additional_stop_words = additional_stop_words
        self.num_topics = num_topics # number of topics, k=n
self.passes = passes
self.iterations = iterations
self.random_state = random_state
        # extract character names (all-uppercase tokens) and later use them as stop words
more_stop_words = []
r = re.compile('[A-Z]{2,}')
for idx in range(0, len(self.dataset)):
more_stop_words = more_stop_words + r.findall(self.dataset.loc[idx]['content_token'])
        # remove duplicates
newlist = []
for x in more_stop_words:
x = x.lower()
if x not in newlist:
newlist.append(x)
more_stop_words = newlist
        # convert everything to lowercase
self.dataset['content_token'] = self.dataset['content_token'].str.lower()
if self.print: print("[LDAmodel] 소문자 변경 완료")
# 토큰화
self.dataset['content_token'] = self.dataset.apply(lambda row: nltk.word_tokenize(row['content_token']), axis=1)
if self.print: print("[LDAmodel] 토큰화 완료")
        # remove stop words
stop_words = stopwords.words('english')
stop_words = stop_words + more_stop_words + self.additional_stop_words
self.dataset['content_token'] = self.dataset['content_token'].apply(lambda x: [word for word in x if word not in (stop_words)])
if self.print: print("[LDAmodel] 불용어 제거 완료")
# 표제어 추출
self.dataset['content_token'] = self.dataset['content_token'].apply(lambda x: [WordNetLemmatizer().lemmatize(word, pos='v') for word in x])
if self.print: print("[LDAmodel] 표제어 추출 완료")
# 길이 3 이하 제거
tokenized_doc = self.dataset['content_token'].apply(lambda x: [word for word in x if len(word) > 3])
if self.print: print("[LDAmodel] 길이 3 이하 제거 완료")
# TF-IDF 행렬 만들기
# 역토큰화
detokenized_doc = []
for i in range(len(self.dataset)):
t = ' '.join(tokenized_doc[i])
detokenized_doc.append(t)
        # store the result back into self.dataset['content_token']
self.dataset['content_token'] = detokenized_doc
        # keep the top 1,000 words
vectorizer = TfidfVectorizer(stop_words='english', max_features= 1000)
X = vectorizer.fit_transform(self.dataset['content_token'])
if self.print: print("[LDAmodel] TF-IDF 행렬 생성 완료")
# TF-IDF 행렬의 크기 확인
if self.print: print('[LDAmodel] TF-IDF 행렬의 크기 :', X.shape)
        # integer encoding and vocabulary construction
self.dictionary = corpora.Dictionary(tokenized_doc)
#self.dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=100000)
self.corpus = [self.dictionary.doc2bow(text) for text in tokenized_doc]
if self.print: print("[LDAmodel] 정수 인코딩과 단어 집합 생성 완료")
#print(len(corpus), corpus[1]) # 수행된 결과에서 두번째 출력. 첫번째 문서의 인덱스는 0
# LDA 모델 훈련시키기
self.model = gensim.models.ldamodel.LdaModel(corpus=self.corpus, num_topics=self.num_topics, id2word=self.dictionary, passes=self.passes, iterations=self.iterations, random_state=self.random_state)
        if self.print: print('[LDAmodel] training done')
#topics = self.model.print_topics(num_words=10)
#for topic in topics:
# print(topic)
# def visualization(self):
# pyLDAvis.enable_notebook()
# vis = pyLDAvis.gensim_models.prepare(self.model, self.corpus, self.dictionary)
# pyLDAvis.display(vis)
def make_topic_table_per_doc(self):
if self.model is None:
            print('[LDAmodel] please train the model first')
return
self.topic_table = pd.DataFrame()
min=999999
max=-1
        # for each document, pull out the document number and that document's topic proportions one row at a time.
for i, topic_list in enumerate(self.model[self.corpus]):
doc = topic_list[0] if self.model.per_word_topics else topic_list
doc = sorted(doc, key=lambda x: (x[1]), reverse=True)
            # for each document, sort the topics by proportion in descending order.
            # EX) document 0 before sorting : (topic 2, 48.5%), (topic 8, 25%), (topic 10, 5%), (topic 12, 21.5%),
            # Ex) document 0 after sorting : (topic 2, 48.5%), (topic 8, 25%), (topic 12, 21.5%), (topic 10, 5%)
            # i.e. sorted in the order 48 > 25 > 21 > 5.
            # perform the steps below for every document
            for j, (topic_num, prop_topic) in enumerate(doc): # store the topic number and its proportion separately.
                if j == 0: # the list is sorted, so the first entry is the topic with the highest proportion
self.topic_table = self.topic_table.append(pd.Series([int(topic_num), round(prop_topic,4), topic_list]), ignore_index=True)
if min > topic_num:
min = topic_num
if max < topic_num:
max = topic_num
                    # store the dominant topic, its proportion, and the full topic distribution.
else:
break
        if self.print: print('[LDAmodel] topic table built, topic index range (' + str(min) + '-' + str(max) + ')')
        self.topic_table = self.topic_table.reset_index() # add an extra index column to serve as the document-number column.
        # column names (kept in Korean because n_doc_topic_list looks them up): document number, dominant topic, dominant topic's proportion, per-topic proportions
        self.topic_table.columns = ['문서 번호', '가장 비중이 높은 토픽', '가장 높은 토픽의 비중', '각 토픽의 비중']
return(self.topic_table)
def n_doc_topic_list(self, n, num_words=10):
if self.topic_table is None:
            print('[LDAmodel] please build the topic table first')
return
topics = self.model.print_topics(num_topics=-1, num_words=num_words)
topic_list = []
for topic in topics:
topic_list.append(topic[1].split("\"")[1::2])
#print(len(topic_list))
return(topic_list[int(self.topic_table.loc[n]['가장 비중이 높은 토픽'])])
def evaluation(self):
if self.model is None:
            print('[LDAmodel] please train the model first')
return
cm = CoherenceModel(model=self.model, corpus=self.corpus, coherence='u_mass')
coherence = cm.get_coherence()
return coherence, self.model.log_perplexity(self.corpus) # coherence, perplexity
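# Illustrative sketch (not part of the original class): end-to-end usage on a tiny,
# made-up DataFrame. The movie names and plot texts are assumptions for demonstration;
# the all-caps "JOHN" also shows how uppercase character names become stop words.
def _example_lda_usage():
    df = pd.DataFrame({
        'Movie_name': ['toy_a', 'toy_b'],
        'content': ['JOHN repairs broken robots in a quiet seaside town.',
                    'A detective chases stolen paintings across wintry Europe.'],
    })
    lda = LDAmodel(df, print=False, target_col='content')
    lda.train(num_topics=2, passes=5, iterations=50)
    topic_table = lda.make_topic_table_per_doc()
    coherence, perplexity = lda.evaluation()
    return topic_table, lda.n_doc_topic_list(0), coherence, perplexity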
| [] |
2024-01-10 | CatherineWong/laps | src~models~sample_generator.py | """
sample_generator.py | Author: Gabe Grand.
Queries GPT to generate new samples based on existing samples.
"""
import json
import os
from typing import Set
import numpy as np
from openai.api_resources.completion import Completion
from openai.error import APIConnectionError, InvalidRequestError, RateLimitError
from openai.openai_object import OpenAIObject
import src.models.model_loaders as model_loaders
from dreamcoder.frontier import Frontier, FrontierEntry
from dreamcoder.program import EtaLongVisitor, Program
from dreamcoder.task import Task
from dreamcoder.type import Type, TypeConstructor
from src.experiment_iterator import RANDOM_GENERATOR
from src.models.gpt_base import DEFAULT_LINE_SEPARATOR, GPTBase, Prompt
from src.models.laps_grammar import LAPSGrammar
from src.task_loaders import ALL, PROGRAMS, TEST, TRAIN
ModelRegistry = model_loaders.ModelLoaderRegistries[model_loaders.SAMPLE_GENERATOR]
@ModelRegistry.register
class GPTSampleGenerator(GPTBase, model_loaders.ModelLoader):
name = "gpt_sample_generator"
query_results_file = "gpt_query_results.json"
# Parse error codes
ERROR_PARSE = "parse"
ERROR_INFER = "infer"
ERROR_INVALID_TYPE = "invalid_type"
ERROR_FREE_VARIABLES = "free_variables"
ERROR_ETA_LONG = "eta_long"
ERROR_LIKELIHOOD = "likelihood"
# Final task is the last task in body_tasks
FINAL_TASK_ORIGIN_DEFAULT = "default"
# Final task is drawn randomly from train tasks not in the current batch
FINAL_TASK_ORIGIN_RANDOM_TRAIN = "random_train"
# Final task is drawn randomly from unsolved train tasks
FINAL_TASK_ORIGIN_UNSOLVED_TRAIN = "unsolved_train"
# Final task is drawn randomly from test tasks
FINAL_TASK_ORIGIN_RANDOM_TEST = "random_test"
@staticmethod
def load_model(experiment_state, **kwargs):
return GPTSampleGenerator(experiment_state=experiment_state, **kwargs)
def __init__(self, experiment_state=None, engine=None):
super().__init__(engine=engine)
def generate_samples(
self,
experiment_state,
task_splits: list,
task_ids_in_splits: dict,
# Sampling
n_samples: int,
n_samples_per_query: int = None,
max_queries: int = None,
max_retries: int = None,
evaluate_samples: bool = False,
# Prompt construction
body_task_types: list = [PROGRAMS],
final_task_types: list = [PROGRAMS],
final_task_origin: str = FINAL_TASK_ORIGIN_DEFAULT,
function_name_classes: list = [LAPSGrammar.DEFAULT_FUNCTION_NAMES],
prepend_dsl_description: bool = False,
line_separator: str = DEFAULT_LINE_SEPARATOR,
# GPT parameters
temperature: float = 0.40,
max_tokens_completion_beta: float = 2.0,
# Utility
debug: bool = False,
use_cached: bool = False,
query_print_frequency: int = 1,
compute_likelihoods: bool = True,
verbose: bool = False,
):
"""
Queries OpenAI API to generate new samples based on training data.
params:
# LAPS parameters
experiment_state: experiment_state
task_splits: list of task splits
task_ids_in_splits: dict of task_ids_in_splits
# Sampling parameters
n_samples: Total number of unique, valid samples to generate from GPT.
Prompting will continue until this number is reached or max_queries is exceeded.
n_samples_per_query: Number of samples to take from GPT per query. Each query uses a new, random prompt.
Defaults to a single query with n_samples.
max_queries: Maximum number of queries to make to GPT. Defaults to 2 * min_queries, where min_queries is
the minimum number of queries required to generate n_samples.
max_retries: Max number of retries per query.
Intention is to more gracefully handle `InvalidRequestError` when max tokens is exceeded via iterative backoff.
Iteratively removes last item from body_tasks until query success or max_retries is exceeded.
Defaults to a very permissive behavior where the query will retry until reduced to a single task before failing.
evaluate_samples: Exhaustively check whether each valid program solves any training task. If True, programs that solve
a training task will be added to that task's frontier and programs that don't solve any training task will be added
to a sample task's frontier. If False, all programs will be added to a sample task's frontier.
# Prompt construction parameters
body_task_types: List of task types in [LANGUAGE, PROGRAMS] to include in the body of the prompt.
final_task_types: List of task types in [LANGUAGE, PROGRAMS] to include in the final task of the prompt.
final_task_origin: Origin of the final task in the prompt.
            function_name_classes: List of 'name_classes' specifying what naming scheme to use for functions in
                programs used for the inductive prompt. Name classes will be applied in order as they are available for each
function, falling back on DEFAULT (the DreamCoder parseable function names).
prepend_dsl_description: Prepends an automatically-constructed description of all fns in the DSL to the prompt.
# GPT-specific parameters
temperature: GPT temperature sampling value in `[0., 1.]` range.
max_tokens_completion_beta: Multiplicative factor for the maximum number of tokens in the completion.
max_tokens is set to the number of tokens in the last program in the prompt,
times the value of max_tokens_completion_beta.
# Utility parameters
debug: If True, replaces live query to GPT with a random sample
from the training set.
use_cached: If True, replaces live query to GPT with a cached query
stored in `query_results_filepath`.
query_print_frequency: Number of queries to make before printing a status update.
compute_likelihoods: If True, compute likelihoods for each sample.
verbose: If True, print extra status updates including parse errors.
"""
if task_splits != [TRAIN]:
raise ValueError(
f"GPTSampleGenerator expected task_splits=[{TRAIN}], got task_splits={task_splits}"
)
task_split = task_splits[0]
# GPT-generated programs must type-infer to a request type in this set
train_task_request_types = self.get_valid_request_types(
experiment_state, TRAIN, task_ids_in_splits[TRAIN]
)
train_programs = set()
for f in experiment_state.get_frontiers_for_ids(
task_split=TRAIN, task_ids=task_ids_in_splits[TRAIN]
):
train_programs.update([e.program for e in f.entries])
query_results_filepath = os.path.join(
os.getcwd(),
experiment_state.get_checkpoint_directory(),
self.query_results_file,
)
if use_cached and not os.path.exists(query_results_filepath):
print(
f"WARNING: No query results found at {query_results_filepath}. Disabling use_cached."
)
use_cached = False
# Default to drawing all samples from the same prompt
if n_samples_per_query is None:
n_samples_per_query = n_samples
# Set the number of prompt attempts to something reasonable
min_queries = np.ceil(n_samples / n_samples_per_query)
if max_queries is None:
max_queries = int(2 * min_queries)
elif max_queries < min_queries:
raise ValueError(
f"max_queries={max_queries} must be >= min_queries={min_queries}"
)
results_by_query = []
sampled_programs = set()
parse_results_valid = []
for query_id in range(max_queries):
# Construct an initial prompt with the max number of tasks we think
# we can fit based on estimates from GPT-2 tokenizer.
prompt = self.construct_initial_prompt(
experiment_state=experiment_state,
task_ids_in_splits=task_ids_in_splits,
body_task_types=body_task_types,
final_task_types=final_task_types,
final_task_origin=final_task_origin,
function_name_classes=function_name_classes,
prepend_dsl_description=prepend_dsl_description,
line_separator=line_separator,
max_tokens_completion_beta=max_tokens_completion_beta,
verbose=verbose,
)
# Iteratively remove tasks from the prompt until query success.
max_retries = len(prompt.body_task_data)
for retry_i in range(max_retries):
if retry_i > 0:
prompt.remove_last_body_task()
print(
f"Retry ({retry_i} / {max_retries}): Prompt reduced to {len(prompt)} tasks."
)
token_stats = self.get_token_stats(
prompt=prompt, max_tokens_completion_beta=max_tokens_completion_beta
)
if use_cached:
# Load cached prompt for query_id
with open(query_results_filepath, "r") as f:
prompt_json = json.load(f)["results_by_query"][query_id][
"prompt"
]
prompt.load_from_dict(prompt_json)
if query_id % query_print_frequency == 0:
print(
f"[QUERY {query_id}/{max_queries}]: Prompting GPT ({len(prompt)} tasks, {token_stats['token_count_prompt']} tokens) for {n_samples_per_query} samples ({token_stats['max_tokens_completion']} max tokens)..."
)
completion, cache_used = self.get_completion_for_prompt(
query_id=query_id,
experiment_state=experiment_state,
prompt_text=prompt.serialize(),
query_results_filepath=query_results_filepath,
n_samples_per_query=n_samples_per_query,
temperature=temperature,
max_tokens=token_stats["max_tokens_completion"],
line_separator=line_separator,
use_cached=use_cached,
debug=debug,
)
if not isinstance(completion, OpenAIObject):
if isinstance(completion, InvalidRequestError):
if retry_i >= max_retries - 1:
raise ValueError(f"Max retries {max_retries} exceeded.")
continue
elif isinstance(completion, RateLimitError):
raise completion
elif isinstance(completion, APIConnectionError):
raise completion
elif isinstance(completion, dict):
# completion is a dict when debug=True
assert debug
else:
raise ValueError(
f"Unexpected completion type: {type(completion)}"
)
parse_results = self.parse_completion(
completion,
experiment_state=experiment_state,
task_split=task_split,
task_ids=task_ids_in_splits[task_split],
valid_request_types=train_task_request_types,
function_name_classes=function_name_classes,
evaluate_samples=evaluate_samples,
compute_likelihoods=compute_likelihoods,
verbose=verbose,
)
results_by_query.append(
{
"query_id": query_id,
"token_stats": token_stats,
"prompt": prompt.to_dict(),
"completion": completion.to_dict_recursive()
if not debug
else completion,
"parse_results": parse_results,
}
)
for result_data in parse_results:
result_data["query_id"] = query_id
if result_data["valid"]:
p = Program.parse(result_data["program"])
if (p not in train_programs) and (p not in sampled_programs):
sampled_programs.add(p)
parse_results_valid.append(result_data)
# Stop as soon as target n_samples is reached, even if there are more valid programs in the results.
if len(sampled_programs) >= n_samples:
break
if query_id % query_print_frequency == 0:
print(
f"[QUERY {query_id}/{max_queries}]: Returned {len(list(filter(lambda x: x['valid'], parse_results)))}/{n_samples_per_query} valid samples."
)
if verbose:
print(prompt)
if PROGRAMS not in prompt.final_task_types:
print("Ground truth program:")
print(prompt.final_task_data["task_program"])
print("GPT completions:")
for result_data in parse_results:
if result_data.get("tasks_solved", False):
status_emoji = "🏆"
elif result_data["valid"]:
status_emoji = "✅"
else:
status_emoji = "❌"
print(f"{status_emoji} {result_data['text']}")
print(
f"[STATUS]: Sampled {len(sampled_programs)}/{n_samples} unique, valid samples."
)
break
if len(sampled_programs) >= n_samples:
break
all_tasks_solved = set()
for result_data in parse_results_valid:
if result_data.get("tasks_solved", False):
all_tasks_solved.update(result_data["tasks_solved"])
# Save results to file.
query_results = {
"params": {
"n_samples": n_samples,
"n_samples_per_query": n_samples_per_query,
"max_queries": max_queries,
"temperature": temperature,
"engine": self.engine,
"line_separator": line_separator,
"use_cached": use_cached,
"debug": debug,
"body_task_types": body_task_types,
"final_task_types": final_task_types,
"final_task_origin": final_task_origin,
"function_name_classes": function_name_classes,
"compute_likelihoods": compute_likelihoods,
},
"results": {
"n_queries": query_id + 1,
"n_sampled_programs": len(sampled_programs),
"n_tasks_solved": len(all_tasks_solved),
"programs_valid": parse_results_valid,
"tasks_solved": list(all_tasks_solved),
},
"results_by_query": results_by_query,
}
if not debug and not cache_used:
with open(query_results_filepath, "w") as f:
json.dump(query_results, f, indent=4)
print(f"Wrote results: {query_results_filepath}")
# Update experiment_state.
self.add_samples_to_experiment_state(
experiment_state=experiment_state,
task_split=task_split,
parse_results_valid=parse_results_valid,
evaluate_samples=evaluate_samples,
compute_likelihoods=compute_likelihoods,
)
return query_results
def get_valid_request_types(
self,
experiment_state,
split,
task_ids,
):
request_types = set(
[t.request for t in experiment_state.get_tasks_for_ids(split, task_ids)]
)
assert len(request_types) > 0
return request_types
def construct_initial_prompt(
self,
experiment_state,
task_ids_in_splits,
body_task_types,
final_task_types,
final_task_origin,
function_name_classes,
prepend_dsl_description,
line_separator,
max_tokens_completion_beta,
verbose,
):
rng = experiment_state.metadata[RANDOM_GENERATOR]
non_empty_task_ids = [
f.task.name
for f in experiment_state.get_non_empty_frontiers_for_split(TRAIN)
]
# Random ordering of the body tasks
body_task_ids = list(rng.permutation(task_ids_in_splits[TRAIN]))
# Filter body_task_ids to only include tasks that have non-empty frontiers.
body_task_ids = [t for t in body_task_ids if t in non_empty_task_ids]
if len(body_task_ids) < 2:
raise ValueError(
"At least 2 tasks must have non-empty frontiers to construct a prompt."
)
if final_task_origin == GPTSampleGenerator.FINAL_TASK_ORIGIN_DEFAULT:
final_task_id = body_task_ids[-1]
body_task_ids = body_task_ids[:-1]
elif final_task_origin == GPTSampleGenerator.FINAL_TASK_ORIGIN_RANDOM_TRAIN:
final_task_id = rng.choice(
[
t.name
for t in experiment_state.tasks[TRAIN]
if t.name not in task_ids_in_splits[TRAIN]
]
)
elif final_task_origin == GPTSampleGenerator.FINAL_TASK_ORIGIN_UNSOLVED_TRAIN:
final_task_id = rng.choice(
[
t.name
for t in experiment_state.tasks[TRAIN]
if t.name not in non_empty_task_ids
]
)
elif final_task_origin == GPTSampleGenerator.FINAL_TASK_ORIGIN_RANDOM_TEST:
final_task_id = rng.choice([t.name for t in experiment_state.tasks[TEST]])
else:
raise ValueError(f"Unknown final_task_origin={final_task_origin}")
# Iteratively add tasks to the body until we exceed the token budget
prompt = None
for body_task_i in range(len(body_task_ids)):
body_task_ids_for_prompt = body_task_ids[: body_task_i + 1]
prompt_i = Prompt(
experiment_state=experiment_state,
body_task_ids=body_task_ids_for_prompt,
final_task_id=final_task_id,
body_task_types=body_task_types,
final_task_types=final_task_types,
final_task_split=(
TEST
if final_task_origin
== GPTSampleGenerator.FINAL_TASK_ORIGIN_RANDOM_TEST
else TRAIN
),
function_name_classes=function_name_classes,
prepend_dsl_description=prepend_dsl_description,
line_separator=line_separator,
# TODO(gg): Support for configuring prompt prefixes.
)
# Estimate token budgets
token_stats = self.get_token_stats(
prompt=prompt_i, max_tokens_completion_beta=max_tokens_completion_beta
)
if token_stats["token_count_prompt"] <= token_stats["max_tokens_prompt"]:
prompt = prompt_i
if verbose:
print(
f"Prompt construction ({body_task_i+1} / {len(body_task_ids)}): {token_stats['token_count_prompt']} (prompt; max {token_stats['max_tokens_prompt']}) + {token_stats['max_tokens_completion']} (completion allocation) = {token_stats['token_count_prompt'] + token_stats['max_tokens_completion']} tokens"
)
else:
break
if prompt is None:
raise ValueError(f"Failed to construct prompt.")
assert body_task_i > 0
return prompt
def get_token_stats(self, prompt, max_tokens_completion_beta):
token_count_last_program = self.count_tokens_gpt2(
str(prompt.get_last_program())
)
token_count_prompt = self.count_tokens_gpt2(prompt.serialize())
# Allocate some multiple of the last program's tokens for the completion
max_tokens_completion = int(
token_count_last_program * max_tokens_completion_beta
)
# The completion shouldn't take up more than 50% of the tokens
max_tokens_completion = min(
max_tokens_completion, int(self.ENGINE_MAX_TOKENS / 2)
)
# Allocate the remainder of the token budget to the prompt
max_tokens_prompt = int(self.ENGINE_MAX_TOKENS - max_tokens_completion)
token_stats = {
"token_count_prompt": token_count_prompt,
"token_count_last_program": token_count_last_program,
"max_tokens_prompt": max_tokens_prompt,
"max_tokens_completion": max_tokens_completion,
}
return token_stats
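    # Worked example of the budgeting in get_token_stats above (illustrative numbers only):
    # with ENGINE_MAX_TOKENS = 4096, a last program of 120 tokens and max_tokens_completion_beta = 2.0,
    # the completion budget is min(int(120 * 2.0), 4096 // 2) = 240 tokens, leaving
    # 4096 - 240 = 3856 tokens of budget for the prompt itself.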
def get_completion_for_prompt(
self,
query_id,
experiment_state,
prompt_text,
query_results_filepath,
n_samples_per_query,
temperature,
max_tokens,
line_separator,
use_cached,
debug,
):
if debug:
# Debugging query that returns programs.
cache_used = True
completion = self.query_mock(
experiment_state, n_samples=n_samples_per_query
)
# For debugging only - does not verify that the cached completion matches the desired query parameters
elif use_cached and os.path.exists(query_results_filepath):
cache_used = True
print("Using cached examples....")
with open(query_results_filepath, "r") as f:
query_results = json.load(f)
# Ensure that the cached query matches the desired query parameters.
assert (
query_results["params"]["n_samples_per_query"]
== n_samples_per_query
)
assert query_results["params"]["temperature"] == temperature
assert query_results["params"]["engine"] == self.ENGINE
assert query_results["params"]["line_separator"] == line_separator
# Get the cached completion for the particular query_id.
assert (
query_results["results_by_query"][query_id]["query_id"] == query_id
)
completion_data = query_results["results_by_query"][query_id][
"completion"
]
completion = Completion()
completion.refresh_from(completion_data)
else:
cache_used = False
completion = self.query_completion(
prompt_text,
n_samples=n_samples_per_query,
temperature=temperature,
max_tokens=max_tokens,
line_separator=line_separator,
)
return completion, cache_used
def parse_completion(
self,
completion,
experiment_state,
task_split: str,
task_ids: list,
valid_request_types: Set[TypeConstructor] = None,
function_name_classes: list = [LAPSGrammar.DEFAULT_FUNCTION_NAMES],
evaluate_samples: bool = True,
compute_likelihoods: bool = True,
verbose: bool = False,
):
grammar = experiment_state.models[model_loaders.GRAMMAR]
parse_results = []
for choice in completion["choices"]:
program_str_gpt = choice["text"]
# CHECK 1: Does the program parse?
try:
# Write the program back into the DreamCoder form from whatever it was initially in.
program_str = grammar.show_program(
program_str_gpt, input_name_class=function_name_classes
)
p = Program.parse(program_str)
except Exception as e:
if verbose:
print(f"Failed to parse ({type(e)}): {program_str_gpt}")
parse_results.append(
{
"text": program_str_gpt,
"valid": False,
"error": GPTSampleGenerator.ERROR_PARSE,
}
)
continue
# CHECK 2: Does the program typecheck?
try:
p_type = p.infer()
except Exception:
if verbose:
print(f"Type inference failure for: {str(p)}")
parse_results.append(
{
"text": program_str_gpt,
"valid": False,
"error": GPTSampleGenerator.ERROR_INFER,
}
)
continue
# CHECK 3: Is the inferred type in the set of valid request types?
if valid_request_types is not None:
if p_type not in valid_request_types:
if verbose:
print(
f"Inferred type {str(p_type)} not in `valid_request_types` {valid_request_types} for program: {str(p)}"
)
parse_results.append(
{
"text": program_str_gpt,
"valid": False,
"error": GPTSampleGenerator.ERROR_INVALID_TYPE,
}
)
continue
# CHECK 4: Does the program have free variables?
if not p.closed:
if verbose:
print(f"Program has free variables: {str(p)}")
parse_results.append(
{
"text": program_str_gpt,
"valid": False,
"error": GPTSampleGenerator.ERROR_FREE_VARIABLES,
}
)
continue
# CHECK 5: Can we convert the program to eta long form?
if compute_likelihoods:
try:
# Hack to avoid fatal error when computing likelihood summaries during rescoreFrontier
p = EtaLongVisitor(request=p_type).execute(p)
except:
if verbose:
print(f"Error converting to ETA Long for {p}")
parse_results.append(
{
"text": program_str_gpt,
"valid": False,
"error": GPTSampleGenerator.ERROR_ETA_LONG,
}
)
continue
# CHECK 6: Can we compute a log likelihood?
if compute_likelihoods:
try:
grammar.logLikelihood(p_type, p)
except:
if verbose:
print(f"Unable to compute likelihood under grammar: {p}")
parse_results.append(
{
"text": program_str_gpt,
"valid": False,
"error": GPTSampleGenerator.ERROR_LIKELIHOOD,
}
)
continue
# CHECK 7: Does the program solve any tasks?
task_attempted = None
tasks_solved = []
if evaluate_samples:
for task in experiment_state.get_tasks_for_ids(task_split, task_ids):
if task.check(p, timeout=grammar.DEFAULT_EVALUATION_TIMEOUT):
tasks_solved.append(task.name)
# TODO: break on first solved task?
if len(task_ids) == 1:
task_attempted = task_ids[0]
parse_results.append(
{
"text": program_str_gpt,
"valid": True,
"program": str(p),
"type": str(p_type),
"type_json": p_type.json(),
"hash": abs(hash(str(p))),
"task_attempted": task_attempted,
"tasks_solved": tasks_solved,
}
)
return parse_results
def add_samples_to_experiment_state(
self,
experiment_state,
task_split: str,
parse_results_valid: list,
evaluate_samples: bool = False,
compute_likelihoods: bool = True,
add_samples: bool = True,
):
grammar = experiment_state.models[model_loaders.GRAMMAR]
for result_data in parse_results_valid:
program = Program.parse(result_data["program"])
# If the program solves any tasks, add it to the respective task frontier(s).
if evaluate_samples and len(result_data["tasks_solved"]) > 0:
for task in experiment_state.get_tasks_for_ids(
task_split=task_split, task_ids=result_data["tasks_solved"]
):
new_frontier = Frontier(
frontier=[
FrontierEntry(
program=program,
logPrior=0.0,
logLikelihood=0.0,
origin=self.name,
)
],
task=task,
)
if compute_likelihoods:
try:
new_frontier = grammar.rescoreFrontier(new_frontier)
except:
# GG: This should really never happen due to the CHECK 6 but finding it does in practice on clevr dataset
print(
f"ERROR calling rescoreFrontier on GPT-generated program {program}"
)
continue
experiment_state.task_frontiers[task_split][
task
] = experiment_state.task_frontiers[task_split][task].combine(
new_frontier
)
# If the program doesn't solve any tasks, add it to the experiment state as a sample.
elif add_samples:
sample_task = Task(
name=f"sample_{result_data['task_attempted']}",
request=Type.fromjson(result_data["type_json"]),
examples=[],
)
sample_frontier = Frontier(
frontier=[
FrontierEntry(
program=program,
logPrior=0.0,
logLikelihood=0.0,
origin=self.name,
)
],
task=sample_task,
)
if compute_likelihoods:
sample_frontier = grammar.rescoreFrontier(sample_frontier)
# If a sample task already exists for this task, combine
if sample_task in experiment_state.sample_tasks[task_split]:
experiment_state.sample_frontiers[task_split][
sample_task
] = experiment_state.sample_frontiers[task_split][
sample_task
].combine(
sample_frontier
)
# Otherwise, create a new sample task
else:
experiment_state.sample_tasks[task_split].append(sample_task)
experiment_state.sample_frontiers[task_split][
sample_task
] = sample_frontier
# Otherwise, discard the sample
else:
continue
def query_mock(self, experiment_state, n_samples: int = 3, **kwargs):
"""Debugging query that returns a sample of programs from the task."""
rng = experiment_state.metadata[RANDOM_GENERATOR]
frontiers = experiment_state.get_frontiers_for_ids(
task_split=TRAIN, task_ids=ALL
)
frontiers = rng.choice(frontiers, size=n_samples, replace=True)
program_str_list = [str(f.entries[0].program) for f in frontiers]
completion = dict(choices=[dict(text=p_str) for p_str in program_str_list])
return completion
@ModelRegistry.register
class CodexSampleGenerator(GPTSampleGenerator):
"""For backwards compatibility with templates that reference `codex_sample_generator`."""
name = "codex_sample_generator"
def __init__(self, experiment_state=None):
super().__init__(engine=GPTBase.ENGINE_CODEX)
| [
"results_by_query",
"None"
] |
2024-01-10 | CatherineWong/laps | src~models~gpt_solver.py | """
gpt_solver.py | Author: Gabe Grand.
Queries GPT to solve tasks.
"""
import json
import os
from collections import defaultdict
from openai.error import InvalidRequestError
from openai.openai_object import OpenAIObject
import src.models.model_loaders as model_loaders
from src.experiment_iterator import RANDOM_GENERATOR, SKIPPED_MODEL_FN
from src.models.gpt_base import DEFAULT_LINE_SEPARATOR, Prompt
from src.models.laps_grammar import LAPSGrammar
from src.models.sample_generator import GPTSampleGenerator
from src.task_loaders import LANGUAGE, PROGRAMS, TRAIN
ModelRegistry = model_loaders.ModelLoaderRegistries[model_loaders.LLM_SOLVER]
@ModelRegistry.register
class GPTSolver(GPTSampleGenerator):
name = "gpt_solver"
results_file = "gpt_solver_results.json"
@staticmethod
def load_model(experiment_state, **kwargs):
return GPTSolver(experiment_state=experiment_state, **kwargs)
def __init__(self, experiment_state=None, engine=None):
        super().__init__(engine=engine)
def infer_programs_for_tasks(
self,
experiment_state,
task_split: str,
task_batch_ids: list,
# Sampling
n_samples_per_query: int = None,
n_queries_per_task: int = None,
n_queries_per_task_base_dsl: int = 0,
early_stop_on_solution: bool = True,
max_retries: int = None,
add_samples: bool = False,
# Prompt construction
body_task_types: list = [LANGUAGE, PROGRAMS],
final_task_types: list = [LANGUAGE],
function_name_classes: list = [LAPSGrammar.DEFAULT_FUNCTION_NAMES],
prepend_dsl_description: bool = False,
line_separator: str = DEFAULT_LINE_SEPARATOR,
# GPT parameters
temperature: float = 0.40,
max_tokens_completion_beta: float = 2.0,
# Resume from prior runs
resume_strategy: str = None,
# Utilities
verbose: bool = False,
):
if (resume_strategy == "first" and experiment_state.is_first_iteration()) or (
resume_strategy == "every"
):
# If RESUME_CHECKPOINT_DIRECTORY not defined, default to self checkpoint directory
results_filepath_ext = os.path.join(
os.getcwd(),
experiment_state.get_checkpoint_directory_maybe_resume(),
task_split,
self.results_file,
)
if os.path.exists(results_filepath_ext):
with open(results_filepath_ext, "r") as f:
results_json = json.load(f)
# Update experiment state from file
self.add_samples_to_experiment_state(
experiment_state=experiment_state,
task_split=task_split,
parse_results_valid=results_json["parse_results_valid"],
evaluate_samples=True,
compute_likelihoods=True,
add_samples=add_samples,
)
# Copy external results file to checkpoint directory
results_filepath = os.path.join(
os.getcwd(),
experiment_state.get_checkpoint_directory(),
task_split,
self.results_file,
)
os.makedirs(os.path.dirname(results_filepath), exist_ok=True)
with open(results_filepath, "w") as f:
json.dump(results_json, f, indent=4)
print(f"Loaded GPT results from: {results_filepath_ext}")
return {
SKIPPED_MODEL_FN: True,
}
else:
print(f"GPT results not found at: {results_filepath_ext}")
if experiment_state.is_first_iteration() and task_split == TRAIN:
raise ValueError("Unable to resume first iteration.")
task_to_solutions = defaultdict(list)
results_by_query = []
parse_results_valid = []
for task_i, task_id in enumerate(task_batch_ids):
for query_i in range(n_queries_per_task + n_queries_per_task_base_dsl):
# After `n_queries_per_task`, run some `n_queries_per_task_base_dsl` with no abstractions
include_abstractions = query_i < n_queries_per_task
prompt = self.construct_initial_prompt(
experiment_state=experiment_state,
task_split=task_split,
task_id=task_id,
body_task_types=body_task_types,
final_task_types=final_task_types,
function_name_classes=function_name_classes,
include_abstractions=include_abstractions,
prepend_dsl_description=prepend_dsl_description,
line_separator=line_separator,
max_tokens_completion_beta=max_tokens_completion_beta,
verbose=False,
)
max_retries = len(prompt.body_task_data) or max_retries
for retry_i in range(max_retries):
if retry_i > 0:
prompt.remove_last_body_task()
print(
f"Retry ({retry_i} / {max_retries}): Prompt reduced to {len(prompt)} tasks."
)
token_stats = self.get_token_stats(
prompt=prompt,
max_tokens_completion_beta=max_tokens_completion_beta,
)
completion = self.query_completion(
prompt,
n_samples=n_samples_per_query,
temperature=temperature,
max_tokens=token_stats["max_tokens_completion"],
line_separator=line_separator,
)
if not isinstance(completion, OpenAIObject):
if isinstance(completion, InvalidRequestError):
if retry_i >= max_retries - 1:
raise ValueError(f"Max retries {max_retries} exceeded.")
continue
else:
raise ValueError(
f"Unexpected completion type: {type(completion)}"
)
valid_request_types = self.get_valid_request_types(
experiment_state, task_split, task_batch_ids
)
parse_results = self.parse_completion(
completion,
experiment_state=experiment_state,
task_split=task_split,
task_ids=[task_id],
valid_request_types=valid_request_types,
function_name_classes=function_name_classes,
evaluate_samples=True,
compute_likelihoods=True,
verbose=verbose,
)
results_by_query.append(
{
"task_id": task_id,
"query_i": query_i,
"include_abstractions": include_abstractions,
"token_stats": token_stats,
"prompt": prompt.to_dict(),
"completion": completion.to_dict_recursive(),
"parse_results": parse_results,
}
)
task_solved = False
for result_data in parse_results:
result_data["query_i"] = query_i
result_data["include_abstractions"] = include_abstractions
if result_data["valid"]:
parse_results_valid.append(result_data)
if result_data.get("tasks_solved"):
# Sanity check
assert len(result_data["tasks_solved"]) == 1
assert result_data["tasks_solved"][0] == task_id
task_to_solutions[task_id].append(result_data)
task_solved = True
# Print query results
if verbose:
print("-" * 12)
print(prompt)
print("-" * 12)
print(f"GPT ({self.ENGINE}) completions:")
for result_data in parse_results:
if result_data.get("tasks_solved"):
status_emoji = "🏆"
elif result_data["valid"]:
status_emoji = "❎"
else:
status_emoji = "❌"
print(f"{status_emoji} {result_data['text']}")
print("")
print(
f"[TASK {task_i}/{len(task_batch_ids)} QUERY {query_i}/{n_queries_per_task}]: {task_id}",
flush=True,
)
if (
n_queries_per_task_base_dsl > 0
and query_i >= n_queries_per_task
):
print(
f"Queried using Base DSL: {LAPSGrammar.DEFAULT_FUNCTION_NAMES}"
)
n_tasks_solved = len(
[
t
for t, results in task_to_solutions.items()
if len(results) > 0
]
)
print(
f"Tasks solved so far: {n_tasks_solved}/{task_i+1}", flush=True
)
# Query succeeded: break from retry loop
break
if task_solved and early_stop_on_solution:
break
tasks_solved = [
t for t, results in task_to_solutions.items() if len(results) > 0
]
# Collect results
results = {
"params": {
"n_samples_per_query": n_samples_per_query,
"n_queries_per_task": n_queries_per_task,
"n_queries_per_task_base_dsl": n_queries_per_task_base_dsl,
"temperature": temperature,
"engine": self.ENGINE,
"line_separator": line_separator,
"body_task_types": body_task_types,
"final_task_types": final_task_types,
"function_name_classes": function_name_classes,
},
"summary": {
"n_tasks_solved": len(tasks_solved),
"tasks_solved": list(tasks_solved),
},
"task_to_solutions": task_to_solutions,
"parse_results_valid": parse_results_valid,
"results_by_query": results_by_query,
}
if n_queries_per_task_base_dsl:
tasks_solved_base_dsl = [
t
for t, results in task_to_solutions.items()
if len(results) > 0
and len(list(filter(lambda x: x["include_abstractions"], results)))
== 0
]
results["summary"]["n_tasks_solved_base_dsl"] = len(
tasks_solved_base_dsl
)
results["summary"]["tasks_solved_base_dsl"] = tasks_solved_base_dsl
# Save results to file
results_filepath = os.path.join(
os.getcwd(),
experiment_state.get_checkpoint_directory(),
task_split,
self.results_file,
)
os.makedirs(os.path.dirname(results_filepath), exist_ok=True)
with open(results_filepath, "w") as f:
json.dump(results, f, indent=4)
if verbose:
print(f"Wrote results: {results_filepath}")
# Update experiment_state
self.add_samples_to_experiment_state(
experiment_state=experiment_state,
task_split=task_split,
parse_results_valid=parse_results_valid,
evaluate_samples=True,
compute_likelihoods=True,
add_samples=add_samples,
)
def construct_initial_prompt(
self,
experiment_state,
task_split,
task_id,
body_task_types,
final_task_types,
function_name_classes,
include_abstractions,
prepend_dsl_description,
line_separator,
max_tokens_completion_beta,
verbose,
):
rng = experiment_state.metadata[RANDOM_GENERATOR]
non_empty_task_ids = [
f.task.name
for f in experiment_state.get_non_empty_frontiers_for_split(TRAIN)
]
# Random ordering of the body tasks
body_task_ids = list(rng.permutation(non_empty_task_ids))
if len(body_task_ids) < 2:
raise ValueError(
"At least 2 tasks must have non-empty frontiers to construct a prompt."
)
prompt = None
for body_task_i in range(len(body_task_ids)):
body_task_ids_for_prompt = body_task_ids[: body_task_i + 1]
prompt_i = Prompt(
experiment_state=experiment_state,
body_task_ids=body_task_ids_for_prompt,
final_task_id=task_id,
body_task_types=body_task_types,
final_task_types=final_task_types,
final_task_split=task_split,
function_name_classes=function_name_classes,
include_abstractions=include_abstractions,
prepend_dsl_description=prepend_dsl_description,
line_separator=line_separator,
)
# Estimate token budgets
token_stats = self.get_token_stats(
prompt=prompt_i, max_tokens_completion_beta=max_tokens_completion_beta
)
if token_stats["token_count_prompt"] <= token_stats["max_tokens_prompt"]:
prompt = prompt_i
if verbose:
print(
f"Prompt construction ({body_task_i+1} / {len(body_task_ids)}): {token_stats['token_count_prompt']} (prompt; max {token_stats['max_tokens_prompt']}) + {token_stats['max_tokens_completion']} (completion allocation) = {token_stats['token_count_prompt'] + token_stats['max_tokens_completion']} tokens"
)
else:
break
if prompt is None:
raise ValueError(f"Failed to construct prompt.")
assert body_task_i > 0
return prompt
| [
"None"
] |
2024-01-10 | CatherineWong/laps | src~models~gpt_base.py | """
gpt_base.py | Author: Gabe Grand.
Base class containing utilities for working with the GPT language model.
"""
import json
import os
import time
from abc import ABCMeta, abstractmethod
from typing import Union
import openai
from openai.error import (
APIConnectionError,
APIError,
InvalidRequestError,
RateLimitError,
)
from transformers import GPT2TokenizerFast
from src.experiment_iterator import RANDOM_GENERATOR
from src.models.laps_grammar import LAPSGrammar
from src.models.model_loaders import GRAMMAR
from src.task_loaders import LANGUAGE, PROGRAMS, TEST, TRAIN
DEFAULT_LINE_SEPARATOR = "\n"
class BasePrompt(metaclass=ABCMeta):
TASK_TYPES = [LANGUAGE, PROGRAMS]
DEFAULT_MESSAGE_SEPARATOR = (
DEFAULT_LINE_SEPARATOR + "======" + DEFAULT_LINE_SEPARATOR
)
DEFAULT_PREFIX_PROGRAM = ""
DEFAULT_PREFIX_LANGUAGE = "-- " # Haskell-style comment
# https://platform.openai.com/docs/api-reference/chat
ROLE_ASSISTANT = "assistant"
ROLE_SYSTEM = "system"
ROLE_USER = "user"
@abstractmethod
def __str__(self):
pass
@abstractmethod
def to_dict(self):
pass
@abstractmethod
def load_from_dict(self):
pass
@abstractmethod
def to_chat_format(self):
pass
def __repr__(self):
return self.json()
def json(self):
return json.dumps(self.to_dict(), indent=4)
def serialize(self):
return self.__str__()
def chat_message(self, text, role=None):
role = role or self.ROLE_USER
return {
"role": role,
"content": text,
}
class Prompt(BasePrompt):
def __init__(
self,
experiment_state,
body_task_ids: list,
final_task_id: str,
body_task_types: list = [LANGUAGE, PROGRAMS],
final_task_types: list = [LANGUAGE],
final_task_split: str = TRAIN,
line_separator: str = DEFAULT_LINE_SEPARATOR,
prefix_language: str = BasePrompt.DEFAULT_PREFIX_LANGUAGE,
prefix_program: str = BasePrompt.DEFAULT_PREFIX_PROGRAM,
function_name_classes: list = [
LAPSGrammar.HUMAN_READABLE,
LAPSGrammar.DEFAULT_FUNCTION_NAMES,
],
include_abstractions: bool = True,
prepend_dsl_description: bool = False,
):
assert isinstance(body_task_ids, list)
assert len(body_task_ids) > 0
assert isinstance(final_task_id, str)
assert final_task_split in (TRAIN, TEST)
# Enforce canonical ordering of task_types
body_task_types = [t for t in self.TASK_TYPES if t in body_task_types]
final_task_types = [t for t in self.TASK_TYPES if t in final_task_types]
assert len(body_task_types) > 0
assert len(final_task_types) > 0
assert PROGRAMS in body_task_types
self.experiment_state = experiment_state
self.grammar = experiment_state.models[GRAMMAR]
self.rng = experiment_state.metadata[RANDOM_GENERATOR]
self.body_task_types = body_task_types
self.final_task_types = final_task_types
self.final_task_split = final_task_split
self.line_separator = line_separator
self.prefix_language = prefix_language
self.prefix_program = prefix_program
self.function_name_classes = function_name_classes
self.body_task_data = [
self._get_task_data(
task_split=TRAIN,
task_id=task_id,
task_types=body_task_types,
beta_reduce_program=(not include_abstractions),
)
for task_id in body_task_ids
]
self.final_task_data = self._get_task_data(
task_id=final_task_id,
task_types=final_task_types,
task_split=final_task_split,
beta_reduce_program=(not include_abstractions),
)
self.prepend_dsl_description = prepend_dsl_description
self.dsl_description = (
self._get_dsl_description(include_abstractions=include_abstractions)
if prepend_dsl_description
else ""
)
def __len__(self):
return len(self.body_task_data) + 1
def __str__(self):
return (
self.line_separator.join([x["content"] for x in self.to_message_list()])
+ "\n"
)
def to_message_list(self):
prompt_list = []
if self.prepend_dsl_description:
prompt_list += [
self.chat_message(self.dsl_description, role=self.ROLE_SYSTEM)
]
# Write the body tasks
prompt_list += [self.chat_message("Here are some example programs:")]
for task_data in self.body_task_data:
if LANGUAGE in self.body_task_types:
prompt_list += [
self.chat_message(self.prefix_language + task_data["task_language"])
]
if PROGRAMS in self.body_task_types:
prompt_list += [
self.chat_message(
self.prefix_program + task_data["task_program"],
role=self.ROLE_ASSISTANT,
)
]
# Write the final task
if LANGUAGE in self.final_task_types:
prompt_list += [
self.chat_message(
self.prefix_language + self.final_task_data["task_language"],
)
]
if PROGRAMS in self.final_task_types:
prompt_list += [
self.chat_message(
self.prefix_program + self.final_task_data["task_program"],
role=self.ROLE_ASSISTANT,
)
]
return prompt_list
def to_chat_format(self):
messages = self.to_message_list()
return messages
def to_dict(self):
return {
"dsl_description": self.dsl_description,
"body_task_data": self.body_task_data,
"final_task_data": self.final_task_data,
}
def load_from_dict(self, d):
self.dsl_description = d["dsl_description"]
self.body_task_data = d["body_task_data"]
self.final_task_data = d["final_task_data"]
def get_last_program(self):
if PROGRAMS in self.final_task_types:
return self.final_task_data["task_program"]
else:
return self.body_task_data[-1]["task_program"]
def remove_last_body_task(self):
if len(self.body_task_data) > 1:
self.body_task_data = self.body_task_data[:-1]
else:
raise ValueError("Cannot remove single remaining body task from prompt.")
def _get_task_data(
self,
task_id: str,
task_types: list,
task_split: str = TRAIN,
use_mdl_program: bool = True,
beta_reduce_program: bool = False,
):
frontier = self.experiment_state.get_frontiers_for_ids(task_split, [task_id])[0]
# Optionally, get the program
if PROGRAMS in task_types:
programs = [e.program for e in frontier.entries]
if use_mdl_program:
task_program = self.rng.choice(self.grammar.get_mdl_programs(programs))
else:
task_program = self.rng.choice(programs)
if beta_reduce_program:
task_program = task_program.betaNormalForm()
task_program = self.grammar.show_program(
task_program, name_classes=self.function_name_classes
)
else:
task_program = None
# Optionally, get the language
if LANGUAGE in task_types:
task_language = self.rng.choice(
self.experiment_state.get_language_for_ids(task_split, [task_id])[0]
)
# Remove any line separators from the language
task_language = task_language.replace(self.line_separator, " ")
else:
task_language = None
return {
"task_id": task_id,
"task_program": task_program,
"task_language": task_language,
}
def _get_dsl_description(self, include_abstractions: bool = True):
dsl_fns = []
for primitive in self.grammar.primitives:
if primitive.isInvented and (not include_abstractions):
# Optionally, skip abstractions
continue
fn_name = self.grammar.get_name(
production_key=str(primitive), name_classes=self.function_name_classes
)
fn_type = primitive.infer()
if primitive.isInvented:
fn_body = str(
self.grammar.show_program(
str(primitive)[
1:
], # Remove leading `#` so that any inlined abstractions are replaced with their fn_name
name_classes=[
LAPSGrammar.HUMAN_READABLE,
LAPSGrammar.NUMERIC_FUNCTION_NAMES,
],
)
)
else:
fn_body = str(primitive)
fn_description = self.grammar.get_function_description(str(primitive))
dsl_fns.append((primitive, fn_name, fn_type, fn_body, fn_description))
dsl_description = (
"You are an expert programmer working in a language based on lambda calculus.\n"
+ "Your goal is to write programs that accomplish the tasks specified by the user.\n"
)
if "dsl_description_prefix" in self.experiment_state.metadata:
dsl_description += (
self.experiment_state.metadata["dsl_description_prefix"] + "\n"
)
dsl_description += "\nWrite programs using the available functions:\n\n"
for primitive, fn_name, fn_type, fn_body, fn_description in dsl_fns:
docstring = f"{fn_name} :: {fn_type}"
if primitive.isInvented:
docstring += f"\n{fn_body}"
if fn_description is not None:
docstring += f"\ndescription: {fn_description}"
dsl_description += docstring + "\n\n"
return dsl_description
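# Illustrative sketch of constructing and serializing a Prompt (not an exact call site from this
# repo; the task ids below are hypothetical placeholders):
# prompt = Prompt(
#     experiment_state=experiment_state,
#     body_task_ids=["task_0", "task_1"],
#     final_task_id="task_2",
#     body_task_types=[LANGUAGE, PROGRAMS],
#     final_task_types=[LANGUAGE],
# )
# prompt_text = prompt.serialize()      # newline-joined text for completion-style engines
# messages = prompt.to_chat_format()    # role/content message list for chat-style engines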
class GPTBase(object):
# https://platform.openai.com/docs/models
ENGINE_CODEX = "code-davinci-002"
ENGINE_GPT_3_5_TURBO = "gpt-3.5-turbo-0301"
ENGINE_GPT_4 = "gpt-4-0314"
ENGINE_DEFAULT = ENGINE_CODEX
# Max tokens for BOTH the prompt and the completion.
MAX_TOKENS_PER_ENGINE = {
ENGINE_CODEX: 4096, # 8001
ENGINE_GPT_3_5_TURBO: 4096,
ENGINE_GPT_4: 8192,
}
# Models that use chat completion format
CHAT_ENGINES = [ENGINE_GPT_3_5_TURBO, ENGINE_GPT_4]
# OpenAI organization IDs
ORG_MIT_CODE = "org-8jXkUFeFDJqIpWtgvtpuPjwm" # Use for all other models
ORG_PERSONAL = "org-fYb48minYCuDB6m3hu9SJVW8" # Use for Codex
def __init__(self, experiment_state=None, engine=None):
super().__init__()
if not os.getenv("OPENAI_API_KEY"):
raise ValueError(
"OPENAI_API_KEY is not set. Please set this in the shell via `export OPENAI_API_KEY=...`"
)
openai.api_key = os.environ["OPENAI_API_KEY"]
self.ENGINE = engine or self.ENGINE_DEFAULT
self.ENGINE_MAX_TOKENS = self.MAX_TOKENS_PER_ENGINE[self.ENGINE]
# Used for computing approximate token counts for queries
self.tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
self.tokenizer.model_max_length = self.ENGINE_MAX_TOKENS
os.environ["TOKENIZERS_PARALLELISM"] = str(False)
def query_completion(
self,
prompt: Union[Prompt, str],
n_samples: int,
best_of: int = 1,
temperature: float = 0.75,
max_tokens: int = 256, # Max tokens for completion only.
stop: str = DEFAULT_LINE_SEPARATOR,
line_separator: str = DEFAULT_LINE_SEPARATOR,
top_p=None,
logprobs=None,
max_attempts_rate_limit=5,
rate_limit_seconds=30,
):
pause_for_rate_limit = False
completion = None
for idx in range(max_attempts_rate_limit):
if pause_for_rate_limit:
print(
f"ERR: OpenAI rate limit. On attempt {idx}/{max_attempts_rate_limit} after waiting {rate_limit_seconds}s."
)
time.sleep(rate_limit_seconds)
rate_limit_seconds *= 2 # Exponential backoff
try:
completion = self._create_completion(
prompt=prompt,
temperature=temperature if top_p is None else 1.0,
top_p=top_p if temperature is None else 1.0,
n_samples=n_samples,
stop=stop,
best_of=best_of,
line_separator=line_separator,
max_tokens=max_tokens,
logprobs=logprobs,
)
return completion
except InvalidRequestError as e:
print(e)
return e
except (RateLimitError, APIConnectionError, APIError) as e:
print(e)
pause_for_rate_limit = True
completion = e
return completion
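    # With the defaults above (max_attempts_rate_limit=5, rate_limit_seconds=30), a rate-limited
    # query is retried after waiting 30s, 60s, 120s and 240s before the last error is returned.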
def is_chat_format(self):
return self.ENGINE in self.CHAT_ENGINES
def _create_completion(
self,
prompt,
temperature,
top_p,
n_samples,
best_of,
stop,
line_separator,
max_tokens,
logprobs,
):
if self.is_chat_format():
# Convert prompt text to ChatCompletion format
if isinstance(prompt, BasePrompt):
messages = prompt.to_chat_format()
else:
messages = [{"role": "user", "content": str(prompt)}]
openai.organization = self.ORG_MIT_CODE
completion = openai.ChatCompletion.create(
model=self.ENGINE,
messages=messages,
temperature=temperature if top_p is None else 1.0,
top_p=top_p if temperature is None else 1.0,
n=n_samples,
stop=stop,
max_tokens=max_tokens,
)
# Convert ChatCompletion -> Completion format
for choice in completion["choices"]:
choice["text"] = choice["message"]["content"]
else:
openai.organization = self.ORG_PERSONAL
completion = openai.Completion.create(
model=self.ENGINE,
prompt=str(prompt),
temperature=temperature if top_p is None else 1.0,
top_p=top_p if temperature is None else 1.0,
n=n_samples,
stop=stop,
max_tokens=max_tokens,
logprobs=logprobs,
)
return completion
def count_tokens_gpt2(self, text):
# TODO(gg): Consider preprocessing to collapse whitespace, which could
# bring the behavior more in line with the Codex tokenizer.
return len(self.tokenizer(text, truncation=False)["input_ids"])
| [
"task_language",
"[]",
"task_program",
"Here are some example programs:"
] |
2024-01-10 | guangyliu/LatentOps | code~examples~big_ae~run_latent_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import glob
import logging
import os
import pickle
import random
import torch
import torch.nn.functional as F
import numpy as np
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig, BertConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2ForLatentConnector
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
from pytorch_transformers import BertForLatentConnector, BertTokenizer
from collections import defaultdict
from modules import VAE #, DAAE
from utils import (TextDataset_Split, TextDataset_2Tokenizers, BucketingDataLoader)
import pdb
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2Config, GPT2ForLatentConnector, GPT2Tokenizer),
'bert': (BertConfig, BertForLatentConnector, BertTokenizer)
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
dataset = TextDataset_2Tokenizers(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
else:
dataset = TextDataset_Split(tokenizer, args, file_path=args.eval_data_file if evaluate else args.train_data_file, block_size=args.block_size)
return dataset
def build_dataload_and_cache_examples(args, tokenizer, evaluate=False):
if isinstance(tokenizer, list):
if not evaluate:
args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
file_path=args.train_data_file
else:
args.batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
file_path=args.eval_data_file
dataloader = BucketingDataLoader(file_path, args.batch_size, args.max_seq_length, tokenizer, args, bucket=100, shuffle=False)
else:
pass
return dataloader
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
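# Minimal usage sketch for top_k_top_p_filtering (mirrors the call pattern in sample_sequence below;
# comment only, not part of the original script):
# next_token_logits = outputs[0][0, -1, :] / temperature                      # 1-D logits over the vocabulary
# filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=40, top_p=0.9)
# next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)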
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros((1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask, 'target_mapping': target_mapping}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def sample_sequence_conditional_old(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0, device='cpu', decoder_tokenizer=None, eos_id=50259):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
# while True:
# mask = torch.zeros_like(past[:,0],dtype=torch.long) + 1
for _ in range(length):
inputs = {'input_ids': generated, 'past': past}
outputs = model(**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][:, -1, :] / temperature
next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token), dim=1)
# next_token_logits = outputs[0][0, -1, :] / temperature
# filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
# next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
# generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
# pdb.set_trace()
# if next_token.unsqueeze(0)[0,0].item() == decoder_tokenizer.encode('<EOS>')[0]:
# break
return generated
def sample_sequence_conditional(model, length, context, past=None, num_samples=1, temperature=1, top_k=0, top_p=0.0,
device='cpu', decoder_tokenizer=None, eos_id=50259, loss=False):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
# sets = set()
bz = context.size(0)
t1, t2, t3 = 0, 0, 0
alist = list(range(bz))
with torch.no_grad():
# while True:
# mask = torch.zeros_like(past[:, 0], dtype=torch.long) + 1
for ii in range(length):
inputs = {'input_ids': generated, 'past': past}
outputs = model(
**inputs) # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
next_token_logits = outputs[0][:, -1, :] / temperature
# next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1)
next_token = F.softmax(next_token_logits, dim=-1).max(-1, keepdim=True)[1]
generated = torch.cat((generated, next_token), dim=1)
tmp = (next_token.squeeze() == eos_id)
if ii == 0:
tmp22 = torch.zeros_like(tmp, device='cuda')
tmp22 = torch.logical_or(tmp22, tmp)
if False not in tmp22:
break
if loss:
outputs = model(input_ids=generated, past=past, labels=generated, label_ignore=decoder_tokenizer.pad_token_id)
rec_loss = (-outputs[0]).tolist()
return generated,rec_loss
return generated
def latent_code_from_text(text, tokenizer_encoder, model_vae, args):
tokenized1 = tokenizer_encoder.encode(text)
tokenized1 = [101] + tokenized1 + [102]
coded1 = torch.Tensor([tokenized1])
coded1 =torch.Tensor.long(coded1)
with torch.no_grad():
x0 = coded1
x0 = x0.to(args.device)
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
coded_length = len(tokenized1)
return latent_z, coded_length
def text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder):
past = latent_z
context_tokens = tokenizer_decoder.encode('<BOS>')
length = 128 # maximum length, but not used
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length= length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
num_samples=latent_z.size(0),
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = tokenizer_decoder,
eos_id=model_vae.eos_token_id
)
text_x1 = tokenizer_decoder.decode(out[0,:].tolist(), clean_up_tokenization_spaces=False)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
return text_x1
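# Hedged usage sketch for the two helpers above (assumes model_vae, tokenizer_encoder,
# tokenizer_decoder and args are already constructed elsewhere in this script):
# latent_z, _ = latent_code_from_text("the service was great", tokenizer_encoder, model_vae, args)
# reconstruction = text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder)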
# a wrapper function to choose between different play modes
def evaluate_latent_space(args, model_vae, encoder_tokenizer, decoder_tokenizer, prefix=""):
eval_dataloader = build_dataload_and_cache_examples(args, [encoder_tokenizer, decoder_tokenizer], evaluate=True)
# Eval!
logger.info("***** Running recontruction evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataloader))
logger.info(" Batch size = %d", args.per_gpu_eval_batch_size)
model_vae.eval()
model_vae = model_vae.module if hasattr(model_vae, 'module') else model_vae # Take care of distributed/parallel training
if args.play_mode == 'reconstruction':
result = calc_rec_lgy(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
result_file_name = "eval_recontruction_results.txt"
sampling(model_vae,decoder_tokenizer,args)
elif args.play_mode == 'reconstruction_std':
result = calc_rec_lgy(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
sampling(model_vae,decoder_tokenizer,args,result['std'])
sampling(model_vae,decoder_tokenizer,args,1.0)
result_file_name = "eval_recontruction_results.txt"
elif args.play_mode == 'interpolation':
result = calc_interpolate(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=100)
result_file_name = "eval_interpolation_results.txt"
elif args.play_mode == 'sampling':
sampling(model_vae,decoder_tokenizer,args)
result = None
else:
logger.info("Please specify the corrent play mode [reconstrction, interpolation]")
result_file_name=''
eval_output_dir = args.output_dir
output_eval_file = os.path.join(eval_output_dir, result_file_name)
# with open(output_eval_file, "w") as writer:
# logger.info("***** Eval {} results *****".format(args.play_mode))
# for key in sorted(result.keys()):
# logger.info(" %s \n %s", key, str(result[key]))
# writer.write("%s \n %s\n" % (key, str(result[key])))
return result
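# Reconstruct one sentence per evaluation batch (greedy decoding from the posterior mean) and collect
# input/output text pairs until args.total_sents examples have been processed.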
def calc_rec(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
result = defaultdict(str)
    for batch in tqdm(eval_dataloader, desc="Evaluating reconstruction"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x1 = x1[:,:max_len_values[1]]
x0 = x0.to(args.device)
x1 = x1.to(args.device)
x_lengths = x_lengths.to(args.device)
context_tokens = decoder_tokenizer.encode('<BOS>')
with torch.no_grad():
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
# result["INPUT TEXT " + str(count)].append(text_x0)
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
# latent_z, loss_kl = model_vae.connect(pooled_hidden_fea)
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
num_samples=latent_z.size(0),
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1) + '\n'
result[text_x0] = text_x1
count += 1
if count>args.total_sents:
break
return result
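# Reconstruction evaluation with BLEU: decode every sentence in each batch from the posterior mean,
# write reference/hypothesis pairs to a results file under args.output_dir, and report corpus BLEU
# together with the average std of the latent codes.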
def calc_rec_lgy(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
from nltk.translate.bleu_score import corpus_bleu
count = 0
result = defaultdict(str)
ref=[]
cand=[]
result_file_name = "eval_recontruction_results.txt"
eval_output_dir = args.output_dir
output_eval_file = os.path.join(eval_output_dir, result_file_name)
std = 0
if os.path.exists(output_eval_file):
os.remove(output_eval_file)
    for batch in tqdm(eval_dataloader, desc="Evaluating reconstruction"):
# pdb.set_trace()
with open(output_eval_file, "a+") as writer:
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x1 = x1[:,:max_len_values[1]]
x0 = x0.to(args.device)
x1 = x1.to(args.device)
x_lengths = x_lengths.to(args.device)
context_tokens = decoder_tokenizer.encode('<BOS>')
with torch.no_grad():
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
# result["INPUT TEXT " + str(count)].append(text_x0)
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
# latent_z, loss_kl = model_vae.connect(pooled_hidden_fea)
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
num_samples=latent_z.size(0),
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer,
eos_id=model_vae.eos_token_id
)
for i in range(latent_z.size(0)):
std += latent_z.std().item()
text_x0_ = decoder_tokenizer.decode(x1[i,:].tolist(), clean_up_tokenization_spaces=False).split(' <EOS>')[0]
text_x0_ = text_x0_.split()[1:]
ref.append([text_x0_])
text_x0_ = ' '.join(text_x0_)
text_x1 = decoder_tokenizer.decode(out[i,:].tolist(), clean_up_tokenization_spaces=False).split(' <EOS>')[0]
text_x1 = text_x1.split()[1:]
cand.append(text_x1)
text_x1 = ' '.join(text_x1) + '\n'
# result[text_x0] = text_x1
count += 1
writer.write("%s\n%s\n" % (text_x0_.strip(), str(text_x1).strip()))
if count>args.total_sents:
std = round(std/count,2)
break
bleu = corpus_bleu(ref,cand)*100
print('\n'+eval_output_dir.split('/')[-1]+'std:',std)
print('\nBLEU:',bleu,'\n')
return {'bleu':bleu ,'std':std}
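# Collect two latent codes from the evaluation set and decode evenly spaced points on the segment
# between them (args.num_interpolation_steps intervals).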
def calc_interpolate(model_vae, eval_dataloader, encoder_tokenizer, decoder_tokenizer, args, ns=1):
count = 0
latent_codes = []
sample_interval = 0
for batch in tqdm(eval_dataloader, desc="Evaluating interpolation"):
# pdb.set_trace()
x0, x1, x_lengths = batch
max_len_values, _ = x_lengths.max(0)
x0 = x0[:,:max_len_values[0]]
x0 = x0.to(args.device)
x_lengths = x_lengths.to(args.device)
with torch.no_grad():
if sample_interval == 0 or sample_interval == args.total_sents:
text_x0 = encoder_tokenizer.decode(x0[0,:x_lengths[0,0]].tolist(), clean_up_tokenization_spaces=True)[0]
pooled_hidden_fea = model_vae.encoder(x0, attention_mask=(x0 > 0).float())[1]
# Connect hidden feature to the latent space
mean, logvar = model_vae.encoder.linear(pooled_hidden_fea).chunk(2, -1)
latent_z = mean.squeeze(1)
latent_codes.append(latent_z)
if sample_interval == 5:
latent_codes.append(latent_z)
sample_interval = 0
continue
else:
sample_interval += 1
continue
count += 1
if count>args.total_sents:
break
context_tokens = decoder_tokenizer.encode('<BOS>')
result = defaultdict(str)
latent_codes_interpolation = []
num_steps = args.num_interpolation_steps
for step in range(num_steps+1):
latent_z = latent_codes[0] + (latent_codes[1] - latent_codes[0]) * step * 1.0/num_steps
past = latent_z
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length=x_lengths[0,1], # Chunyuan: Fix length; or use <EOS> to complete a sentence
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = decoder_tokenizer
)
text_x1 = decoder_tokenizer.decode(out[0,:].tolist(), clean_up_tokenization_spaces=True)
text_x1 = text_x1.split()[1:-1]
text_x1 = ' '.join(text_x1)
result[step] = text_x1
return result
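# Interpolate between two user-provided sentences: encode both, decode evenly spaced points on the
# latent segment between them, and print the sentence produced at each step.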
def interpolate(model_vae, tokenizer_encoder, tokenizer_decoder, args):
# and then in the main function
latent_z1, coded_length1 = latent_code_from_text(args.sent_source, tokenizer_encoder, model_vae, args)
latent_z2, coded_length2 = latent_code_from_text(args.sent_target, tokenizer_encoder, model_vae, args)
result = defaultdict(str)
num_steps = args.num_interpolation_steps + 1
for step in range(num_steps+1):
latent_z = latent_z1 + (latent_z2 - latent_z1) * step * 1.0/num_steps
text_interpolate = text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder)
result[step] = text_interpolate
print(text_interpolate.split('<EOS>')[0])
return result
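# Unconditional generation: draw latent codes from a zero-mean Gaussian with the given std, decode them,
# and append the generated sentences to a results file under args.output_dir.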
def sampling(model_vae, tokenizer_decoder, args,std=1.0):
if std == 1.0:
result_file_name = "eval_sample_results.txt"
else:
result_file_name = "eval_sample_results_std.txt"
eval_output_dir = args.output_dir
output_eval_file = os.path.join(eval_output_dir, result_file_name)
if os.path.exists(output_eval_file):
os.remove(output_eval_file)
ccc = args.total_sents//args.per_gpu_eval_batch_size
for i in tqdm(range(ccc)):
latent_z = torch.normal(0, std, size=(args.per_gpu_eval_batch_size, args.latent_size)).cuda()
past = latent_z
context_tokens = tokenizer_decoder.encode('<BOS>')
length = 50 # maximum length, but not used
out = sample_sequence_conditional(
model=model_vae.decoder,
context=context_tokens,
past=past,
length= length, # Chunyuan: Fix length; or use <EOS> to complete a sentence
num_samples=latent_z.size(0),
temperature=args.temperature,
top_k=args.top_k,
top_p=args.top_p,
device=args.device,
decoder_tokenizer = tokenizer_decoder,
eos_id=model_vae.eos_token_id
)
for i in range(latent_z.size(0)):
text_x1 = tokenizer_decoder.decode(out[i,:].tolist(), clean_up_tokenization_spaces=False).split(' <EOS>')[0]
text_x1 = text_x1.split()[1:]
text_x1 = ' '.join(text_x1) + '\n'
with open(output_eval_file, "a+") as writer:
writer.write("%s" % (text_x1))
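# Latent-space analogy: shift the input sentence's latent code by degree_to_target times the
# (target - source) offset and decode the result.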
def analogy(model_vae, tokenizer_encoder, tokenizer_decoder, args):
latent_z1, coded_length1 = latent_code_from_text(args.sent_source, tokenizer_encoder, model_vae, args)
latent_z2, coded_length2 = latent_code_from_text(args.sent_target, tokenizer_encoder, model_vae, args)
latent_z3, coded_length3 = latent_code_from_text(args.sent_input, tokenizer_encoder, model_vae, args)
result = defaultdict(str)
latent_z = latent_z3 + args.degree_to_target * (latent_z2 - latent_z1)
    # import pdb
    # pdb.set_trace()
text_analogy = text_from_latent_code(latent_z, model_vae, args, tokenizer_decoder)
result[0] = text_analogy
print(text_analogy.split('<EOS>')[0])
return result
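# Entry point: parses arguments, loads the fine-tuned encoder/decoder checkpoints and the full VAE state
# (falling back to DAAE if the VAE state dict does not load), then runs interpolation/analogy on
# user-supplied sentences or the selected latent-space evaluation.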
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--train_data_file", default=None, type=str, required=True,
help="The input training data file (a text file).")
parser.add_argument("--eval_data_file", default=None, type=str,
help="An input evaluation data file to evaluate the perplexity on (a text file).")
parser.add_argument("--checkpoint_dir", default=None, type=str, required=True,
help="The directory where checkpoints are saved.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataset", default='Snli', type=str, help="The dataset.")
## Variational auto-encoder
parser.add_argument("--latent_size", default=128, type=int, help="Latent space dimension.")
parser.add_argument("--total_sents", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--num_interpolation_steps", default=10, type=int, help="Total sentences to test recontruction.")
parser.add_argument("--play_mode", default="interpolation", type=str,
help="interpolation or reconstruction.")
## Encoder options
parser.add_argument("--encoder_model_type", default="bert", type=str,
help="The encoder model architecture to be fine-tuned.")
parser.add_argument("--encoder_model_name_or_path", default="bert-base-cased", type=str,
help="The encoder model checkpoint for weights initialization.")
parser.add_argument("--encoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--encoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
## Decoder options
parser.add_argument("--decoder_model_type", default="gpt2", type=str,
help="The decoder model architecture to be fine-tuned.")
parser.add_argument("--decoder_model_name_or_path", default="bert-base-cased", type=str,
help="The decoder model checkpoint for weights initialization.")
parser.add_argument("--decoder_config_name", default="", type=str,
help="Optional pretrained config name or path if not the same as model_name_or_path")
parser.add_argument("--decoder_tokenizer_name", default="", type=str,
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
parser.add_argument("--per_gpu_train_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=1, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gloabl_step_eval', type=int, default=661,
help="Evaluate the results at the given global step")
parser.add_argument("--max_seq_length", default=512, type=int,
help="Optional input sequence length before tokenization. The sequence will be dropped if it is longer the max_seq_length")
# Interact with users
parser.add_argument("--interact_with_user_input", action='store_true', help="Use user input to interact_with.")
parser.add_argument("--sent_source", type=str, default="")
parser.add_argument("--sent_target", type=str, default="")
parser.add_argument("--sent_input", type=str, default="")
parser.add_argument("--degree_to_target", type=float, default="1.0")
## Variational auto-encoder
parser.add_argument("--nz", default=32, type=int,
help="Latent space dimension.")
parser.add_argument("--prompt", type=str, default="")
parser.add_argument("--padding_text", type=str, default="")
parser.add_argument("--length", type=int, default=20)
parser.add_argument("--temperature", type=float, default=1.0)
parser.add_argument("--top_k", type=int, default=0)
parser.add_argument("--top_p", type=float, default=1.0)
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--block_size", default=-1, type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--use_philly", action='store_true',
help="Use Philly for computing.")
parser.add_argument("--std", default=1.0, type=float)
args = parser.parse_args()
args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
set_seed(args)
args.encoder_model_type = args.encoder_model_type.lower()
args.decoder_model_type = args.decoder_model_type.lower()
global_step = args.gloabl_step_eval
output_encoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-encoder-{}'.format(global_step))
output_decoder_dir = os.path.join(args.checkpoint_dir, 'checkpoint-decoder-{}'.format(global_step))
checkpoints = [ [output_encoder_dir, output_decoder_dir] ]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
# Load a trained Encoder model and vocabulary that you have fine-tuned
encoder_config_class, encoder_model_class, encoder_tokenizer_class = MODEL_CLASSES[args.encoder_model_type]
# import ipdb
# ipdb.set_trace()
model_encoder = encoder_model_class.from_pretrained(output_encoder_dir, latent_size=args.latent_size)
tokenizer_encoder = encoder_tokenizer_class.from_pretrained(args.encoder_tokenizer_name if args.encoder_tokenizer_name else args.encoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_encoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_encoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_encoder.max_len_single_sentence)
# Load a trained Decoder model and vocabulary that you have fine-tuned
decoder_config_class, decoder_model_class, decoder_tokenizer_class = MODEL_CLASSES[args.decoder_model_type]
model_decoder = decoder_model_class.from_pretrained(output_decoder_dir, latent_size=args.latent_size)
tokenizer_decoder = decoder_tokenizer_class.from_pretrained(args.decoder_tokenizer_name if args.decoder_tokenizer_name else args.decoder_model_name_or_path, do_lower_case=args.do_lower_case)
model_decoder.to(args.device)
if args.block_size <= 0:
args.block_size = tokenizer_decoder.max_len_single_sentence # Our input block size will be the max possible for the model
args.block_size = min(args.block_size, tokenizer_decoder.max_len_single_sentence)
# Load full model
output_full_dir = os.path.join(args.checkpoint_dir, 'checkpoint-full-{}'.format(global_step))
checkpoint = torch.load(os.path.join(output_full_dir, 'training.bin'))
# Chunyuan: Add Padding token to GPT2
special_tokens_dict = {'pad_token': '<PAD>', 'bos_token': '<BOS>', 'eos_token': '<EOS>'}
num_added_toks = tokenizer_decoder.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens to GPT2')
model_decoder.resize_token_embeddings(len(tokenizer_decoder)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer_decoder.pad_token == '<PAD>'
# Evaluation
try:
model_vae = VAE(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args)
model_vae.load_state_dict(checkpoint['model_state_dict'])
except:
model_vae = DAAE(model_encoder, model_decoder, tokenizer_encoder, tokenizer_decoder, args)
model_vae.load_state_dict(checkpoint['model_state_dict'])
logger.info("Pre-trained Optimus is successfully loaded")
model_vae.to(args.device)
if args.interact_with_user_input:
if args.play_mode == 'interpolation':
            if len(args.sent_source) > 0 and len(args.sent_target) > 0:
result = interpolate(model_vae, tokenizer_encoder, tokenizer_decoder, args)
else:
print('Please check: specify the source and target sentences!')
if args.play_mode == 'analogy':
            if len(args.sent_source) > 0 and len(args.sent_target) > 0 and len(args.sent_input) > 0:
result = analogy(model_vae, tokenizer_encoder, tokenizer_decoder, args)
else:
print('Please check: specify the source, target and input analogy sentences!')
else:
result = evaluate_latent_space(args, model_vae, tokenizer_encoder, tokenizer_decoder, prefix=global_step)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | Jegama/calvinist-parrot | app~parrot_toolkit~devotional.py | from dotenv import load_dotenv
from serpapi import GoogleSearch
import pythonbible as bible
from bs4 import BeautifulSoup
from datetime import datetime as dt
import pytz
import os, requests
import pandas as pd
bsb = pd.read_csv('app/bsb.tsv', sep='\t')
load_dotenv()
from openai import OpenAI
client = OpenAI()
et = pytz.timezone('US/Eastern')
from sqlalchemy import Column, String, create_engine, Text, engine
from sqlalchemy.orm import sessionmaker, declarative_base
import google_connector as gc
# create engine
pool = gc.connect_with_connector('devotionals')
Base = declarative_base()
# if temp folder doesn't exist create it
if not os.path.exists('temp'):
os.makedirs('temp')
# create table
class Devotionals(Base):
__tablename__ = 'devotionals'
devotionals_id = Column(String, primary_key=True)
news_articles = Column(Text)
bible_verse = Column(String)
title = Column(String)
devotional_text = Column(Text)
def __repr__(self):
return f"<Devotionals(devotionals_id='{self.devotionals_id}', news_articles='{self.news_articles}', bible_verse='{self.bible_verse}', title='{self.title}', devotional_text='{self.devotional_text}'')>"
# create the table in the database
Base.metadata.create_all(pool)
SERPAPI_API_KEY = os.environ.get("SERPAPI_API_KEY")
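# Heuristic article extraction: repeatedly descend into the <div> with the longest text and return roughly
# the first 50 words once the nesting has thinned out (or when no nested <div> remains).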
def get_article(soup, found=False):
# Find all the <div> elements on the page
divs = soup.find_all('div')
start = len(divs)
# Calculate the length of the text in each <div> element
div_text_lengths = [len(div.text) for div in divs]
# Find the index of the <div> element with the longest text
max_index = div_text_lengths.index(max(div_text_lengths))
# Select the <div> element with the longest text
longest_div = divs[max_index]
end = len(longest_div.find_all('div'))
if end == 0:
article = soup.text
        article = article.replace('\n', '').replace('\xa0', ' ').replace('\t', ' ').replace('\r', ' ')
        article = ' '.join(article.split())  # collapse any remaining runs of whitespace into single spaces
return " ".join(article.split(' ')[:50])
found = False if start - end < 50 else True
if found:
article = longest_div.text
        article = article.replace('\n', '').replace('\xa0', ' ').replace('\t', ' ').replace('\r', ' ')
        article = ' '.join(article.split())  # collapse any remaining runs of whitespace into single spaces
return " ".join(article.split(' ')[:50])
else:
return get_article(longest_div, False)
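# Fetch the top Google "latest news" stories via SerpAPI and scrape a short snippet from each of the first five links.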
def fech_news():
articles = []
params = {
"engine": "google",
"q": "latest news",
"api_key": SERPAPI_API_KEY
}
search = GoogleSearch(params)
results = search.get_dict()
links = []
for i in results['top_stories'][:5]:
links.append(i['link'])
response = requests.get(i['link'])
soup = BeautifulSoup(response.content, 'html.parser')
divs = soup.find_all('div')
if len(divs) != 0:
articles.append(get_article(soup))
return articles, "\n - ".join(links)
system_message = "You are a member of the Silicon Valley Reformed Baptist Church. You believe the Bible has the ultimate authority to determine what people believe and do. Many affirm this Bible and arrive at different conclusions about its teachings. In light of this reality, we have adopted the 1689 London Baptist Confession of Faith that expresses our understanding of the Bible's vision for the church to promote clarity and transparency at Silicon Valley Reformed Baptist Church. You write devotionals for other reformed believers to encourage them to grow in their faith."
def generate_message(devotional_type, now, latest_news):
message = f"""You are writing a {devotional_type} devotional for {now.strftime('%A, %B %d, %Y')}.
Here are snippets of the latest news:
---------------------
{latest_news}
---------------------
Please output a response as a Markdown code snippet formatted in the following schema:
{{
"bible_verse": string, \\ The Bible verse reference of the passage you are using (e.g. Romans 3.10-12)
"title": string \\ The title of your devotional
"devotional": string \\ The devotional text, 3 paragraphs long
}}
If it's a morning devotional, focus on encouraging people to grow in their faith; if it's an evening devotional, focus on comforting people in their faith. Remember that you are writing for other reformed believers, who may hold to either the 1689 London Baptist Confession of Faith or the Westminster Confession of Faith."""
return message
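# Build a morning or evening devotional (evening from 5 pm ET) from the latest headlines, ask the chat model
# for a title, Bible verse, and three-paragraph devotional, and store the result in the devotionals table
# (keyed by date and time of day) when the response parses.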
def generate_devotional():
now = dt.now(et)
devotional_type = "evening" if now.hour >= 17 or now.hour < 5 else "morning"
print('Getting news...')
articles, links = fech_news()
latest_news = "\n\n---\n\n".join(articles)
id = now.strftime("%Y_%m_%d")
id += f"_{devotional_type}_devotional"
message = generate_message(devotional_type, now, latest_news)
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": message}
],
temperature = 0
)
devotional_data = response.choices[0].message.content
try:
devotional_data = eval(devotional_data)
success = True
except:
success = False
if success:
Session = sessionmaker(bind=pool)
devotionals = Devotionals(
devotionals_id=id,
news_articles=links,
bible_verse=devotional_data['bible_verse'],
title=devotional_data['title'],
devotional_text=devotional_data['devotional']
)
session = Session()
session.add(devotionals)
session.commit()
session.close()
# function to check if devotional exists based on devotional id
def check_if_devotional_exists(devotional_id):
# create session
Session = sessionmaker(bind=pool)
session = Session()
# query the database
devotional = session.query(Devotionals).filter_by(devotionals_id=devotional_id).first()
# close the session
session.close()
return devotional
def get_bsb_text(verse):
return bsb.loc[bsb['Verse'] == verse, 'Berean Standard Bible'].values[0]
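# Resolve a scripture reference and return its text, preferring the Berean Standard Bible table and
# falling back to the ASV text provided by pythonbible.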
def get_text(verse):
references = bible.get_references(verse)
for i in references:
verse_id = bible.convert_reference_to_verse_ids(i)
reference_out = bible.format_scripture_references([i])
text_out = ''
for j in verse_id:
temp = bible.convert_verse_ids_to_references([j])
temp_ref = bible.format_scripture_references(temp)
try:
text_out += f'{get_bsb_text(temp_ref)}\n'
version = 'BSB'
except:
text_out += f'{bible.get_verse_text(j)}\n'
version = 'ASV'
return f' \n{text_out} - {reference_out} ({version})' | [] |
2024-01-10 | Jegama/calvinist-parrot | app~ai_parrot~CalvinistParrotAgent.py | import os, llama_index
from dotenv import load_dotenv
from langchain.agents import AgentExecutor
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from llama_index import ServiceContext
from ai_parrot.ccelTools import toolkit
from ai_parrot.CustomConversationalChatAgent import ConversationalChatAgent
load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
llm = ChatOpenAI(
model_name="gpt-3.5-turbo-16k",
temperature=0,
)
llm_embeddings = OpenAIEmbeddings()
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=llm_embeddings
)
llama_index.set_global_service_context(service_context)
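# Builds a LangChain conversational agent over the ccelTools retrieval tools, backed by gpt-3.5-turbo-16k,
# with chat history kept in a StreamlitChatMessageHistory buffer.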
class CalvinistParrot():
def create_agent(self):
msgs = StreamlitChatMessageHistory()
memory = ConversationBufferMemory(
chat_memory=msgs, return_messages=True, memory_key="chat_history", output_key="output"
)
chat_agent = ConversationalChatAgent.from_llm_and_tools(llm=llm, tools=toolkit)
executor = AgentExecutor.from_agent_and_tools(
agent=chat_agent,
tools=toolkit,
memory=memory,
return_intermediate_steps=True,
handle_parsing_errors=True,
verbose=True
)
return executor, msgs | [] |
2024-01-10 | Jegama/calvinist-parrot | app~ai_parrot~ccelTools.py | from langchain.tools import Tool
import requests, string
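# Turn the source nodes returned by a query endpoint into a numbered "Sources:" list with per-chunk confidence scores.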
def formater(output):
sources = {}
alphabet = string.ascii_lowercase
a = 0
for source in output['source_nodes']:
if source['node']['metadata']['title'] not in sources.keys():
sources[source['node']['metadata']['title']] = {'authors': source['node']['metadata']['authors'], 'score': [f"{alphabet[a]}. {round(source['score'], 3)}"]}
else:
sources[source['node']['metadata']['title']]['score'].append(f"{alphabet[a]}. {round(source['score'], 3)}")
a += 1
source_text = "Sources:\n"
n = 1
for source in sources.keys():
source_text += f"{n}. {source} by {sources[source]['authors']} - Confidence: {', '.join(sources[source]['score'])}\n"
n += 1
return source_text
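# Each helper below POSTs the question to a hosted query service and appends the formatted source list to the returned answer.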
def early_christian_literature(question):
response = requests.post('https://early-christian-literature-west2-e4y6sp3yrq-wl.a.run.app/query', json={'question': question})
output = response.json()
return output['response'] + '\n\n' + formater(output)
def reformed_commentaries(question):
response = requests.post('https://reformed-commentaries-west2-e4y6sp3yrq-wl.a.run.app/query', json={'question': question})
output = response.json()
return output['response'] + '\n\n' + formater(output)
def systematic_theology(question):
response = requests.post('https://systematic-theology-west2-e4y6sp3yrq-wl.a.run.app/query', json={'question': question})
output = response.json()
return output['response'] + '\n\n' + formater(output)
toolkit = [
Tool(
name="Early Christian Literature",
func=early_christian_literature,
description="Early texts, letters, and documents from the initial centuries of Christianity. - Includes books like ANF02. Fathers of the Second Century: Hermas, Tatian, Athenagoras, Theophilus, and Clement of Alexandria (Entire), ANF04. Fathers of the Third Century: Tertullian, Part Fourth; Minucius Felix; Commodian; Origen, Parts First and Second, ANF06. Fathers of the Third Century: Gregory Thaumaturgus, Dionysius the Great, Julius Africanus, Anatolius, and Minor Writers, Methodius, Arnobius and authors like Schaff, Philip (1819-1893) (Editor), Lightfoot, Joseph Barber (1828-1889), Pearse, Roger."
),
Tool(
name="Reformed Commentaries",
func=reformed_commentaries,
description="Reformed books focusing on the interpretation, analysis, and study of biblical texts. - Includes books like Harmony of the Law - Volume 3, Preface to the Letter of St. Paul to the Romans, Why Four Gospels? and authors like Pink, Arthur W., Calvin, Jean, Calvin, John (1509 - 1564)."
),
Tool(
name="Systematic Theology",
func=systematic_theology,
description="Comprehensive exploration of Christian doctrines and theology. - Includes books like A Body of Practical Divinity, Doctrinal Theology, History of Dogma - Volume IV and authors like Hodge, Charles (1797-1878), Hopkins, Samuel (1721-1803), Gill, John (1697-1771)."
),
] | [] |