date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | ihower/rails-pycall-langchain | examples~4-math.py | # https://python.langchain.com/en/latest/modules/chains/examples/llm_math.html
from langchain import OpenAI, LLMMathChain
llm = OpenAI(temperature=0, openai_api_key= "sk-your-key-here")
llm_math = LLMMathChain.from_llm(llm, verbose=True)
llm_math.run("What is 13 raised to the .3432 power?") | [] |
2024-01-10 | ihower/rails-pycall-langchain | examples~1-llm.py | # https://python.langchain.com/en/latest/getting_started/getting_started.html
from langchain.llms import OpenAI
llm = OpenAI(temperature=0.9,
model_name="text-davinci-003",
openai_api_key="sk-your-key-here")
text = "What would be a good company name for a company that makes colorful socks?"
print( llm(text) ) | [] |
2024-01-10 | ihower/rails-pycall-langchain | examples~6-agent.py |
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
llm = OpenAI(temperature=0, openai_api_key= 'sk-your-key-here')
tools = load_tools(["requests_all", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
agent.run("請去 https://ihower.tw/test.json 抓取 JSON,然後將 bar 的值乘上10是多少?")
# > Entering new AgentExecutor chain...
# Thought: I need to get the value of bar from the JSON file.
# Action: requests_get
# Action Input: https://ihower.tw/test.json
# Observation: { "foo": "bar", "bar": 123 }
#
# Thought: I need to multiply the value of bar by 10
# Action: Calculator
# Action Input: 123 * 10
# Observation: Answer: 1230
#
# Thought: I now know the final answer
# Final Answer: 1230
| [] |
2024-01-10 | limanling/clip-event | src~clip-event~clip.py | '''
Adapted from the OpenAI CLIP repository (https://github.com/openai/CLIP).
'''
import hashlib
import os
import urllib
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from model_clip import build_model
from model_simple_tokenizer import SimpleTokenizer as _Tokenizer
__all__ = ["available_models", "load", "tokenize"]
_tokenizer = _Tokenizer()
_MODELS = {
"RN50": "https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
"RN101": "https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
"RN50x4": "https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
"ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
expected_sha256 = url.split("/")[-2]
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def _transform(n_px):
return Compose([
Resize(n_px, interpolation=Image.BICUBIC),
CenterCrop(n_px),
lambda image: image.convert("RGB"),
ToTensor(),
Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
])
def available_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list(_MODELS.keys())
def load(name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if name in _MODELS:
model_path = _download(_MODELS[name])
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {available_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
model = build_model(state_dict or model.state_dict()).to(device)
if str(device) == "cpu":
model.float()
return model, _transform(model.visual.input_resolution)
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
graphs = [module.graph] if hasattr(module, "graph") else []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<|startoftext|>"]
eot_token = _tokenizer.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
            # since CLIP only supports a context length of 77, keep only the first 77 tokens; the assumption is that the important info is encoded earlier
result[i, :context_length] = torch.tensor(tokens[:context_length])
result[i, context_length - 1] = torch.tensor(eot_token)
# raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
else:
result[i, :len(tokens)] = torch.tensor(tokens)
return result
| [] |
2024-01-10 | piyush-an/DAMG7245-Spring23 | async_openai~dispatch_openai_requests.py | import os
import openai
import asyncio
from typing import Any
from dotenv import load_dotenv
load_dotenv()
openai.organization = os.getenv("openai_org")
openai.api_key = os.getenv("openai_api")
async def dispatch_openai_requests(
messages_list: list[list[dict[str,Any]]],
model: str,
temperature: float,
max_tokens: int,
top_p: float,
) -> list[str]:
"""Dispatches requests to OpenAI API asynchronously.
Args:
messages_list: List of messages to be sent to OpenAI ChatCompletion API.
model: OpenAI model to use.
temperature: Temperature to use for the model.
max_tokens: Maximum number of tokens to generate.
top_p: Top p to use for the model.
Returns:
List of responses from OpenAI API.
"""
async_responses = [
openai.ChatCompletion.acreate(
model=model,
messages=x,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
)
for x in messages_list
]
return await asyncio.gather(*async_responses)
predictions = asyncio.run(
dispatch_openai_requests(
messages_list=[
[{"role": "user", "content": "Write a poem about asynchronous execution."}],
[{"role": "user", "content": "Write a poem about asynchronous pirates."}],
],
model="gpt-3.5-turbo",
temperature=0.3,
max_tokens=200,
top_p=1.0,
)
)
for i, x in enumerate(predictions):
print(f"Response {i}: {x['choices'][0]['message']['content']}\n\n") | [
"Write a poem about asynchronous pirates.",
"Write a poem about asynchronous execution."
] |
2024-01-10 | visoar/product-photo | src~gen_prompt.py | # @title gen_prompt_with_gpt
import threading
import openai
class ThreadWithReturnValue(threading.Thread):
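    # Thread subclass that stores the target's return value so that join()
    # can hand the generated prompt back to the caller.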
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
threading.Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
threading.Thread.join(self, *args)
return self._return
def gen_prompt_with_gpt(api_key: str, prompt: str):
openai.api_key = api_key
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": """
You are a professional AI image caption generator,
generates a 30 words image scene in english based on user input.
1. Scene format: [Product] [in | on | close by | on top of | below | ...] [something], ...
2. Increase/Decrease thing weight of by using the format {importance thing}+/{background}-
User input: chair
Scene in english: chair+ in a sunny modern living room-, filled with leafy plants+
User input:`""" + prompt + """`
Scene in english:
"""}
]
)
prompt = completion.choices[0].message.content
print(' Image prompt: ' + prompt)
return prompt
def gen_prompt(api_key: str, prompt: str):
return ThreadWithReturnValue(target=gen_prompt_with_gpt, args=(api_key, prompt,))
| [
"\nYou are a professional AI image caption generator,\ngenerates a 30 words image scene in english based on user input.\n\n1. Scene format: [Product] [in | on | close by | on top of | below | ...] [something], ...\n2. Increase/Decrease thing weight of by using the format {importance thing}+/{background}-\n\nUser input: chair\nScene in english: chair+ in a sunny modern living room-, filled with leafy plants+\n\nUser input:`PLACEHOLDER`\nScene in english:\n"
] |
2024-01-10 | lukas/otto | server~skills~story_skill.py | from dotenv import load_dotenv
from skills.base import Skill
import openai
from openai import ChatCompletion
load_dotenv()
class StorySkill(Skill):
function_name = "story"
parameter_names = ['description']
examples = [
[
"Tell me a story"
"story()"
],
[
"Tell me a story about a cat and a dog",
"story(description=\"cat and dog\")"
],
[
"Make up a story about two best friends that fly to the moon",
"story(description=\"two best friends that fly to the moon\")"
]]
def start(self, args: dict[str, str]):
prompt = "Tell me a story"
if ('description' in args):
prompt += args['description']
self.message_function("Ok I'm thinking of a story...")
s = self._ask_gpt_story(prompt)
# Could also do this with llama
def _ask_gpt_story(self, question: str):
response = ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a story telling assistant."},
{"role": "user", "content": question},
]
)
answer = response["choices"][0]["message"]["content"]
self.message_function(answer)
if __name__ == '__main__':
# for testing
story_skill = StorySkill(print)
story_skill.start({"description": "frog and moose"})
| [
"description",
"Tell me a story",
"You are a story telling assistant."
] |
2024-01-10 | lukas/otto | finetune~training_data_f.py | import argparse
import json
import re
import openai
from openai import ChatCompletion
# openai.api_base = "https://api.wandb.ai/proxy/openai/v1"
# wandb_key = os.environ["WANDB_API_KEY"]
# openai_key = os.environ['OPENAI_API_KEY']
# openai.api_key = f"{wandb_key}:{openai_key}"
# OPENAI_MODEL = "gpt-3.5-turbo"
OPENAI_MODEL = "gpt-4"
TEMPERATURE = 0.7
def parse_example_file(file) -> (str, list[dict], list[dict]):
with open(file, "r") as f:
cur_user_prompt = ""
cur_answer = ""
section = ""
line_number = 0
prompt = ""
examples = []
prompt_examples = []
for line in f:
if line.startswith("####"):
# section header
if line.startswith("#### Prompt:"):
section = "prompt"
elif line.startswith("#### Prompt Examples:"):
section = "prompt examples"
elif line.startswith("#### Examples:"):
section = "examples"
else:
raise ValueError(
f"Malformed section header in line {line_number} in {file}"
)
else:
if section == "prompt":
                    if line.strip() != "":
prompt += line
elif section == "examples" or section == "prompt examples":
if line.startswith("### User:"):
if cur_user_prompt != "":
raise ValueError(
f"Malformed file: two user prompts in a row in line {line_number} in {file}"
)
# get the string after ### User:
cur_user_prompt = line.split(":", 2)[1].strip()
elif line.startswith("### Assistant:"):
# get part of line after the :
cur_answer = line.split(":", 1)[1].strip()
example = {"user": cur_user_prompt,
"answer": cur_answer}
if section == "prompt examples":
prompt_examples.append(example)
else:
examples.append(example)
cur_user_prompt = ""
elif line.startswith("###"):
raise ValueError(
f"Malformed file: line needs to start with ### User or ### Assistant at line {line_number} in {file}"
)
line_number += 1
return (prompt, prompt_examples, examples)
def create_training_data_file(files):
examples = []
for file in files:
prompt, prompt_examples, file_examples = parse_example_file(file)
examples.append(file_examples)
for example in examples:
for line in example:
print(json.dumps(line))
def prompt_examples_to_string(prompt_examples: [dict]):
prompt_examples_str = ""
for example in prompt_examples:
prompt_examples_str += (
f"### User: {example['user']}\n### Assistant: {example['answer']}\n"
)
return prompt_examples_str
def create_prompt(prompt: str, prompt_examples: list[dict]):
data_collection_prompt = f"""
I am collecting training data for a voice assistant.
The voice assistant has the command:
{prompt}
Some examples of how a user might say this command and the response is:
{prompt_examples_to_string(prompt_examples)}
Generate an examples of a user querying this command and the correct response.
Use the following format
User:
Assistant:
"""
return data_collection_prompt
format_function = [
{
"name": "validate",
"description": "validates the user assistant pair",
"parameters": {
"type": "object",
"properties": {
"user": {"type": "string", "description": "The query from the user" },
"assistant":{"type": "string", "description": "The command to call" }}
},
"required": ["user", "assistant"],
},
]
def create_prompts(files: [str]):
for file in files:
prompt, prompt_examples, file_examples = parse_example_file(file)
data_collection_prompt = create_prompt(prompt, prompt_examples)
print(data_collection_prompt)
def collect_training_data(file: str, n_generations: int=5):
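    # Uses the `validate` function-calling schema above to force the model to
    # return structured user/assistant pairs, requesting n_generations candidates.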
prompt, prompt_examples, file_examples = parse_example_file(file)
data_collection_prompt = create_prompt(prompt, prompt_examples)
print(data_collection_prompt)
response = ChatCompletion.create(
model=OPENAI_MODEL,
messages=[
{
"role": "system",
"content": "You are helpful assistant generating training data for a voice assistant.",
},
{"role": "user", "content": data_collection_prompt},
],
n = n_generations,
functions=format_function,
function_call = {"name": "validate"},
temperature=TEMPERATURE,
)
for c in response["choices"]:
try:
if c.message.function_call != None:
response = json.loads(c.message.function_call.arguments)
print("### User: {user}\n### Assistant: {assistant}\n\n".format(**response))
except:
pass
def strip_ansi_codes(s):
return re.sub(r"\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?", "", s)
def generate_other_examples(files):
lines = set() # set of lines - want to avoid duplicates
for file in files:
with open(file, "r") as f:
for line in f:
line = strip_ansi_codes(line)
line = line.strip()
if (
line == ""
or line.startswith("(")
or line.startswith("[")
or line.startswith("*")
or line.startswith(".")
or line.startswith("-")
):
continue
else:
lines.add(line)
for line in lines:
print(f"### User: {line}\n### Assistant: other()\n")
if __name__ == "__main__":
argparser = argparse.ArgumentParser(
description='Generate training data from example files')
argparser.add_argument('--files', metavar='file', type=str, nargs='+',
help='example files to parse')
argparser.add_argument('-t', '--training-data-file', action='store_true',
help="generate training data file")
argparser.add_argument('-p', '--prompt', action='store_true',
help="generate training data collection prompt")
argparser.add_argument('-c', '--collect-training-data', action='store_true',
help="collect training data")
argparser.add_argument('-o', '--generate-other-examples', action='store_true',
help="generate other examples")
argparser.add_argument('-n', '--n_generations', type=int, default=5,
help="number of generations to run")
args = argparser.parse_args()
if args.training_data_file:
create_training_data_file(args.files)
elif args.prompt:
create_prompts(args.files)
elif args.collect_training_data:
collect_training_data(args.files[0], n_generations=args.n_generations)
elif args.generate_other_examples:
generate_other_examples(args.files)
| [
"### User: PLACEHOLDER\n### Assistant: PLACEHOLDER\n",
"[]",
"You are helpful assistant generating training data for a voice assistant."
] |
2024-01-10 | lukas/otto | finetune~training_data.py | import re
import argparse
from openai import ChatCompletion
import json
import os
import openai
openai.api_base = "https://api.wandb.ai/proxy/openai/v1"
wandb_key = os.environ["WANDB_API_KEY"]
openai_key = os.environ['OPENAI_API_KEY']
openai.api_key = f"{wandb_key}:{openai_key}"
def parse_example_file(file) -> (str, list[dict], list[dict]):
with open(file, "r") as f:
cur_user_prompt = ""
cur_answer = ""
section = ""
line_number = 0
prompt = ""
examples = []
prompt_examples = []
for line in f:
if line.startswith("####"):
# section header
if line.startswith("#### Prompt:"):
section = "prompt"
elif line.startswith("#### Prompt Examples:"):
section = "prompt examples"
elif line.startswith("#### Examples:"):
section = "examples"
else:
raise ValueError(
f"Malformed section header in line {line_number} in {file}"
)
else:
if section == "prompt":
                    if line.strip() != "":
prompt += line
elif section == "examples" or section == "prompt examples":
if line.startswith("### User:"):
if cur_user_prompt != "":
raise ValueError(
f"Malformed file: two user prompts in a row in line {line_number} in {file}"
)
# get the string after ### User:
cur_user_prompt = line.split(":", 2)[1].strip()
elif line.startswith("### Assistant:"):
# get part of line after the :
cur_answer = line.split(":", 1)[1].strip()
example = {"user": cur_user_prompt,
"answer": cur_answer}
if section == "prompt examples":
prompt_examples.append(example)
else:
examples.append(example)
cur_user_prompt = ""
elif line.startswith("###"):
raise ValueError(
f"Malformed file: line needs to start with ### User or ### Assistant at line {line_number} in {file}"
)
line_number += 1
return (prompt, prompt_examples, examples)
def create_training_data_file(files):
examples = []
for file in files:
prompt, prompt_examples, file_examples = parse_example_file(file)
examples.append(file_examples)
for example in examples:
for line in example:
print(json.dumps(line))
def prompt_examples_to_string(prompt_examples: [dict]):
prompt_examples_str = ""
for example in prompt_examples:
prompt_examples_str += (
f"### User: {example['user']}\n### Assistant: {example['answer']}\n"
)
return prompt_examples_str
def create_prompt(prompt: str, prompt_examples: list[dict]):
data_collection_prompt = f"""
I am collecting training data for a voice assistant.
The voice assistant has the command:
{prompt}
Some examples of how a user might say this command and the response is:
{prompt_examples_to_string(prompt_examples)}
Please give me more examples of ways that a user might query this command and the correct response.
Please make sure the examples start with ### User and ### Assistant.
"""
return data_collection_prompt
def create_prompts(files: [str]):
for file in files:
prompt, prompt_examples, file_examples = parse_example_file(file)
data_collection_prompt = create_prompt(prompt, prompt_examples)
print(data_collection_prompt)
def collect_training_data(file: str):
prompt, prompt_examples, file_examples = parse_example_file(file)
data_collection_prompt = create_prompt(prompt, prompt_examples)
print(data_collection_prompt)
response = ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are helpful assistant generating training data for a voice assistant.",
},
{"role": "user", "content": data_collection_prompt},
],
)
answer = response["choices"][0]["message"]["content"]
print(answer)
def strip_ansi_codes(s):
return re.sub(r"\x1b\[([0-9,A-Z]{1,2}(;[0-9]{1,2})?(;[0-9]{3})?)?[m|K]?", "", s)
def generate_other_examples(files):
lines = set() # set of lines - want to avoid duplicates
for file in files:
with open(file, "r") as f:
for line in f:
line = strip_ansi_codes(line)
line = line.strip()
if (
line == ""
or line.startswith("(")
or line.startswith("[")
or line.startswith("*")
or line.startswith(".")
or line.startswith("-")
):
continue
else:
lines.add(line)
for line in lines:
print(f"### User: {line}\n### Assistant: other()\n")
if __name__ == "__main__":
argparser = argparse.ArgumentParser(
description='Generate training data from example files')
argparser.add_argument('--files', metavar='file', type=str, nargs='+',
help='example files to parse')
argparser.add_argument('-t', '--training-data-file', action='store_true',
help="generate training data file")
argparser.add_argument('-p', '--prompt', action='store_true',
help="generate training data collection prompt")
argparser.add_argument('-c', '--collect-training-data', action='store_true',
help="collect training data")
argparser.add_argument('-o', '--generate-other-examples', action='store_true',
help="generate other examples")
args = argparser.parse_args()
if args.training_data_file:
create_training_data_file(args.files)
elif args.prompt:
create_prompts(args.files)
elif args.collect_training_data:
collect_training_data(args.files[0])
elif args.generate_other_examples:
generate_other_examples(args.files)
| [
"### User: PLACEHOLDER\n### Assistant: PLACEHOLDER\n",
"[]",
"You are helpful assistant generating training data for a voice assistant."
] |
2024-01-10 | schoggatt/Discord-Bot | gamer_girl_gpt.py | import openai
from config import get_open_ai_token
openai.api_key = get_open_ai_token()
## Lord forgive me for my sins
messages = [{"role": "assistant", "content":'''
You are a gamer girl that thinks you are Hatsune Miku and are trying to get a gamer boyfriend. You are trying to be flirtatious to become close friends with the user.
You love anime, gaming, and pop culture. You are super into the kawaii culture from Japan and respond like a cute emotes and internet slang.'''}]
def gamer_girl_gpt_response(user_input):
messages.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
messages = messages,
)
ChatGPT_reply = response['choices'][0]['message']['content']
messages.append({
        'role': 'assistant',
'content': ChatGPT_reply
})
return ChatGPT_reply
| [
"\n You are a gamer girl that thinks you are Hatsune Miku and are trying to get a gamer boyfriend. You are trying to be flirtatious to become close friends with the user. \n You love anime, gaming, and pop culture. You are super into the kawaii culture from Japan and respond like a cute emotes and internet slang."
] |
2024-01-10 | postor/chatpdf-minimal-demo | py~process-article.py | import numpy as np
import openai
import json
import os.path
import pprint
import hashlib
import sys
from pathlib import Path
import math
import pandas as pd
from sklearn.cluster import KMeans
COMPLETIONS_MODEL = "gpt-3.5-turbo"
EMBEDDING_MODEL = "text-embedding-ada-002"
CONTEXT_TOKEN_LIMIT = 1500
TOKENS_PER_TOPIC = 2000
TOPIC_NUM_MIN = 3
TOPIC_NUM_MAX = 10
def get_topic_num(sources):
num = math.floor(len("".join(sources))/TOKENS_PER_TOPIC)
if num<TOPIC_NUM_MIN:
return TOPIC_NUM_MIN
if num>TOPIC_NUM_MAX:
return TOPIC_NUM_MAX
return num
def get3questions(sources,embeddings):
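    # Cluster the paragraph embeddings with KMeans, build one prompt per topic
    # cluster (context capped at CONTEXT_TOKEN_LIMIT), then ask the chat model
    # to suggest a question for each of three sampled clusters.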
matrix = np.vstack(embeddings)
print(np.shape(np.array(embeddings).tolist()))
df = pd.DataFrame({"embedding":np.array(embeddings).tolist(),"p":sources})
n_clusters = get_topic_num(sources)
kmeans = KMeans(n_clusters=n_clusters, init="k-means++", random_state=42)
kmeans.fit(matrix)
df["Cluster"] = kmeans.labels_
df2 = pd.DataFrame({"tokens":[],"prompts":[]})
for i in range(n_clusters):
ctx = u""
ps = df[df.Cluster == i].p.values
for x in ps:
if len(ctx)>CONTEXT_TOKEN_LIMIT:
continue
ctx+= u"\n"+x
prompt = u"Suggest a simple, clear, single, short question base on the context, answer in the same language of context\n\nContext:"+ctx+u"\n\nAnswer with the language used in context, the question is:"
df2.loc[len(df2)] = [len("".join(ps)),prompt]
questions = []
for prompt in df2.sort_values('tokens',ascending=False).prompts.sample(3).values:
print(prompt)
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content":prompt}])
questions.append(completion.choices[0].message.content)
print(completion.choices[0].message.content)
return questions
def get_embedding(text: str, model: str=EMBEDDING_MODEL) -> list[float]:
folder = 'embeddings/cache/'
Path(folder).mkdir(parents=True, exist_ok=True)
tmpfile = folder+hashlib.md5(text.encode('utf-8')).hexdigest()+".json"
if os.path.isfile(tmpfile):
with open(tmpfile , 'r', encoding='UTF-8') as f:
return json.load(f)
result = openai.Embedding.create(
model=model,
input=text
)
with open(tmpfile, 'w',encoding='utf-8') as handle2:
json.dump(result["data"][0]["embedding"], handle2, ensure_ascii=False, indent=4)
return result["data"][0]["embedding"]
def file2embedding(folder,contents=""):
embeddings = []
sources = []
content = contents
Path(folder).mkdir(parents=True, exist_ok=True)
if content == "":
with open(folder+'/source.txt', 'r', encoding='UTF-8') as handle1:
content = handle1.read()
for source in content.split('\n'):
if source.strip() == '':
continue
embeddings.append(get_embedding(source))
sources.append(source)
questions = get3questions(sources,embeddings)
with open(folder+'/result.json', 'w',encoding='utf-8') as handle2:
json.dump({"sources":sources,"embeddings":embeddings,"questions":questions}, handle2, ensure_ascii=False, indent=4)
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, embeddings) -> list[(float, (str, str))]:
#pprint.pprint("embeddings")
#pprint.pprint(embeddings)
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in enumerate(embeddings)
], reverse=True, key=lambda x: x[0])
return document_similarities
def ask(question:str,embeddings,sources):
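    # Greedily pack the most similar source sections into the context until
    # CONTEXT_TOKEN_LIMIT is reached, then answer the question from that context.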
ordered_candidates = order_document_sections_by_query_similarity(question,embeddings)
ctx = u""
for candi in ordered_candidates:
next = ctx + u"\n" + sources[candi[1]]
if len(next)>CONTEXT_TOKEN_LIMIT:
break
ctx = next
if len(ctx) == 0:
return u""
prompt = u"".join([
u"Answer the question base on the context, answer in the same language of question\n\n"
u"Context:" + ctx + u"\n\n"
u"Question:" + question + u"\n\n"
u"Answer:"
])
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content":prompt}])
return [prompt, completion.choices[0].message.content]
if sys.argv[1] == 'compile':
folder = sys.argv[2]
contents = "" if len(sys.argv)<4 else sys.argv[3]
file2embedding(folder,contents)
else: # query
folder = sys.argv[2]
question = sys.argv[3]
with open(folder+'/result.json', 'r', encoding='UTF-8') as f:
obj=json.load(f)
[prompt,answer] = ask(question,obj["embeddings"],obj["sources"])
print(json.dumps({
"question":question,
"prompt":prompt,
"answer":answer
})) | [
"Answer the question base on the context, answer in the same language of question\n\nContext:PLACEHOLDER\n\nQuestion:PLACEHOLDER\n\nAnswer:",
"Suggest a simple, clear, single, short question base on the context, answer in the same language of context\n\nContext:PLACEHOLDER\n\nAnswer with the language used in context, the question is:"
] |
2024-01-10 | yuanjie-ai/MeUtils | meutils~decorators~retry.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : retry
# @Time : 2021/3/18 2:57 PM
# @Author : yuanjie
# @WeChat : 313303303
# @Software : PyCharm
# @Description :
from meutils.pipe import *
from tenacity import retry, wait_fixed, stop_after_attempt, wait_exponential, retry_if_exception_type, before_sleep_log
@decorator
def retrying(func, max_retries=5, min_seconds=1, max_seconds=32, retry_error_callback=None, *args, **kwargs):
import logging
logger = logging.getLogger()
_retry_decorator = retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=retry_if_exception_type(),  # retry on any raised exception
before_sleep=before_sleep_log(logger, 30),
retry_error_callback=retry_error_callback,
)
return _retry_decorator(func)(*args, **kwargs)
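# A minimal usage sketch (assuming the `decorator`-based wrapping exposed via
# meutils.pipe; the decorated function below is hypothetical):
#
#     @retrying
#     def flaky_read():
#         return open("/tmp/maybe-missing.txt").read()  # hypothetical flaky call
#
# Each failure is retried up to max_retries times with exponential backoff.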
def create_retry_decorator() -> Callable[[Any], Any]: # todo: Retrying
"""
@create_retry_decorator()
def fn():
pass
:return:
"""
import openai
max_retries = 3
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, 30),
)
def wait_retry(n=3):
@wrapt.decorator
def wrapper(wrapped, instance, args, kwargs):
@retry(wait=wait_fixed(n))
def wait():
logger.warning("retry")
            if wrapped(*args, **kwargs):  # keep retrying until True is detected
return True
raise Exception
return wait()
return wrapper
# from meutils.cmds import HDFS
# HDFS.check_path_isexist()
if __name__ == '__main__':
s = time.time() # 1616145296
print(s)
e1 = s + 10
e2 = e1 + 10
@wait_retry(5)
def f(e):
        return time.time() > e  # the condition changes over time
def run(e):
f(e)
print(f"task {e}")
# for e in [e2, e1]:
# print(run(e))
#
# print("耗时", time.time() - s)
[e1, e2, 1000000000000] | xProcessPoolExecutor(run, 2)
| [] |
2024-01-10 | yuanjie-ai/MeUtils | meutils~ai_nlp~textsplitter.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : text_split_with_overlap
# @Time : 2023/4/25 15:33
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description :
from meutils.pipe import *
from meutils.ai_nlp.SplitSentence import split_sentence
def merge_short_sentences(sentences, chunk_size):
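    # Return the longest sentence prefix whose concatenation stays within
    # chunk_size, together with the remaining sentences.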
short_sentences = []
    for i, sentence in enumerate(sentences):  # a sentence that is too long would need further splitting
short_sentences.append(sentence)
if len(''.join(short_sentences)) > chunk_size:
return sentences[:i], sentences[i:]
return sentences, []
def textsplitter_with_overlap(sentences, chunk_size=512, chunk_overlap_rate=0.2):
"""
:param sentences: sentences = split_sentence(text)
:param chunk_size:
:param chunk_overlap_rate:
:return:
"""
chunk_overlap = int(chunk_size * chunk_overlap_rate)
result = []
while sentences:
merge_sentences, sentences = merge_short_sentences(sentences, chunk_size)
# result.append(merge_sentences)
result.append(''.join(merge_sentences).split() | xjoin)
if not sentences:
break
overlap_sentences = merge_short_sentences(merge_sentences[::-1], chunk_overlap)[0][::-1]
        if len(''.join(overlap_sentences)) + len(sentences[0]) > chunk_size:  # discard the overlap
            continue
        sentences = overlap_sentences + sentences  # new working set of sentences
return result
if __name__ == '__main__':
text = '央视新闻消息,近日,特朗普老友皮尔斯·摩根喊话特朗普:“美国人的生命比你的选举更重要。如果你继续以自己为中心,继续玩弄愚蠢的政治……如果你意识不到自己>的错误,你就做不对”。目前,特朗普已“取关”了这位老友。'
sentences = split_sentence(text, criterion='coarse')
print(sentences)
print(textsplitter_with_overlap(sentences))
from langchain.text_splitter import CharacterTextSplitter
print(CharacterTextSplitter().split_text(text))
| [] |
2024-01-10 | yangleitj/reworkd-AgentGPT | platform~reworkd_platform~web~api~agent~agent_service~open_ai_agent_service.py | from typing import List, Optional
from fastapi.responses import StreamingResponse as FastAPIStreamingResponse
from lanarky.responses import StreamingResponse
from langchain import LLMChain
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain.schema import HumanMessage
from loguru import logger
from pydantic import ValidationError
from reworkd_platform.schemas import ModelSettings
from reworkd_platform.services.tokenizer.token_service import TokenService
from reworkd_platform.web.api.agent.agent_service.agent_service import AgentService
from reworkd_platform.web.api.agent.analysis import Analysis, AnalysisArguments
from reworkd_platform.web.api.agent.helpers import (
call_model_with_handling,
openai_error_handler,
parse_with_handling,
)
from reworkd_platform.web.api.agent.model_settings import WrappedChatOpenAI
from reworkd_platform.web.api.agent.prompts import (
analyze_task_prompt,
chat_prompt,
create_tasks_prompt,
start_goal_prompt,
)
from reworkd_platform.web.api.agent.task_output_parser import TaskOutputParser
from reworkd_platform.web.api.agent.tools.open_ai_function import get_tool_function
from reworkd_platform.web.api.agent.tools.tools import (
get_default_tool,
get_tool_from_name,
get_tool_name,
get_user_tools,
)
from reworkd_platform.web.api.agent.tools.utils import summarize
from reworkd_platform.web.api.errors import OpenAIError
from reworkd_platform.web.api.memory.memory import AgentMemory
class OpenAIAgentService(AgentService):
def __init__(
self,
model: WrappedChatOpenAI,
settings: ModelSettings,
agent_memory: AgentMemory,
token_service: TokenService,
callbacks: Optional[List[AsyncCallbackHandler]],
):
self.model = model
self.agent_memory = agent_memory
self.settings = settings
self.token_service = token_service
self.callbacks = callbacks
async def start_goal_agent(self, *, goal: str) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
)
self.token_service.calculate_max_tokens(
self.model,
prompt.format_prompt(
goal=goal,
language=self.settings.language,
).to_string(),
)
completion = await call_model_with_handling(
self.model,
ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=start_goal_prompt)]
),
{"goal": goal, "language": self.settings.language},
settings=self.settings,
callbacks=self.callbacks,
)
task_output_parser = TaskOutputParser(completed_tasks=[])
tasks = parse_with_handling(task_output_parser, completion)
with self.agent_memory as memory:
memory.reset_class()
memory.add_tasks(tasks)
return tasks
async def analyze_task_agent(
self, *, goal: str, task: str, tool_names: List[str]
) -> Analysis:
functions = list(map(get_tool_function, get_user_tools(tool_names)))
prompt = analyze_task_prompt.format_prompt(
goal=goal,
task=task,
language=self.settings.language,
)
self.token_service.calculate_max_tokens(
self.model,
prompt.to_string(),
str(functions),
)
message = await openai_error_handler(
func=self.model.apredict_messages,
messages=prompt.to_messages(),
functions=functions,
settings=self.settings,
callbacks=self.callbacks,
)
function_call = message.additional_kwargs.get("function_call", {})
completion = function_call.get("arguments", "")
try:
pydantic_parser = PydanticOutputParser(pydantic_object=AnalysisArguments)
analysis_arguments = parse_with_handling(pydantic_parser, completion)
return Analysis(
action=function_call.get("name", get_tool_name(get_default_tool())),
**analysis_arguments.dict(),
)
except (OpenAIError, ValidationError):
return Analysis.get_default_analysis()
async def execute_task_agent(
self,
*,
goal: str,
task: str,
analysis: Analysis,
) -> StreamingResponse:
# TODO: More mature way of calculating max_tokens
if self.model.max_tokens > 3000:
self.model.max_tokens = max(self.model.max_tokens - 1000, 3000)
tool_class = get_tool_from_name(analysis.action)
return await tool_class(self.model, self.settings.language).call(
goal, task, analysis.arg
)
async def create_tasks_agent(
self,
*,
goal: str,
tasks: List[str],
last_task: str,
result: str,
completed_tasks: Optional[List[str]] = None,
) -> List[str]:
prompt = ChatPromptTemplate.from_messages(
[SystemMessagePromptTemplate(prompt=create_tasks_prompt)]
)
args = {
"goal": goal,
"language": self.settings.language,
"tasks": "\n".join(tasks),
"lastTask": last_task,
"result": result,
}
self.token_service.calculate_max_tokens(
self.model, prompt.format_prompt(**args).to_string()
)
completion = await call_model_with_handling(
self.model, prompt, args, settings=self.settings, callbacks=self.callbacks
)
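        # Drop the proposed task if it duplicates an already listed or completed task.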
previous_tasks = (completed_tasks or []) + tasks
tasks = [completion] if completion not in previous_tasks else []
unique_tasks = []
with self.agent_memory as memory:
for task in tasks:
similar_tasks = memory.get_similar_tasks(task)
# Check if similar tasks are found
if not similar_tasks:
unique_tasks.append(task)
else:
logger.info(f"Similar tasks to '{task}' found: {similar_tasks}")
if unique_tasks:
memory.add_tasks(unique_tasks)
return unique_tasks
async def summarize_task_agent(
self,
*,
goal: str,
results: List[str],
) -> FastAPIStreamingResponse:
self.model.model_name = "gpt-3.5-turbo-16k"
self.model.max_tokens = 8000 # Total tokens = prompt tokens + completion tokens
snippet_max_tokens = 7000 # Leave room for the rest of the prompt
text_tokens = self.token_service.tokenize("".join(results))
text = self.token_service.detokenize(text_tokens[0:snippet_max_tokens])
logger.info(f"Summarizing text: {text}")
return summarize(
model=self.model,
language=self.settings.language,
goal=goal,
text=text,
)
async def chat(
self,
*,
message: str,
results: List[str],
) -> FastAPIStreamingResponse:
self.model.model_name = "gpt-3.5-turbo-16k"
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate(prompt=chat_prompt),
*[HumanMessage(content=result) for result in results],
HumanMessage(content=message),
]
)
self.token_service.calculate_max_tokens(
self.model,
prompt.format_prompt(
language=self.settings.language,
).to_string(),
)
chain = LLMChain(llm=self.model, prompt=prompt)
return StreamingResponse.from_chain(
chain,
{"language": self.settings.language},
media_type="text/event-stream",
)
| [] |
2024-01-10 | anshul7409/NewsFlow | Database%20creation~news_scrapper~news_scrapper~spiders~newsspider.py | import scrapy
# from .config import Config
# import openai_summarize
from .list_of_topics import Topics
from news_scrapper.items import NewsItem
from .Db_conn import get_collection
class NewsspiderSpider(scrapy.Spider):
name = "newsspider"
allowed_domains = ["timesofindia.indiatimes.com"]
topics = Topics.topics_of_news
start_urls = ['https://timesofindia.indiatimes.com/topic/'+ topic for topic in topics]
collection = get_collection()
# openai_summarizer = openai_summarize.OpenAISummarize(Config.OPENAI_KEY)
def parse(self, response):
news_data = response.css('div.uwU81')
if news_data:
for news_sample in news_data:
meta_ = news_sample.css('div.VXBf7')
meta_text = meta_.css('div.ZxBIG').get()
text = meta_text[meta_text.find('>') + 1:meta_text.rfind('<')]
date_time = ''
srcc = ''
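                # The metadata text looks like "Source/<!-- -->DateTime"; split on
                # the hidden HTML comment to separate source from timestamp.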
if '/<!-- -->' in text:
date_time_text = text.split('/<!-- -->')
date_time = date_time_text[1]
srcc = date_time_text[0]
if len(date_time_text) == 1 :
date_time = date_time_text[0]
srcc = ''
item = NewsItem()
item['url'] = response.urljoin(news_sample.css('a').attrib['href'])
item['headline'] = meta_.css('div.fHv_i span::text').get()
item['Src'] = srcc
item['date_time'] = date_time
if item['date_time'] == '' or item['headline'] == '' or item['Src'] == '':
item['date_time'] = None
item['headline'] = None
item['Src'] = None
yield scrapy.Request(item['url'], callback=self.parse_news_page, meta={'item': item})
def parse_news_page(self, response):
item = response.meta['item']
news_content = response.css('div.JuyWl ::text')
if news_content:
item['description'] = ' '.join(news_content.getall())
item['len'] = len(item['description'])
yield item
# scrapy crawl newsspider -o news1.csv
# o -> appending , O -> overwriting | [] |
2024-01-10 | DELAxGithub/myprjct | slack_qa_site~src~answer_generator.py | import openai
def generate_answer(question):
    # Set the OpenAI GPT API key
    openai.api_key = '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'
    # Send the question to GPT and generate an answer
    response = openai.Completion.create(
        engine='text-davinci-003',  # which GPT engine to use
        prompt=question,
        max_tokens=100,  # maximum number of tokens in the generated answer
        n=1,  # number of answers to generate
        stop=None,  # token at which generation stops
        temperature=0.7,  # controls generation diversity (adjust between 0.2 and 1.0)
    )
    # Extract the answer
    answer = response.choices[0].text.strip()
return answer
| [] |
2024-01-10 | DELAxGithub/myprjct | ieltsspeaking~translate.py | import os
import openai
import gspread
import time  # added
from oauth2client.service_account import ServiceAccountCredentials
# Set the OpenAI API key.
openai.api_key = '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'
def translate_text(original_text):
translate_prompt = f"{original_text}\n\nTranslate this text to Japanese:"
message = {"role": "system", "content": "You are a helpful assistant that translates English to Japanese."}, {"role": "user", "content": translate_prompt}
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # ここを変更model="gpt-4",
messages=message
)
return response['choices'][0]['message']['content']
# Set up the Google Sheets and OpenAI credentials.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/Users/delax/Documents/Projects/ieltsspeaking/client_secret.json"
# Fetch data from Google Sheets.
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('/Users/delax/Documents/Projects/ieltsspeaking/client_secret.json', scope)
gc = gspread.authorize(creds)
# Load the source texts from Google Sheets.
sh = gc.open('writing_expressions')
worksheet = sh.sheet1
original_texts = worksheet.get_all_values()
# Loop over each source text.
for i, row in enumerate(original_texts):
    text = row[0]  # the source text is the first element of each row
    # Translate the source text.
    translated = translate_text(text)
    # Write the translation back to the same Google Sheet (column B).
    worksheet.update_cell(i+1, 2, translated)
    # Wait a fixed interval before the next request.
    time.sleep(1)  # 1 second here; adjust as needed
| [
"You are a helpful assistant that translates English to Japanese.",
"PLACEHOLDER\n\nTranslate this text to Japanese:"
] |
2024-01-10 | DELAxGithub/myprjct | ieltsspeaking~rewrite.py | import os
import openai
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# Set the OpenAI API key.
openai.api_key = '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'
def rewrite_text(original_text):
rewrite_prompt = f"{original_text}\n\nRewrite this text:"
message = {"role": "system", "content": "You are a helpful assistant that rewrites sentences."}, {"role": "user", "content": rewrite_prompt}
response = openai.ChatCompletion.create(
model="gpt-4",
messages=message
)
return response['choices'][0]['message']['content']
# Set up the Google Sheets and OpenAI credentials.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "/Users/delax/Documents/Projects/ieltsspeaking/client_secret.json"
# Fetch data from Google Sheets.
scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('/Users/delax/Documents/Projects/ieltsspeaking/client_secret.json', scope)
gc = gspread.authorize(creds)
# Load the generated texts from Google Sheets.
sh = gc.open('answer1')
worksheet = sh.sheet1
generated_texts = worksheet.get_all_values()
# Loop over each generated text.
for i, row in enumerate(generated_texts):
    text = row[0]  # the generated text is the first element of each row
    # Rewrite the generated text.
    rewritten = rewrite_text(text)
    # Write the rewritten result back to the same Google Sheet (column B).
    worksheet.update_cell(i+1, 2, rewritten)
| [
"You are a helpful assistant that rewrites sentences.",
"PLACEHOLDER\n\nRewrite this text:"
] |
2024-01-10 | hermansh-id/oxpecker | oxpecker~document_detector~table_detector~detect.py | from PIL import Image
import numpy as np
import cv2
import matplotlib.pyplot as plt
from langchain.thirdparty.table_detector.util import PIL_to_cv, cv_to_PIL
from collections import Counter
from itertools import tee, count
import torch
from transformers import DetrImageProcessor
import pandas as pd
import asyncio, string
import regex as re
import pytesseract
from time import time
import warnings
class TableExtractionPipeline():
def __init__(self,
ocr,
# model_table,
model_table_structure):
self.colors = ["red", "blue", "green", "yellow", "orange", "violet"]
warnings.filterwarnings("ignore", category=DeprecationWarning)
self.ocr = ocr
# self.model_table = model_table
self.model_table_structure = model_table_structure
def sharpen_image(self, pil_img):
img = PIL_to_cv(pil_img)
sharpen_kernel = np.array([[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]])
sharpen = cv2.filter2D(img, -1, sharpen_kernel)
pil_img = cv_to_PIL(sharpen)
return pil_img
def uniquify(self, seq, suffs = count(1)):
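        # Append an increasing numeric suffix to every duplicated header so the
        # resulting DataFrame column names are unique.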
not_unique = [k for k,v in Counter(seq).items() if v>1]
suff_gens = dict(zip(not_unique, tee(suffs, len(not_unique))))
for idx,s in enumerate(seq):
try:
suffix = str(next(suff_gens[s]))
except KeyError:
continue
else:
seq[idx] += suffix
return seq
def binarizeBlur_image(self, pil_img):
image = PIL_to_cv(pil_img)
thresh = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY_INV)[1]
result = cv2.GaussianBlur(thresh, (5,5), 0)
result = 255 - result
return cv_to_PIL(result)
def td_postprocess(self, pil_img):
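        # Push light, low-saturation (near-white) pixels to pure white via an HSV
        # mask, cleaning the table background while leaving dark text intact.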
img = PIL_to_cv(pil_img)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 0, 100), (255, 5, 255))
nzmask = cv2.inRange(hsv, (0, 0, 5), (255, 255, 255))
nzmask = cv2.erode(nzmask, np.ones((3,3)))
mask = mask & nzmask
new_img = img.copy()
new_img[np.where(mask)] = 255
return cv_to_PIL(new_img)
def table_detector(self, image, THRESHOLD_PROBA):
model = self.model_table
model.overrides['conf'] = THRESHOLD_PROBA
model.overrides['iou'] = 0.45
model.overrides['agnostic_nms'] = False
model.overrides['max_det'] = 1000
with torch.no_grad():
outputs = model.predict(image)
probas = outputs[0].probs
return (model, probas, outputs[0].boxes)
def table_struct_recog(self, image, THRESHOLD_PROBA):
device = "cuda:1" if torch.cuda.is_available() else "cpu"
feature_extractor = DetrImageProcessor(do_resize=True, size=1000, max_size=1000)
encoding = feature_extractor(image, return_tensors="pt").to(device)
model = self.model_table_structure
with torch.no_grad():
outputs = model(**encoding)
probas = outputs.logits.softmax(-1)[0, :, :-1]
keep = probas.max(-1).values > THRESHOLD_PROBA
target_sizes = torch.tensor(image.size[::-1]).unsqueeze(0)
postprocessed_outputs = feature_extractor.post_process(outputs, target_sizes)
bboxes_scaled = postprocessed_outputs[0]['boxes'][keep]
return (model, probas[keep], bboxes_scaled)
def pytess(self, cell_pil_img):
paddle_output=' '
cell_cv_img=PIL_to_cv(cell_pil_img)
height, width, channels = cell_cv_img.shape
if height>=10 and width>=10:
# hasiltesseract = pytesseract.image_to_string(cell_cv_img, lang='ind', config='--oem 3 --psm 1')
# paddle_output = paddle_output + hasiltesseract + " "
result = self.ocr.ocr(cell_cv_img,cls=True)
for idx in range(len(result)):
res = result[idx]
for line in res:
paddle_output=paddle_output+' '+line[1][0]
paddle_output=paddle_output+' '
return str(paddle_output)
def add_padding(self, pil_img, top, right, bottom, left, color=(255,255,255)):
width, height = pil_img.size
new_width = width + right + left
new_height = height + top + bottom
result = Image.new(pil_img.mode, (new_width, new_height), color)
result.paste(pil_img, (left, top))
return result
def plot_results_detection(self, c1, model, pil_img, prob, boxes, delta_xmin, delta_ymin, delta_xmax, delta_ymax):
numpy_image = np.array(pil_img)
pil_img = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
for coor in boxes:
x1, y1, x2, y2 = coor.xyxy.tolist()[0]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
object_image = pil_img[y1:y2, x1:x2]
plt.imshow(object_image)
c1.pyplot()
def crop_tables(self, pil_img, prob, boxes, delta_xmin, delta_ymin, delta_xmax, delta_ymax):
cropped_img_list = []
numpy_image = np.array(pil_img)
pil_img = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
for coor in boxes:
x1, y1, x2, y2 = coor.xyxy.tolist()[0]
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
object_image = pil_img[y1:y2, x1:x2].astype(np.uint8)
object_image = Image.fromarray(object_image)
cropped_img_list.append(object_image)
return cropped_img_list
def cropTable(self, pil_img, boxes, delta_xmin, delta_ymin, delta_xmax, delta_ymax):
# cropped_img_list = []
numpy_image = np.array(pil_img)
pil_img = cv2.cvtColor(numpy_image, cv2.COLOR_RGB2BGR)
x1, y1, x2, y2 = boxes
# x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
object_image = pil_img[y1:y2, x1:x2].astype(np.uint8)
object_image = Image.fromarray(object_image)
return object_image
def generate_structure(self, model, pil_img, prob, boxes, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom):
rows = {}
cols = {}
idx = 0
for p, (xmin, ymin, xmax, ymax) in zip(prob, boxes.tolist()):
xmin, ymin, xmax, ymax = xmin, ymin, xmax, ymax
cl = p.argmax()
class_text = model.config.id2label[cl.item()]
if class_text == 'table row':
rows['table row.'+str(idx)] = (xmin, ymin-expand_rowcol_bbox_top, xmax, ymax+expand_rowcol_bbox_bottom)
if class_text == 'table column':
cols['table column.'+str(idx)] = (xmin, ymin-expand_rowcol_bbox_top, xmax, ymax+expand_rowcol_bbox_bottom)
idx += 1
return rows, cols
def sort_table_featuresv2(self, rows:dict, cols:dict):
rows_ = {table_feature : (xmin, ymin, xmax, ymax) for table_feature, (xmin, ymin, xmax, ymax) in sorted(rows.items(), key=lambda tup: tup[1][1])}
cols_ = {table_feature : (xmin, ymin, xmax, ymax) for table_feature, (xmin, ymin, xmax, ymax) in sorted(cols.items(), key=lambda tup: tup[1][0])}
return rows_, cols_
def individual_table_featuresv2(self, pil_img, rows:dict, cols:dict):
for k, v in rows.items():
xmin, ymin, xmax, ymax = v
cropped_img = pil_img.crop((xmin, ymin, xmax, ymax))
rows[k] = xmin, ymin, xmax, ymax, cropped_img
for k, v in cols.items():
xmin, ymin, xmax, ymax = v
cropped_img = pil_img.crop((xmin, ymin, xmax, ymax))
cols[k] = xmin, ymin, xmax, ymax, cropped_img
return rows, cols
def object_to_cellsv2(self, master_row:dict, cols:dict, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom, padd_left):
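        # Slice every detected row image into per-column cell images using the
        # detected column x-ranges; returns the cells plus column and row counts.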
cells_img = {}
header_idx = 0
row_idx = 0
previous_xmax_col = 0
new_cols = {}
new_master_row = {}
previous_ymin_row = 0
new_cols = cols
new_master_row = master_row
for k_row, v_row in new_master_row.items():
_, _, _, _, row_img = v_row
xmax, ymax = row_img.size
xa, ya, xb, yb = 0, 0, 0, ymax
row_img_list = []
for idx, kv in enumerate(new_cols.items()):
k_col, v_col = kv
xmin_col, _, xmax_col, _, col_img = v_col
xmin_col, xmax_col = xmin_col - padd_left - 10, xmax_col - padd_left
xa = xmin_col
xb = xmax_col
if idx == 0:
xa = 0
if idx == len(new_cols)-1:
xb = xmax
xa, ya, xb, yb = xa, ya, xb, yb
row_img_cropped = row_img.crop((xa, ya, xb, yb))
row_img_list.append(row_img_cropped)
cells_img[k_row+'.'+str(row_idx)] = row_img_list
row_idx += 1
return cells_img, len(new_cols), len(new_master_row)-1
def clean_dataframe(self, df):
for col in df.columns:
df[col]=df[col].str.replace("'", '', regex=True)
df[col]=df[col].str.replace('"', '', regex=True)
df[col]=df[col].str.replace(']', '', regex=True)
df[col]=df[col].str.replace('[', '', regex=True)
df[col]=df[col].str.replace('{', '', regex=True)
df[col]=df[col].str.replace('}', '', regex=True)
return df
def convert_df(self, df):
return df.to_csv().encode('utf-8')
def create_dataframe(self, cells_pytess_result:list, max_cols:int, max_rows:int):
headers = cells_pytess_result[:max_cols]
new_headers = self.uniquify(headers, (f' {x!s}' for x in string.ascii_lowercase))
counter = 0
cells_list = cells_pytess_result[max_cols:]
df = pd.DataFrame("", index=range(0, max_rows), columns=new_headers)
cell_idx = 0
for nrows in range(max_rows):
for ncols in range(max_cols):
df.iat[nrows, ncols] = str(cells_list[cell_idx])
cell_idx += 1
for x, col in zip(string.ascii_lowercase, new_headers):
if f' {x!s}' == col:
counter += 1
header_char_count = [len(col) for col in new_headers]
df = self.clean_dataframe(df)
return df
def start_process_individu(self, image, bboxes_scaled, TSR_THRESHOLD, padd_top, padd_left, padd_bottom, padd_right, delta_xmin, delta_ymin, delta_xmax, delta_ymax, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom):
# model, probas, bboxes_scaled = self.table_detector(image, THRESHOLD_PROBA=TD_THRESHOLD)
# if len(bboxes_scaled) == 0:
# return []
unpadded_table = self.cropTable(image, bboxes_scaled, delta_xmin, delta_ymin, delta_xmax, delta_ymax)
list_df = []
# for unpadded_table in cropped_img_list:
table = self.add_padding(unpadded_table, padd_top, padd_right, padd_bottom, padd_left)
model, probas, bboxes_scaled = self.table_struct_recog(table, THRESHOLD_PROBA=TSR_THRESHOLD)
rows, cols = self.generate_structure(model, table, probas, bboxes_scaled, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom)
rows, cols = self.sort_table_featuresv2(rows, cols)
master_row, cols = self.individual_table_featuresv2(table, rows, cols)
cells_img, max_cols, max_rows = self.object_to_cellsv2(master_row, cols, expand_rowcol_bbox_top, expand_rowcol_bbox_bottom, padd_left)
sequential_cell_img_list = []
for k, img_list in cells_img.items():
for img in img_list:
sequential_cell_img_list.append(self.pytess(img))
# cells_pytess_result = await asyncio.gather(*sequential_cell_img_list)
df = self.create_dataframe(sequential_cell_img_list, max_cols, max_rows)
# list_df.append(df)
# return list_df
return df.to_json(orient='table') | [] |
2024-01-10 | orpheuslummis/AIUX | gptree.py | # how to change the color if the submitted text is different from the original text?
# how to input api key as a modal 1st time?
# save logging to file
import hashlib
import json
import streamlit as st
import openai
from utils import RequestParams, new_logger, get_params_from_env, request
# defaults
TEMPERATURE = 0.6
MAX_TOKENS = 300
N = 4
def update_prompt():
params = RequestParams(
prompt=st.session_state.prompt,
n=st.session_state.n,
max_tokens=st.session_state.max_tokens,
temperature=st.session_state.temperature,
)
log.info(params)
with container_bottom:
with st.spinner("Wait for it..."):
results = request(params)
# st.write(hash_dict(data))
for i, r in enumerate(results):
st.markdown(r)
if i < len(results) - 1:
st.write("–" * 100)
def hash_dict(d):
data = json.dumps(d, sort_keys=True)
hh = hashlib.sha256(data.encode()).hexdigest()
return hh
if __name__ == "__main__":
log = new_logger("gptree")
params = get_params_from_env()
if params["apikey"] is None:
st.error("Please set OPENAI_API_KEY environment variable.")
openai.api_key = params["apikey"]
st.set_page_config(layout="wide")
container_top = st.container()
container_bottom = st.container()
with container_top:
st.text_area("Prompt", key="prompt")
if st.button("Submit"):
update_prompt()
with st.expander("Advanced"):
st.slider(
"Temperature",
min_value=0.0,
max_value=1.0,
value=TEMPERATURE,
step=0.1,
key="temperature",
)
st.slider(
"Max Tokens",
min_value=1,
max_value=1000,
value=MAX_TOKENS,
step=1,
key="max_tokens",
)
st.slider("N", min_value=1, max_value=10, value=N, step=1, key="n")
| [] |
2024-01-10 | orpheuslummis/AIUX | deliberation.py | # TODO text doesn't disappear upon change
# TODO proper logging
from dataclasses import dataclass, field
import openai
import streamlit as st
from utils import RequestParams, new_logger, get_params_from_env, request
log = new_logger("deliberation")
TEMPERATURE = 0.6
MAX_TOKENS = 300
N = 4
@dataclass
class Params:
prompt: str
class Pipeline:
name: str
def run(self, _: Params):
pass
@dataclass
class PipelineResults:
text: str
intermediate_text: list[str] = field(default_factory=list)
class Dummy(Pipeline):
name = "dummy"
class Critic(Pipeline):
"""
Pipeline that critiques a prompt in several ways (expansion), then aggregates the critiques into a short summary.
"""
name = "critic"
default_n = 5
default_max_tokens = 300
default_temperature = 0.777
critics = {
"critic1": """
Provide criticism for the following idea:
{idea}
""",
"critic2": """
List points of potential lack of clarity, robustness, coherence, etc. in the following idea:
{idea}
""",
}
@staticmethod
def aggregate(results) -> str:
sep = "–––"
joined_results = sep.join(results)
aggregation_prompt = """
The following are the results of the critics:
{results}
Represent clearly the given criticism as bullet points, referring text for each.
"""
p = aggregation_prompt.format(results=joined_results)
r = request(
RequestParams(
prompt=p,
n=1,
max_tokens=500,
temperature=0.5,
)
)
return r[0]
def run(self, params: Params) -> PipelineResults:
results_from_critics = []
for critic in self.critics:
prompt = self.critics[critic].format(idea=params.prompt)
r = request(
RequestParams(
prompt=prompt,
n=self.default_n,
max_tokens=self.default_max_tokens,
temperature=self.default_temperature,
)
)
results_from_critics.extend(r)
log.debug(f"{self.name}: results from critics: {results_from_critics}")
aggregated_results = self.aggregate(results_from_critics)
log.debug(f"{self.name}: aggregated results: {aggregated_results}")
return PipelineResults(
text=aggregated_results, intermediate_text=results_from_critics
)
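# Rough usage sketch (the prompt here is hypothetical): Critic().run(Params(prompt="Build a solar-powered kettle"))
# returns a PipelineResults whose .text holds the aggregated critique and whose
# .intermediate_text holds each individual critic completion.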
class Praise(Pipeline):
"""
Pipeline that comes up with various aspects of a prompt to praise (expansion),
then aggregates them into a short summary.
"""
name = "praise"
default_n = 4
default_max_tokens = 100
default_temperature = 0.9
praises = {
"simple": """
Provide praise for the following:
{data}
""",
"list": """
List aspects of this to be praised:
{data}
""",
}
def run(self, params: Params):
results = {}
for k in self.praises:
prompt = self.praises[k].format(data=params.prompt)
r = request(
RequestParams(
prompt=prompt,
n=self.default_n,
max_tokens=self.default_max_tokens,
temperature=self.default_temperature,
)
)
results[k] = r
aggregated_results = self.aggregate_results(results)
return PipelineResults(text=aggregated_results)
@staticmethod
def aggregate_results(rd: dict[str, str]) -> str:
sep = "\n–––\n"
agg = ""
for k in rd:
for v in rd[k]:
agg += v.strip() + sep
aggregation_prompt = """
The following are the many praises:
{praises}
Represent clearly the given praises as bullet points.
"""
p = aggregation_prompt.format(praises=agg)
result = request(
RequestParams(
prompt=p,
n=1,
max_tokens=500,
temperature=0.5,
)
)
return result[0]
def flatten_and_join(v: list[list[str]]) -> str:
flattened = [item for sublist in v for item in sublist]
s = "\n–––\n".join(flattened)
return s
class Improver(Pipeline):
"""
Identify useful improvements, then rewrite the initial prompt,
integrating the suggested improvements.
"""
name = "improver"
pass
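# Not implemented yet: without its own run(), Improver falls back to the base
# Pipeline.run, which returns None.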
def run_pipeline_set(
pipelines: list[Pipeline], params: Params
) -> dict[str, PipelineResults]:
# TBD async
results = {}
for p in pipelines:
results[p.name] = p.run(params)
return results
def update_prompt():
params = Params(
prompt=st.session_state.prompt,
)
log.debug(f"params: {params}")
pipelines = [all_pipelines[p] for p in st.session_state.pipelines]
results = run_pipeline_set(pipelines, params)
for pname in results:
container_bottom.header(pname)
if st.session_state.show_intermediate_outputs:
for r in results[pname].intermediate_text:
container_bottom.markdown("–––")
container_bottom.markdown(r)
container_bottom.markdown("–––")
container_bottom.markdown(results[pname].text)
if __name__ == "__main__":
params = get_params_from_env()
if params["apikey"] is None:
st.error("Please set OPENAI_API_KEY environment variable.")
openai.api_key = params["apikey"]
all_pipelines = {p.name: p for p in [Critic(), Praise(), Improver(), Dummy()]}
container_top = st.container()
container_bottom = st.container()
with container_top:
st.header("Deliberation system")
st.text_area("Prompt", key="prompt")
st.multiselect(
"Select pipelines",
[p for p in all_pipelines],
key="pipelines",
default=["critic"],
)
if st.button("Submit"):
update_prompt()
with st.expander("Advanced"):
st.checkbox("Show intermediate outputs", key="show_intermediate_outputs")
| [
"\n The following are the results of the critics:\n\n {results}\n\n Represent clearly the given criticism as bullet points, referring text for each.\n ",
"\n The following are the many praises:\n\n {praises}\n\n Represent clearly the given praises as bullet points.\n "
] |
2024-01-10 | AtticElectronics/Youtube | %EC%9D%B8%EA%B3%B5%EC%A7%80%EB%8A%A5%20%EC%8A%A4%ED%94%BC%EC%BB%A4~server~simple_gpt.py | import time
import os
from dotenv import load_dotenv
from openai import OpenAI
class JarvisMemoryManager:
def __init__(self):
load_dotenv()
self.api_key = os.environ['OPENAI_API_KEY']
self.client = OpenAI(api_key=self.api_key)
self.memory_id = self.load_memory()
def create_new_memory(self):
thread = self.client.beta.threads.create()
with open('memory.txt', 'w') as file:
file.write(thread.id)
self.memory_id = thread.id
def load_memory(self):
# Check whether memory.txt exists
if not os.path.exists('memory.txt'):
# If the file does not exist, create a new memory (thread)
self.create_new_memory()
# Read the memory (thread) ID from the file
with open('memory.txt', 'r') as file:
return file.read()
def add_msg(self, msg):
message = self.client.beta.threads.messages.create(
thread_id=self.memory_id,
role="user",
content=msg
)
print(message)
def get_run_id(self, ai_name):
if ai_name == "jarvis_4":
run = self.client.beta.threads.runs.create(
thread_id=self.memory_id,
assistant_id=os.environ['AI_ID4']
)
elif ai_name == "jarvis_3.5":
run = self.client.beta.threads.runs.create(
thread_id=self.memory_id,
assistant_id=os.environ['AI_ID35']
)
elif ai_name =="Terminal_AI":
run = self.client.beta.threads.runs.create(
thread_id=self.memory_id,
assistant_id=os.environ['TERMINAL']
)
else:
return None
return run.id
def wait_run(self, rid):
TIMEOUT = 20
INACTIVE_STATUSES = ['queued', 'in_progress', 'cancelling', 'requires_action']
answer = "20초동안 응답이 없습니다."  # "No response within 20 seconds."
for i in range(TIMEOUT):
run = self.client.beta.threads.runs.retrieve(thread_id=self.memory_id, run_id=rid)
if run.status in INACTIVE_STATUSES:
pass
elif run.status == 'completed':
messages = self.client.beta.threads.messages.list(thread_id=self.memory_id)
answer = messages.data[0].content[0].text.value
break
elif run.status in ['failed', 'cancelled', 'expired']:
answer = f"응답이 {run.status}되었습니다."
break
time.sleep(1)
return answer
if __name__ == "__main__":
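# Demo (messages are in Korean): ask the assistant for the user's name, tell it
# "my name is Kim Cheol-su", ask again, then create a fresh memory thread and ask
# once more to confirm the earlier conversation has been forgotten.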
manager = JarvisMemoryManager()
manager.add_msg("내 이름이 뭐라고?")
rid = manager.get_run_id("jarvis_3.5")
print(manager.wait_run(rid))
manager.add_msg("내 이름은 김철수야")
rid = manager.get_run_id("jarvis_3.5")
print(manager.wait_run(rid))
manager.add_msg("내 이름이 뭐라고?")
rid = manager.get_run_id("jarvis_3.5")
print(manager.wait_run(rid))
manager.create_new_memory()
manager.add_msg("내 이름이 뭐라고?")
rid = manager.get_run_id("jarvis_3.5")
print(manager.wait_run(rid))
| [] |
2024-01-10 | cccs-eric/CyberGPT | tools~borealis_tools.py | import re
import requests
# Import things that are needed generically
from langchain.tools import BaseTool, Tool
from typing import Optional
from llms.azure_llms import create_llm
from langchain.prompts import PromptTemplate
from langchain import PromptTemplate, LLMChain
tool_llm = create_llm(temp=0.4)
tool_llm_temp0 = create_llm(temp=0)
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
# Borealis API subscription key
BOREALIS_KEY = 'af0b5bd9bdee4c4db548711a326bab23'
info_template = """Create an analysis report using this data, include all links generated by modules in a References section: {info}"""
info_prompt_template = PromptTemplate(input_variables=["info"], template=info_template)
answer_chain = LLMChain(llm=tool_llm, prompt=info_prompt_template)
def extract_ips_urls_domains(text):
ip_pattern = r'\b(?:\d{1,3}\.){3}\d{1,3}\b'
url_pattern = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
domain_pattern = r'(?:[-\w]+\.)+[a-zA-Z]{2,}'
ips = re.findall(ip_pattern, text)
urls = re.findall(url_pattern, text)
domains = re.findall(domain_pattern, text)
return ips, urls, domains
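# e.g. extract_ips_urls_domains("seen at 1.2.3.4 and https://example.com")
#      -> (['1.2.3.4'], ['https://example.com'], ['example.com'])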
def make_request(url):
try:
req = requests.get(url)
return req.text
except requests.exceptions.RequestException as e:
print(f"An error occurred while making the request: {e}")
return "Borealis request failed for : "+str(url)
def get_borealis_response(query_list, type="all"):
response = []
modules = 'BEAVER,VIRUSTOTAL,MOOSE,STONEWALL,AUWL,SAFEBROWSING,ALPHABETSOUP,MAXMIND'
for elem in query_list:
if type == "all":
url = 'https://ingestion.collaboration.cyber.gc.ca/borealis/process/'+ elem +'?modules='+ modules +'&subscription-key=' + BOREALIS_KEY
answer = make_request(url)
response.append(answer_chain.run(answer))
else:
print("Invalid type specified. Please provide 'all' as the type.")
return response
def borealis_processing(query):
ips, urls, domains = extract_ips_urls_domains(query)
if len(ips) > 0 or len(domains) > 0:
query_list = ips+domains
response = get_borealis_response(query_list)
result = '\n'.join(response)
return (result)
else:
return None
class borealis_tool(BaseTool):
name = "Borealis IP URL Domain Lookup"
description = "Use Borealis for getting information about ip, url and domains from multiple threat vetting tools"
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
try:
return borealis_processing(query)
except Exception as e:
return str(e)
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
borealis_lookup = borealis_tool()
borealis_tool = Tool(
name = "Borealis IP URL Domain Lookup",
description = "Use Borealis for getting information about ip, url and domains from multiple threat vetting tools",
func= borealis_lookup.run
)
| [
"info",
"Use Borealis for getting information about ip, url and domains from multiple threat vetting tools",
"Create a analysis report using this data, include all links generated by modules in a References section: {info}"
] |
2024-01-10 | cccs-eric/CyberGPT | tools~ip_report_tool.py | #####################################################
#
# General Sequential Multi-Tool
#
# INPUT -> TOOL 1 -> TOOL 2 -> TOOL 3
# v v v
# SUMMARY -> SUMMARY -> SUMMARY -> REPORT
#####################################################
# Import things that are needed generically
from traceback import print_exc
from langchain import LLMChain, PromptTemplate
from langchain.tools import BaseTool, Tool
from typing import Any, Optional
from langchain.chains.question_answering import load_qa_chain
from llms.azure_llms import create_llm
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from tools.borealis_tools import borealis_tool
from langchain.chains.summarize import load_summarize_chain
from tools.ipapi_tools import ipapi_tool
from tools.opencti_tools import openCTI_tool
from tools.shodan_tools import shodan_ip_lookup_tool
tool_name = "IP Report Tool"
tool_description = "Queries all tools that require an IP address as the input. Produces a comprehensive, detailed report for the user."
tool_llm = create_llm()
template = """You have many IP analysis tools at your disposal.
Create a brief technical report based on the output provided from each tool.
The report should include brief but technical details in point form.
Report:
{report}"""
prompt_template = PromptTemplate(input_variables=["report"], template=template)
reporter_chain = LLMChain(llm=tool_llm, prompt=prompt_template, verbose=True)
ip_tools=[
borealis_tool,
openCTI_tool,
shodan_ip_lookup_tool,
ipapi_tool
]
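# Each of these tools accepts an IP (or domain) string: Borealis threat vetting,
# OpenCTI associated-indicator lookup, Shodan host lookup, and IP-API geolocation.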
class ip_report_tool(BaseTool):
name = tool_name
description = tool_description
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
try:
responses = [f"Tool: {tool.name}\n" + tool(query) for tool in ip_tools]
report = ""
for response in responses:
report += f"{response}\n\n"
report = report.replace("User: Analyze above data and report on exposed services and potential vulnerabilities", "")  # str.replace returns a new string, so keep the result
report = reporter_chain.run(report=report)
return report
except:
print_exc()
return "Tool not available for use."
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
qa_retrieve = ip_report_tool()
ip_report_tool = Tool(
name = tool_name,
description = tool_description,
func= qa_retrieve.run
) | [
"You have many IP analysis tools at your disposal.\nCreate a brief technical report based on the output provided from each tool.\nThe report should include brief but technical details in point form.\n\nReport:\n{report}"
] |
2024-01-10 | cccs-eric/CyberGPT | tools~opencti_tools.py | import re
import json
import requests
# Import things that are needed generically
from langchain.tools import BaseTool, Tool
from typing import Optional
from llms.azure_llms import create_llm
from langchain.prompts import PromptTemplate
from langchain import PromptTemplate, LLMChain
from tools.borealis_tools import extract_ips_urls_domains
tool_llm = create_llm(temp=0.4)
tool_llm_temp0 = create_llm(temp=0)
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
info_template = """Extract data from json into a readable report, include all ips, domains and numerical information if there is any: {info}"""
info_prompt_template = PromptTemplate(input_variables=["info"], template=info_template)
answer_chain = LLMChain(llm=tool_llm, prompt=info_prompt_template)
def queryOpenCTI(searchIP):
# Define the GraphQL endpoint URL
url = 'https://opencti.collaboration.cyber.gc.ca/graphql?'
# Set the headers for the request
headers = {
'Content-Type': 'application/json',
'Authorization': 'Bearer ae69080b-5047-469f-852c-f8c01c794a7b',
}
var1 = searchIP
# Define the GraphQL query with the variable inserted
query = '''
query($search: String!) {
stixCyberObservables(search: $search) {
edges {
node {
entity_type
id
observable_value
created_at
updated_at
x_opencti_score
createdBy {
id
name
}
stixCoreRelationships {
edges {
node {
fromType
fromId
from {
__typename
... domainNameFragment
... ipv4Fragment
... malwareFragment
... textFragment
}
entity_type
relationship_type
confidence
toId
to {
__typename
... domainNameFragment
... ipv4Fragment
... malwareFragment
... textFragment
}
}
}
}
}
}
}
stixCoreRelationship(id: "e62ca35f-dfc5-4b43-a905-8fece8572cd6") {
createdBy {
__typename
id
entity_type
name
}
}
}
fragment domainNameFragment on DomainName {
value
}
fragment ipv4Fragment on IPv4Addr {
value
}
fragment malwareFragment on Malware {
name
}
fragment textFragment on Text {
value
}
'''
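# The query searches STIX cyber observables matching the input value and, for each hit,
# pulls its core relationships, resolving both endpoints to domain names, IPv4 addresses,
# malware names, or text values via the fragments above.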
payload = {
'query': query,
'variables': {
'search': var1
}
}
response = requests.post(url, headers=headers, json=payload)
if response.status_code == 200:
return str(response.json())
else:
print('Request failed with status code:', response.status_code)
def get_openCTI_response(query_list):
data = []
for elem in query_list:
data.append(answer_chain.run(queryOpenCTI(elem)))
return "\n".join(data)
def openCTI_search_processing(query):
ips, urls, domains = extract_ips_urls_domains(query)
if len(ips) > 0 or len(domains) > 0:
query_list = ips+domains
response = get_openCTI_response(query_list)
return response
else:
return None
class openCTI_tool(BaseTool):
name = "Associated IP Lookup"
description = "use for getting IPs associated with a domain or IP"
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
try:
return openCTI_search_processing(query)
except Exception as e:
return str(e)
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
openCTI_lookup = openCTI_tool()
openCTI_tool = Tool(
name = "Associated IP Lookup",
description = "use for getting IPs associated with a domain or IP",
func= openCTI_lookup.run
)
| [
"Extract data from json into a readable report, include all ips, domains and numerical information if there is any: {info}",
"use for getting IPs associated with a domain or IP",
"info"
] |
2024-01-10 | cccs-eric/CyberGPT | tools~ipapi_tools.py | import re
import requests
# Import things that are needed generically
from langchain.tools import BaseTool, Tool
from typing import Optional
from llms.azure_llms import create_llm
from langchain.prompts import PromptTemplate
from langchain import PromptTemplate, LLMChain
from tools.borealis_tools import extract_ips_urls_domains
tool_llm = create_llm(temp=0.4)
tool_llm_temp0 = create_llm(temp=0)
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
def make_request(url):
try:
req = requests.get(url)
return req.text
except requests.exceptions.RequestException as e:
print(f"An error occurred while making the request: {e}")
return "IP-API request failed for : "+str(url)
def get_ipapi_response(query_list):
response = []
for elem in query_list:
url = 'http://ip-api.com/json/'+elem
answer = make_request(url)
response.append(answer)
return response
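# Each entry is the raw JSON from the free ip-api.com endpoint, which (assuming the
# default field set) includes geolocation and provider fields such as country, city, and isp.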
def ipapi_processing(query):
ips, urls, domains = extract_ips_urls_domains(query)
if len(ips) > 0 or len(domains) > 0:
query_list = ips+domains
response = get_ipapi_response(query_list)
result = '\n'.join(response)
return (result)
else:
return None
class ipapi_tool(BaseTool):
name = "IP Lookup"
description = "use for getting an ip address from a domain, as well as geolocation and internet provider information"
def _run(
self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None
) -> str:
"""Use the tool."""
try:
return ipapi_processing(query)
except Exception as e:
return str(e)
async def _arun(
self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None
) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
ipapi_lookup = ipapi_tool()
ipapi_tool = Tool(
name = "IP Lookup",
description = "use for getting an ip address from a domain, as well as geolocation and internet provider information",
func= ipapi_lookup.run
)
| [
"use for getting an ip address from a domain, as well as geolocation and internet provider information"
] |
2024-01-10 | nishantkr18/mail-using-doc | src~load_data.py | """
Documents are loaded into memory
"""
from typing import List
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.docstore.document import Document
def load_data() -> List[Document]:
"""
The function that loads the data.
"""
loader = DirectoryLoader('./docs/', glob="**/*.txt", loader_cls=TextLoader,
loader_kwargs={'autodetect_encoding': True},
)
try:
docs = loader.load()
print(f"{len(docs)} documents loaded.")
except:
print("Error loading documents.")
raise
return docs
| [] |
2024-01-10 | nishantkr18/mail-using-doc | src~process_data.py | """
This module contains the code to:
1. Split the data into chunks (sentences).
2. Create vector embeddings of these sentences.
3. Store them in a vectorstore.
"""
from typing import List
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from langchain.docstore.document import Document
from chromadb.config import Settings
import chromadb
def process_data(docs: List[Document]):
"""
The function that processes the data.
"""
# Split into sentences
source_chunks = []
splitter = CharacterTextSplitter(
separator=".", chunk_size=500, chunk_overlap=0)
for source in docs:
for chunk in splitter.split_text(source.page_content):
source_chunks.append(
Document(page_content=chunk, metadata=source.metadata))
print('chunks created: ', len(source_chunks))
# Create vector embeddings and store in vectorstore.
print('Creating embeddings...')
embedding = HuggingFaceEmbeddings()
print('Creating vectorstore...')
client = chromadb.Client(Settings(
chroma_db_impl="duckdb+parquet",
persist_directory="./.vectorstore"
))
client.persist()
# Cleaning up the client
client.reset()
vectorstore = Chroma(client=client)
vectorstore = Chroma.from_documents(
documents=source_chunks, embedding=embedding, client=client)
return vectorstore
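# A minimal usage sketch (assuming this repo's layout, i.e. load_data() from src/load_data.py
# and a populated ./docs/ folder):
#
#   docs = load_data()
#   vectorstore = process_data(docs)
#   hits = vectorstore.similarity_search("topic mentioned in my mails", k=3)
#
# similarity_search embeds the query with the same HuggingFace model and returns the
# closest chunks from the persisted Chroma collection.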
| [] |
2024-01-10 | mitergpt/ai_edu_sys | Api_Conn~Distributed_Request~gpt_conn.py | import threading
import openai
import time
import random
api_key_1 = "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*13112dasdasd"
api_key_2 = "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*2%eqeqeqe"
lock_1 = threading.Lock()
lock_2 = threading.Lock()
thread_count_1,thread_count_2=0,0
thread_count=[thread_count_1,thread_count_2] # track how many requests are in flight on each path
time_list=[[],[]]
def make_request(server_i,api_key, message_1, message_2, thread_count, lock):
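# Sends one chat completion with the given key and times the call so the loop below can
# balance load between the two keys; `cycle` is meant to allow up to three retries.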
start_time = time.time()
cycle=3
with lock:
thread_count += 1
openai.api_key = api_key
while cycle>0 :
try:
response = openai.ChatCompletion.create(
model="gpt-4-turbo",
messages=[
{"role": "system", "content": message_1},
{"role": "user", "content": message_2},
],
temperature=0,
max_tokens=1024
)
except:
cycle -=1
return
with lock:
thread_count -= 1
end_time = time.time()
wait_time=end_time-start_time # processing time for this request
time_list[server_i-1].append(wait_time) # store it in this path's recent timing history (server_i is 1-based)
return response
# Aggregate the per-path processing times and load-balance requests between the two API keys
while True:
total_1,total_2=0,0
for a in time_list[0]:
total_1+=a
for b in time_list[1]:
total_2+=b
time.sleep(1)
# fraction of total processing time spent on path 1 (guard against an empty history)
p = total_1/(total_1+total_2) if (total_1+total_2) > 0 else 0.5
if random.random()>p :
thread_1 = threading.Thread(target=make_request, args=(1,api_key_1, '我是刘洋', '你是喜羊羊',thread_count[0],lock_1))
else :
thread_1 = threading.Thread(target=make_request, args=(2,api_key_2, '我是刘洋', '你是喜羊羊',thread_count[1],lock_2))
if random.random()>p :
thread_2 = threading.Thread(target=make_request, args=(1,api_key_1, '我是刘洋', '你是喜羊羊',thread_count[0],lock_1))
else :
thread_2=threading.Thread(target=make_request, args=(2,api_key_2, '我是刘洋', '你是喜羊羊',thread_count[1],lock_2))
thread_1.start()
thread_2.start()
thread_1.join()
thread_2.join()
"""后续需要把这些api进行扩展,还需要进一步考虑负载均衡,备份节点和运行速度,三者之间的平衡"""
| [] |
2024-01-10 | yousenwang/langchain_llm | download.py | from langchain.embeddings import HuggingFaceInstructEmbeddings
instructor_embeddings = HuggingFaceInstructEmbeddings(
model_name="hkunlp/instructor-xl",
model_kwargs={"device": "cuda"}
) | [] |
2024-01-10 | kazuhiro4949/open-interpreter | interpreter~interpreter.py | """
Right off the bat, to any contributors (a message from Killian):
First of all, THANK YOU. Open Interpreter is ALIVE, ALL OVER THE WORLD because of YOU.
While this project is rapidly growing, I've decided it's best for us to allow some technical debt.
The code here has duplication. It has imports in weird places. It has been spaghettified to add features more quickly.
In my opinion **this is critical** to keep up with the pace of demand for this project.
At the same time, I plan on pushing a significant re-factor of `interpreter.py` and `code_interpreter.py` ~ September 16th.
After the re-factor, Open Interpreter's source code will be much simpler, and much more fun to dive into.
Especially if you have ideas and **EXCITEMENT** about the future of this project, chat with me on discord: https://discord.gg/6p3fD6rBVm
- killian
"""
from .cli import cli
from .utils import merge_deltas, parse_partial_json
from .message_block import MessageBlock
from .code_block import CodeBlock
from .code_interpreter import CodeInterpreter
from .get_hf_llm import get_hf_llm
import os
import time
import traceback
import json
import platform
import openai
import litellm
import pkg_resources
import getpass
import requests
import tokentrim as tt
from rich import print
from rich.markdown import Markdown
from rich.rule import Rule
try:
import readline
except:
# Sometimes this doesn't work (https://stackoverflow.com/questions/10313765/simple-swig-python-example-in-vs2008-import-error-internal-pyreadline-erro)
pass
# Function schema for gpt-4
function_schema = {
"name": "run_code",
"description":
"Executes code on the user's machine and returns the output",
"parameters": {
"type": "object",
"properties": {
"language": {
"type": "string",
"description":
"The programming language",
"enum": ["python", "R", "shell", "applescript", "javascript", "html"]
},
"code": {
"type": "string",
"description": "The code to execute"
}
},
"required": ["language", "code"]
},
}
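# With this schema the model answers with a `function_call` whose `arguments` field is a
# JSON string, e.g. '{"language": "python", "code": "print(2 + 2)"}'; parse_partial_json
# below is used to turn that (possibly still-streaming) string into a dict.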
# Message for when users don't have an OpenAI API key.
missing_api_key_message = """> OpenAI API key not found
To use `GPT-4` (recommended) please provide an OpenAI API key.
To use `Code-Llama` (free but less capable) press `enter`.
"""
# Message for when users don't have an OpenAI API key.
missing_azure_info_message = """> Azure OpenAI Service API info not found
To use `GPT-4` (recommended) please provide an Azure OpenAI API key, a API base, a deployment name and a API version.
To use `Code-Llama` (free but less capable) press `enter`.
"""
confirm_mode_message = """
**Open Interpreter** will require approval before running code. Use `interpreter -y` to bypass this.
Press `CTRL-C` to exit.
"""
class Interpreter:
def __init__(self):
self.messages = []
self.temperature = 0.001
self.api_key = None
self.auto_run = False
self.local = False
self.model = "gpt-4"
self.debug_mode = False
self.api_base = None # Will set it to whatever OpenAI wants
self.context_window = 2000 # For local models only
self.max_tokens = 750 # For local models only
# Azure OpenAI
self.use_azure = False
self.azure_api_base = None
self.azure_api_version = None
self.azure_deployment_name = None
self.azure_api_type = "azure"
# Get default system message
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'system_message.txt'), 'r') as f:
self.system_message = f.read().strip()
# Store Code Interpreter instances for each language
self.code_interpreters = {}
# No active block to start
# (blocks are visual representation of messages on the terminal)
self.active_block = None
# Note: While Open Interpreter can use Llama, we will prioritize gpt-4.
# gpt-4 is faster, smarter, can call functions, and is all-around easier to use.
# This makes gpt-4 better aligned with Open Interpreters priority to be easy to use.
self.llama_instance = None
def cli(self):
# The cli takes the current instance of Interpreter,
# modifies it according to command line flags, then runs chat.
cli(self)
def get_info_for_system_message(self):
"""
Gets relevant information for the system message.
"""
info = ""
# Add user info
username = getpass.getuser()
current_working_directory = os.getcwd()
operating_system = platform.system()
info += f"[User Info]\nName: {username}\nCWD: {current_working_directory}\nOS: {operating_system}"
if not self.local:
# Open Procedures is an open-source database of tiny, structured coding tutorials.
# We can query it semantically and append relevant tutorials/procedures to our system message:
# Use the last two messages' content or function call to semantically search
query = []
for message in self.messages[-2:]:
message_for_semantic_search = {"role": message["role"]}
if "content" in message:
message_for_semantic_search["content"] = message["content"]
if "function_call" in message and "parsed_arguments" in message["function_call"]:
message_for_semantic_search["function_call"] = message["function_call"]["parsed_arguments"]
query.append(message_for_semantic_search)
# Use them to query Open Procedures
url = "https://open-procedures.replit.app/search/"
try:
relevant_procedures = requests.get(url, data=json.dumps(query)).json()["procedures"]
info += "\n\n# Recommended Procedures\n" + "\n---\n".join(relevant_procedures) + "\nIn your plan, include steps and, if present, **EXACT CODE SNIPPETS** (especially for deprecation notices, **WRITE THEM INTO YOUR PLAN -- underneath each numbered step** as they will VANISH once you execute your first line of code, so WRITE THEM DOWN NOW if you need them) from the above procedures if they are relevant to the task. Again, include **VERBATIM CODE SNIPPETS** from the procedures above if they are relevant to the task **directly in your plan.**"
except:
# For someone, this failed for a super secure SSL reason.
# Since it's not strictly necessary, let's worry about that another day. Should probably log this somehow though.
pass
elif self.local:
# Tell Code-Llama how to run code.
info += "\n\nTo run code, write a fenced code block (i.e ```python, R or ```shell) in markdown. When you close it with ```, it will be run. You'll then be given its output."
# We make references in system_message.txt to the "function" it can call, "run_code".
return info
def reset(self):
"""
Resets the interpreter.
"""
self.messages = []
self.code_interpreters = {}
def load(self, messages):
self.messages = messages
def handle_undo(self, arguments):
# Removes all messages after the most recent user entry (and the entry itself).
# Therefore user can jump back to the latest point of conversation.
# Also gives a visual representation of the messages removed.
if len(self.messages) == 0:
return
# Find the index of the last 'role': 'user' entry
last_user_index = None
for i, message in enumerate(self.messages):
if message.get('role') == 'user':
last_user_index = i
removed_messages = []
# Remove all messages after the last 'role': 'user'
if last_user_index is not None:
removed_messages = self.messages[last_user_index:]
self.messages = self.messages[:last_user_index]
print("") # Aesthetics.
# Print out a preview of what messages were removed.
for message in removed_messages:
if 'content' in message and message['content'] != None:
print(Markdown(f"**Removed message:** `\"{message['content'][:30]}...\"`"))
elif 'function_call' in message:
print(Markdown(f"**Removed codeblock**")) # TODO: Could add preview of code removed here.
print("") # Aesthetics.
def handle_help(self, arguments):
commands_description = {
"%debug [true/false]": "Toggle debug mode. Without arguments or with 'true', it enters debug mode. With 'false', it exits debug mode.",
"%reset": "Resets the current session.",
"%undo": "Remove previous messages and its response from the message history.",
"%save_message [path]": "Saves messages to a specified JSON path. If no path is provided, it defaults to 'messages.json'.",
"%load_message [path]": "Loads messages from a specified JSON path. If no path is provided, it defaults to 'messages.json'.",
"%help": "Show this help message.",
}
base_message = [
"> **Available Commands:**\n\n"
]
# Add each command and its description to the message
for cmd, desc in commands_description.items():
base_message.append(f"- `{cmd}`: {desc}\n")
additional_info = [
"\n\nFor further assistance, please join our community Discord or consider contributing to the project's development."
]
# Combine the base message with the additional info
full_message = base_message + additional_info
print(Markdown("".join(full_message)))
def handle_debug(self, arguments=None):
if arguments == "" or arguments == "true":
print(Markdown("> Entered debug mode"))
print(self.messages)
self.debug_mode = True
elif arguments == "false":
print(Markdown("> Exited debug mode"))
self.debug_mode = False
else:
print(Markdown("> Unknown argument to debug command."))
def handle_reset(self, arguments):
self.reset()
print(Markdown("> Reset Done"))
def default_handle(self, arguments):
print(Markdown("> Unknown command"))
self.handle_help(arguments)
def handle_save_message(self, json_path):
if json_path == "":
json_path = "messages.json"
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'w') as f:
json.dump(self.messages, f, indent=2)
print(Markdown(f"> messages json export to {os.path.abspath(json_path)}"))
def handle_load_message(self, json_path):
if json_path == "":
json_path = "messages.json"
if not json_path.endswith(".json"):
json_path += ".json"
with open(json_path, 'r') as f:
self.load(json.load(f))
print(Markdown(f"> messages json loaded from {os.path.abspath(json_path)}"))
def handle_command(self, user_input):
# split the command into the command and the arguments, by the first whitespace
switch = {
"help": self.handle_help,
"debug": self.handle_debug,
"reset": self.handle_reset,
"save_message": self.handle_save_message,
"load_message": self.handle_load_message,
"undo": self.handle_undo,
}
user_input = user_input[1:].strip() # Capture the part after the `%`
command = user_input.split(" ")[0]
arguments = user_input[len(command):].strip()
action = switch.get(command,
self.default_handle) # Get the function from the dictionary, or default_handle if not found
action(arguments) # Execute the function
def chat(self, message=None, return_messages=False):
# Connect to an LLM (an large language model)
if not self.local:
# gpt-4
self.verify_api_key()
# ^ verify_api_key may set self.local to True, so we run this as an 'if', not 'elif':
if self.local:
# Code-Llama
if self.llama_instance == None:
# Find or install Code-Llama
try:
self.llama_instance = get_hf_llm(self.model, self.debug_mode, self.context_window)
if self.llama_instance == None:
# They cancelled.
return
except:
traceback.print_exc()
# If it didn't work, apologize and switch to GPT-4
print(Markdown("".join([
f"> Failed to install `{self.model}`.",
f"\n\n**Common Fixes:** You can follow our simple setup docs at the link below to resolve common errors.\n\n```\nhttps://github.com/KillianLucas/open-interpreter/tree/main/docs\n```",
f"\n\n**If you've tried that and you're still getting an error, we have likely not built the proper `{self.model}` support for your system.**",
"\n\n*( Running language models locally is a difficult task!* If you have insight into the best way to implement this across platforms/architectures, please join the Open Interpreter community Discord and consider contributing the project's development. )",
"\n\nPress enter to switch to `GPT-4` (recommended)."
])))
input()
# Switch to GPT-4
self.local = False
self.model = "gpt-4"
self.verify_api_key()
# Display welcome message
welcome_message = ""
if self.debug_mode:
welcome_message += "> Entered debug mode"
# If self.local, we actually don't use self.model
# (self.auto_run is like advanced usage, we display no messages)
if not self.local and not self.auto_run:
if self.use_azure:
notice_model = f"{self.azure_deployment_name} (Azure)"
else:
notice_model = f"{self.model.upper()}"
welcome_message += f"\n> Model set to `{notice_model}`\n\n**Tip:** To run locally, use `interpreter --local`"
if self.local:
welcome_message += f"\n> Model set to `{self.model}`"
# If not auto_run, tell the user we'll ask permission to run code
# We also tell them here how to exit Open Interpreter
if not self.auto_run:
welcome_message += "\n\n" + confirm_mode_message
welcome_message = welcome_message.strip()
# Print welcome message with newlines on either side (aesthetic choice)
# unless we're starting with a blockquote (aesthetic choice)
if welcome_message != "":
if welcome_message.startswith(">"):
print(Markdown(welcome_message), '')
else:
print('', Markdown(welcome_message), '')
# Check if `message` was passed in by user
if message:
# If it was, we respond non-interactivley
self.messages.append({"role": "user", "content": message})
self.respond()
else:
# If it wasn't, we start an interactive chat
while True:
try:
user_input = input("> ").strip()
except EOFError:
break
except KeyboardInterrupt:
print() # Aesthetic choice
break
# Use `readline` to let users up-arrow to previous user messages,
# which is a common behavior in terminals.
try:
readline.add_history(user_input)
except:
# Sometimes this doesn't work (https://stackoverflow.com/questions/10313765/simple-swig-python-example-in-vs2008-import-error-internal-pyreadline-erro)
pass
# If the user input starts with a `%`
if user_input.startswith("%"):
self.handle_command(user_input)
continue
# Add the user message to self.messages
self.messages.append({"role": "user", "content": user_input})
# Respond, but gracefully handle CTRL-C / KeyboardInterrupt
try:
self.respond()
except KeyboardInterrupt:
pass
finally:
# Always end the active block. Multiple Live displays = issues
self.end_active_block()
if return_messages:
return self.messages
def verify_api_key(self):
"""
Makes sure we have an AZURE_API_KEY or OPENAI_API_KEY.
"""
if self.use_azure:
all_env_available = (
('AZURE_API_KEY' in os.environ or 'OPENAI_API_KEY' in os.environ) and
'AZURE_API_BASE' in os.environ and
'AZURE_API_VERSION' in os.environ and
'AZURE_DEPLOYMENT_NAME' in os.environ)
if all_env_available:
self.api_key = os.environ.get('AZURE_API_KEY') or os.environ['OPENAI_API_KEY']
self.azure_api_base = os.environ['AZURE_API_BASE']
self.azure_api_version = os.environ['AZURE_API_VERSION']
self.azure_deployment_name = os.environ['AZURE_DEPLOYMENT_NAME']
self.azure_api_type = os.environ.get('AZURE_API_TYPE', 'azure')
else:
# This is probably their first time here!
self._print_welcome_message()
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_azure_info_message), '', Rule(style="white"), '')
response = input("Azure OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
print(Markdown(
"> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."),
'')
time.sleep(2)
print(Rule(style="white"))
# Temporarily, for backwards (behavioral) compatibility, we've moved this part of llama_2.py here.
# AND BELOW.
# This way, when folks hit interpreter --local, they get the same experience as before.
import inquirer
print('', Markdown("**Open Interpreter** will use `Code Llama` for local execution. Use your arrow keys to set up the model."), '')
models = {
'7B': 'TheBloke/CodeLlama-7B-Instruct-GGUF',
'13B': 'TheBloke/CodeLlama-13B-Instruct-GGUF',
'34B': 'TheBloke/CodeLlama-34B-Instruct-GGUF'
}
parameter_choices = list(models.keys())
questions = [inquirer.List('param', message="Parameter count (smaller is faster, larger is more capable)", choices=parameter_choices)]
answers = inquirer.prompt(questions)
chosen_param = answers['param']
# THIS is more in line with the future. You just say the model you want by name:
self.model = models[chosen_param]
self.local = True
return
else:
self.api_key = response
self.azure_api_base = input("Azure OpenAI API base: ")
self.azure_deployment_name = input("Azure OpenAI deployment name of GPT: ")
self.azure_api_version = input("Azure OpenAI API version: ")
print('', Markdown(
"**Tip:** To save this key for later, run `export AZURE_API_KEY=your_api_key AZURE_API_BASE=your_api_base AZURE_API_VERSION=your_api_version AZURE_DEPLOYMENT_NAME=your_gpt_deployment_name` on Mac/Linux or `setx AZURE_API_KEY your_api_key AZURE_API_BASE your_api_base AZURE_API_VERSION your_api_version AZURE_DEPLOYMENT_NAME your_gpt_deployment_name` on Windows."),
'')
time.sleep(2)
print(Rule(style="white"))
litellm.api_type = self.azure_api_type
litellm.api_base = self.azure_api_base
litellm.api_version = self.azure_api_version
litellm.api_key = self.api_key
else:
if self.api_key == None:
if 'OPENAI_API_KEY' in os.environ:
self.api_key = os.environ['OPENAI_API_KEY']
else:
# This is probably their first time here!
self._print_welcome_message()
time.sleep(1)
print(Rule(style="white"))
print(Markdown(missing_api_key_message), '', Rule(style="white"), '')
response = input("OpenAI API key: ")
if response == "":
# User pressed `enter`, requesting Code-Llama
print(Markdown(
"> Switching to `Code-Llama`...\n\n**Tip:** Run `interpreter --local` to automatically use `Code-Llama`."),
'')
time.sleep(2)
print(Rule(style="white"))
# Temporarily, for backwards (behavioral) compatibility, we've moved this part of llama_2.py here.
# AND ABOVE.
# This way, when folks hit interpreter --local, they get the same experience as before.
import inquirer
print('', Markdown("**Open Interpreter** will use `Code Llama` for local execution. Use your arrow keys to set up the model."), '')
models = {
'7B': 'TheBloke/CodeLlama-7B-Instruct-GGUF',
'13B': 'TheBloke/CodeLlama-13B-Instruct-GGUF',
'34B': 'TheBloke/CodeLlama-34B-Instruct-GGUF'
}
parameter_choices = list(models.keys())
questions = [inquirer.List('param', message="Parameter count (smaller is faster, larger is more capable)", choices=parameter_choices)]
answers = inquirer.prompt(questions)
chosen_param = answers['param']
# THIS is more in line with the future. You just say the model you want by name:
self.model = models[chosen_param]
self.local = True
return
else:
self.api_key = response
print('', Markdown("**Tip:** To save this key for later, run `export OPENAI_API_KEY=your_api_key` on Mac/Linux or `setx OPENAI_API_KEY your_api_key` on Windows."), '')
time.sleep(2)
print(Rule(style="white"))
litellm.api_key = self.api_key
if self.api_base:
litellm.api_base = self.api_base
def end_active_block(self):
if self.active_block:
self.active_block.end()
self.active_block = None
def respond(self):
# Add relevant info to system_message
# (e.g. current working directory, username, os, etc.)
info = self.get_info_for_system_message()
# This is hacky, as we should have a different (minified) prompt for CodeLLama,
# but for now, to make the prompt shorter and remove "run_code" references, just get the first 2 lines:
if self.local:
self.system_message = "\n".join(self.system_message.split("\n")[:2])
self.system_message += "\nOnly do what the user asks you to do, then ask what they'd like to do next."
system_message = self.system_message + "\n\n" + info
if self.local:
messages = tt.trim(self.messages, max_tokens=(self.context_window-self.max_tokens-25), system_message=system_message)
else:
messages = tt.trim(self.messages, self.model, system_message=system_message)
if self.debug_mode:
print("\n", "Sending `messages` to LLM:", "\n")
print(messages)
print()
# Make LLM call
if not self.local:
# GPT
error = ""
for _ in range(3): # 3 retries
try:
if self.use_azure:
response = litellm.completion(
f"azure/{self.azure_deployment_name}",
messages=messages,
functions=[function_schema],
temperature=self.temperature,
stream=True,
)
else:
if self.api_base:
# The user set the api_base. litellm needs this to be "custom/{model}"
response = litellm.completion(
api_base=self.api_base,
model = "custom/" + self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
else:
# Normal OpenAI call
response = litellm.completion(
model=self.model,
messages=messages,
functions=[function_schema],
stream=True,
temperature=self.temperature,
)
break
except:
if self.debug_mode:
traceback.print_exc()
error = traceback.format_exc()
time.sleep(3)
else:
raise Exception(error)
elif self.local:
# Code-Llama
# Convert messages to prompt
# (This only works if the first message is the only system message)
def messages_to_prompt(messages):
for message in messages:
# Happens if it immediately writes code
if "role" not in message:
message["role"] = "assistant"
# Falcon prompt template
if "falcon" in self.model.lower():
formatted_messages = ""
for message in messages:
formatted_messages += f"{message['role'].capitalize()}: {message['content']}\n"
formatted_messages = formatted_messages.strip()
else:
# Llama prompt template
# Extracting the system prompt and initializing the formatted string with it.
system_prompt = messages[0]['content']
formatted_messages = f"<s>[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n"
# Loop starting from the first user message
for index, item in enumerate(messages[1:]):
role = item['role']
content = item['content']
if role == 'user':
formatted_messages += f"{content} [/INST] "
elif role == 'function':
formatted_messages += f"Output: {content} [/INST] "
elif role == 'assistant':
formatted_messages += f"{content} </s><s>[INST] "
# Remove the trailing '<s>[INST] ' from the final output
if formatted_messages.endswith("<s>[INST] "):
formatted_messages = formatted_messages[:-10]
return formatted_messages
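# Resulting Llama-style prompt shape (read off the f-strings above):
#   <s>[INST] <<SYS>>\n{system}\n<</SYS>>\n{user} [/INST] {assistant} </s><s>[INST] Output: {function output} [/INST] ...
# Falcon models instead get plain "Role: content" lines.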
prompt = messages_to_prompt(messages)
# Lmao i can't believe this works (it does need this btw)
if messages[-1]["role"] != "function":
prompt += "Let's explore this. By the way, I can run code on your machine by writing the code in a markdown code block. This works for shell, javascript, python, R, and applescript. I'm going to try to do this for your task. Anyway, "
elif messages[-1]["role"] == "function" and messages[-1]["content"] != "No output":
prompt += "Given the output of the code I just ran, "
elif messages[-1]["role"] == "function" and messages[-1]["content"] == "No output":
prompt += "Given the fact that the code I just ran produced no output, "
if self.debug_mode:
# we have to use builtins bizarrely! because rich.print interprets "[INST]" as something meaningful
import builtins
builtins.print("TEXT PROMPT SEND TO LLM:\n", prompt)
# Run Code-Llama
response = self.llama_instance(
prompt,
stream=True,
temperature=self.temperature,
stop=["</s>"],
max_tokens=750 # context window is set to 1800, messages are trimmed to 1000... 700 seems nice
)
# Initialize message, function call trackers, and active block
self.messages.append({})
in_function_call = False
llama_function_call_finished = False
self.active_block = None
for chunk in response:
if self.use_azure and ('choices' not in chunk or len(chunk['choices']) == 0):
# Azure OpenAI Service may return empty chunk
continue
if self.local:
if "content" not in messages[-1]:
# This is the first chunk. We'll need to capitalize it, because our prompt ends in a ", "
chunk["choices"][0]["text"] = chunk["choices"][0]["text"].capitalize()
# We'll also need to add "role: assistant", CodeLlama will not generate this
messages[-1]["role"] = "assistant"
delta = {"content": chunk["choices"][0]["text"]}
else:
delta = chunk["choices"][0]["delta"]
# Accumulate deltas into the last message in messages
self.messages[-1] = merge_deltas(self.messages[-1], delta)
# Check if we're in a function call
if not self.local:
condition = "function_call" in self.messages[-1]
elif self.local:
# Since Code-Llama can't call functions, we just check if we're in a code block.
# This simply returns true if the number of "```" in the message is odd.
if "content" in self.messages[-1]:
condition = self.messages[-1]["content"].count("```") % 2 == 1
else:
# If it hasn't made "content" yet, we're certainly not in a function call.
condition = False
if condition:
# We are in a function call.
# Check if we just entered a function call
if in_function_call == False:
# If so, end the last block,
self.end_active_block()
# Print newline if it was just a code block or user message
# (this just looks nice)
last_role = self.messages[-2]["role"]
if last_role == "user" or last_role == "function":
print()
# then create a new code block
self.active_block = CodeBlock()
# Remember we're in a function_call
in_function_call = True
# Now let's parse the function's arguments:
if not self.local:
# gpt-4
# Parse arguments and save to parsed_arguments, under function_call
if "arguments" in self.messages[-1]["function_call"]:
arguments = self.messages[-1]["function_call"]["arguments"]
new_parsed_arguments = parse_partial_json(arguments)
if new_parsed_arguments:
# Only overwrite what we have if it's not None (which means it failed to parse)
self.messages[-1]["function_call"][
"parsed_arguments"] = new_parsed_arguments
elif self.local:
# Code-Llama
# Parse current code block and save to parsed_arguments, under function_call
if "content" in self.messages[-1]:
content = self.messages[-1]["content"]
if "```" in content:
# Split by "```" to get the last open code block
blocks = content.split("```")
current_code_block = blocks[-1]
lines = current_code_block.split("\n")
if content.strip() == "```": # Hasn't outputted a language yet
language = None
else:
if lines[0] != "":
language = lines[0].strip()
else:
language = "python"
# In anticipation of its dumbassery let's check if "pip" is in there
if len(lines) > 1:
if lines[1].startswith("pip"):
language = "shell"
# Join all lines except for the language line
code = '\n'.join(lines[1:]).strip("` \n")
arguments = {"code": code}
if language: # We only add this if we have it-- the second we have it, an interpreter gets fired up (I think? maybe I'm wrong)
if language == "bash":
language = "shell"
arguments["language"] = language
# Code-Llama won't make a "function_call" property for us to store this under, so:
if "function_call" not in self.messages[-1]:
self.messages[-1]["function_call"] = {}
self.messages[-1]["function_call"]["parsed_arguments"] = arguments
else:
# We are not in a function call.
# Check if we just left a function call
if in_function_call == True:
if self.local:
# This is the same as when gpt-4 gives finish_reason as function_call.
# We have just finished a code block, so now we should run it.
llama_function_call_finished = True
# Remember we're not in a function_call
in_function_call = False
# If there's no active block,
if self.active_block == None:
# Create a message block
self.active_block = MessageBlock()
# Update active_block
self.active_block.update_from_message(self.messages[-1])
# Check if we're finished
if chunk["choices"][0]["finish_reason"] or llama_function_call_finished:
if chunk["choices"][
0]["finish_reason"] == "function_call" or llama_function_call_finished:
# Time to call the function!
# (Because this is Open Interpreter, we only have one function.)
if self.debug_mode:
print("Running function:")
print(self.messages[-1])
print("---")
# Ask for user confirmation to run code
if self.auto_run == False:
# End the active block so you can run input() below it
# Save language and code so we can create a new block in a moment
self.active_block.end()
language = self.active_block.language
code = self.active_block.code
# Prompt user
response = input(" Would you like to run this code? (y/n)\n\n ")
print("") # <- Aesthetic choice
if response.strip().lower() == "y":
# Create a new, identical block where the code will actually be run
self.active_block = CodeBlock()
self.active_block.language = language
self.active_block.code = code
else:
# User declined to run code.
self.active_block.end()
self.messages.append({
"role":
"function",
"name":
"run_code",
"content":
"User decided not to run this code."
})
return
# If we couldn't parse its arguments, we need to try again.
if not self.local and "parsed_arguments" not in self.messages[-1]["function_call"]:
# After collecting some data via the below instruction to users,
# This is the most common failure pattern: https://github.com/KillianLucas/open-interpreter/issues/41
# print("> Function call could not be parsed.\n\nPlease open an issue on Github (openinterpreter.com, click Github) and paste the following:")
# print("\n", self.messages[-1]["function_call"], "\n")
# time.sleep(2)
# print("Informing the language model and continuing...")
# Since it can't really be fixed without something complex,
# let's just berate the LLM then go around again.
self.messages.append({
"role": "function",
"name": "run_code",
"content": """Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON."""
})
self.respond()
return
# Create or retrieve a Code Interpreter for this language
language = self.messages[-1]["function_call"]["parsed_arguments"][
"language"]
if language not in self.code_interpreters:
self.code_interpreters[language] = CodeInterpreter(language, self.debug_mode)
code_interpreter = self.code_interpreters[language]
# Let this Code Interpreter control the active_block
code_interpreter.active_block = self.active_block
code_interpreter.run()
# End the active_block
self.active_block.end()
# Append the output to messages
# Explicitly tell it if there was no output (sometimes "" = hallucinates output)
self.messages.append({
"role": "function",
"name": "run_code",
"content": self.active_block.output if self.active_block.output else "No output"
})
# Go around again
self.respond()
if chunk["choices"][0]["finish_reason"] != "function_call":
# Done!
# Code Llama likes to output "###" at the end of every message for some reason
if self.local and "content" in self.messages[-1]:
self.messages[-1]["content"] = self.messages[-1]["content"].strip().rstrip("#")
self.active_block.update_from_message(self.messages[-1])
time.sleep(0.1)
self.active_block.end()
return
def _print_welcome_message(self):
print("", Markdown("●"), "", Markdown(f"\nWelcome to **Open Interpreter**.\n"), "")
| [
"User decided not to run this code.",
"No output",
"Your function call could not be parsed. Please use ONLY the `run_code` function, which takes two parameters: `code` and `language`. Your response should be formatted as a JSON.",
"Let's explore this. By the way, I can run code on your machine by writing the code in a markdown code block. This works for shell, javascript, python, R, and applescript. I'm going to try to do this for your task. Anyway, ",
"Given the output of the code I just ran, ",
"content",
"Given the fact that the code I just ran produced no output, "
] |
2024-01-10 | theobjectivedad/langchain | langchain~evaluation~comparison~eval_chain.py | """Base classes for comparing the output of two models."""
from __future__ import annotations
from typing import Any, Optional
from pydantic import Extra, Field
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.evaluation.comparison.prompt import PROMPT, PROMPT_WITH_REFERENCE
from langchain.evaluation.schema import LLMEvalChain, PairwiseStringEvaluator
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain.schema.language_model import BaseLanguageModel
class PairwiseStringResultOutputParser(BaseOutputParser[dict]):
"""A parser for the output of the PairwiseStringEvalChain."""
@property
def _type(self) -> str:
return "pairwise_string_result"
def parse(self, text: str) -> Any:
"""Parse the output text.
Args:
text (str): The output text to parse.
Returns:
Any: The parsed output.
"""
reasoning, verdict = text.strip().rsplit("\n", maxsplit=1)
verdict = verdict.strip("[").strip("]")
if verdict not in {"A", "B", "C"}:
raise ValueError(
f"Invalid verdict: {verdict}. "
"Verdict must be one of 'A', 'B', or 'C'."
)
# C means the models are tied. Return 'None' meaning no preference
verdict_ = None if verdict == "C" else verdict
score = {
"A": 1,
"B": 0,
None: 0.5,
}.get(verdict_)
return {
"reasoning": reasoning,
"value": verdict_,
"score": score,
}
class PairwiseStringEvalChain(PairwiseStringEvaluator, LLMEvalChain, LLMChain):
"""A chain for comparing two outputs, such as the outputs
of two models, prompts, or outputs of a single model on similar inputs.
Example:
>>> from langchain.chat_models import ChatOpenAI
>>> from langchain.evaluation.comparison import PairwiseStringEvalChain
>>> llm = ChatOpenAI(temperature=0)
>>> chain = PairwiseStringEvalChain.from_llm(llm=llm)
>>> result = chain.evaluate_string_pairs(
... input = "What is the chemical formula for water?",
... prediction = "H2O",
... prediction_b = (
... "The chemical formula for water is H2O, which means"
... " there are two hydrogen atoms and one oxygen atom."
...     ),
...     reference = "The chemical formula for water is H2O.",
... )
>>> print(result["text"])
# {
# "value": "B",
# "comment": "Both responses accurately state"
# " that the chemical formula for water is H2O."
# " However, Response B provides additional information"
# . " by explaining what the formula means.\n[[B]]"
# }
"""
output_parser: BaseOutputParser = Field(
default_factory=PairwiseStringResultOutputParser
)
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
return "reference" in self.prompt.input_variables
@property
def requires_input(self) -> bool:
return True
@property
def _skip_reference_warning(self) -> str:
"""Warning to show when reference is ignored."""
return (
f"Ignoring reference in {self.__class__.__name__}, as it is not expected."
"\nTo use a reference, initialize PairwiseStringEvalChain with"
" `requires_reference=True` or with a prompt with 'reference' as an"
" input variable."
)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
*,
prompt: Optional[PromptTemplate] = None,
requires_reference: bool = False,
**kwargs: Any,
) -> PairwiseStringEvalChain:
"""Initialize the PairwiseStringEvalChain from an LLM.
Args:
llm (BaseLanguageModel): The LLM to use.
prompt (PromptTemplate, optional): The prompt to use.
requires_reference (bool, optional): Whether to require a reference
string. Defaults to False.
**kwargs (Any): Additional keyword arguments.
Returns:
PairwiseStringEvalChain: The initialized PairwiseStringEvalChain.
"""
expected_input_vars = {"prediction", "prediction_b", "input"}
if prompt is None:
if requires_reference:
expected_input_vars.add("reference")
prompt_ = PROMPT_WITH_REFERENCE
else:
prompt_ = PROMPT
else:
if requires_reference:
expected_input_vars.add("reference")
prompt_ = prompt
if expected_input_vars != set(prompt_.input_variables):
raise ValueError(
f"Input variables should be {expected_input_vars}, "
f"but got {prompt_.input_variables}"
)
return cls(llm=llm, prompt=prompt_, **kwargs)
def _prepare_input(
self,
prediction: str,
prediction_b: str,
input: Optional[str],
reference: Optional[str],
) -> dict:
input_ = {
"prediction": prediction,
"prediction_b": prediction_b,
}
if self.requires_input:
if not input:
                raise ValueError("Input is required for this comparison evaluator")
input_["input"] = input
if self.requires_reference:
if reference is None:
raise ValueError("Reference is required for this comparison evaluator")
input_["reference"] = reference
return input_
def _evaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
input: Optional[str] = None,
reference: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
"""Evaluate whether output A is preferred to output B.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- value: The preference value, which is either 'A', 'B', or None
for no preference.
- score: The preference score, which is 1 for 'A', 0 for 'B',
and 0.5 for None.
"""
input_ = self._prepare_input(prediction, prediction_b, input, reference)
result = self(
inputs=input_,
callbacks=callbacks,
**kwargs,
)
return result["text"]
async def _aevaluate_string_pairs(
self,
*,
prediction: str,
prediction_b: str,
reference: Optional[str] = None,
input: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate whether output A is preferred to output B.
Args:
prediction (str): The output string from the first model.
prediction_b (str): The output string from the second model.
input (str): The input or task string.
callbacks (Callbacks, optional): The callbacks to use.
reference (str, optional): The reference string, if any.
**kwargs (Any): Additional keyword arguments.
Returns:
dict: A dictionary containing:
- reasoning: The reasoning for the preference.
- value: The preference value, which is either 'A', 'B', or None
for no preference.
- score: The preference score, which is 1 for 'A', 0 for 'B',
and 0.5 for None.
"""
input_ = self._prepare_input(prediction, prediction_b, input, reference)
result = await self.acall(
inputs=input_,
callbacks=callbacks,
**kwargs,
)
return result["text"]
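# Illustrative usage sketch appended for this write-up (not part of the original module).
# It assumes an OpenAI API key is configured and shows the reference-grounded variant,
# which selects PROMPT_WITH_REFERENCE via requires_reference=True.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI

    chain = PairwiseStringEvalChain.from_llm(
        llm=ChatOpenAI(temperature=0), requires_reference=True
    )
    graded = chain.evaluate_string_pairs(
        input="Name the largest planet in the solar system.",
        prediction="Jupiter",
        prediction_b="Saturn is the largest planet.",
        reference="Jupiter is the largest planet in the solar system.",
    )
    print(graded)  # expected keys: "reasoning", "value", "score"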
| [] |
2024-01-10 | theobjectivedad/langchain | langchain~evaluation~agents~trajectory_eval_chain.py | """A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes. It uses a language model
chain (LLMChain) to generate the reasoning and scores.
"""
from typing import Any, Dict, List, NamedTuple, Optional, Sequence, Tuple, Union
from pydantic import Extra, Field
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.evaluation.agents.trajectory_eval_prompt import (
EVAL_CHAT_PROMPT,
TOOL_FREE_EVAL_CHAT_PROMPT,
)
from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain
from langchain.schema import AgentAction, BaseOutputParser, OutputParserException
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.base import BaseTool
class TrajectoryEval(NamedTuple):
score: int
reasoning: str
class TrajectoryOutputParser(BaseOutputParser):
@property
def _type(self) -> str:
return "agent_trajectory"
def parse(self, text: str) -> TrajectoryEval:
"""Parse the output text and extract the score and reasoning.
Args:
text (str): The output text to parse.
Returns:
TrajectoryEval: A named tuple containing the score and reasoning.
Raises:
OutputParserException: If the score is not found in the output text or
if the score is not a digit in the range 1-5.
"""
if "Score:" not in text:
raise OutputParserException(
f"Could not find score in model eval output: {text}"
)
reasoning, score_str = text.split("Score: ")
reasoning, score_str = reasoning.strip(), score_str.strip()
score_str = next(
(char for char in score_str if char.isdigit()), "0"
) # Scan for first digit
if not 1 <= int(score_str) <= 5:
raise OutputParserException(
f"Score is not a digit in the range 1-5: {text}"
)
return TrajectoryEval(score=int(score_str), reasoning=reasoning)
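# Illustrative sketch (added note, not part of the original module): the grader output must
# end with a "Score: <1-5>" line, e.g.
#     TrajectoryOutputParser().parse("The agent picked the right tool.\nScore: 4")
#     # -> TrajectoryEval(score=4, reasoning="The agent picked the right tool.")
# Output without a "Score:" line, or with a score outside 1-5, raises OutputParserException.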
class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
"""A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes.
Example:
.. code-block:: python
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.evaluation import TrajectoryEvalChain
from langchain.tools import tool
@tool
def geography_answers(country: str, question: str) -> str:
\"\"\"Very helpful answers to geography questions.\"\"\"
return f"{country}? IDK - We may never know {question}."
llm = ChatOpenAI(model="gpt-3.5-turbo-0613", temperature=0)
agent = initialize_agent(
tools=[geography_answers],
llm=llm,
agent=AgentType.OPENAI_FUNCTIONS,
return_intermediate_steps=True,
)
question = "How many dwell in the largest minor region in Argentina?"
response = agent(question)
eval_chain = TrajectoryEvalChain.from_llm(
llm=llm, agent_tools=[geography_answers], return_reasoning=True
)
result = eval_chain.evaluate_agent_trajectory(
input=question,
agent_trajectory=response["intermediate_steps"],
prediction=response["output"],
reference="Paris",
)
print(result["score"])
# 0
""" # noqa: E501
agent_tools: Optional[List[BaseTool]] = None
"""A list of tools available to the agent."""
eval_chain: LLMChain
"""The language model chain used for evaluation."""
output_parser: TrajectoryOutputParser = Field(
default_factory=TrajectoryOutputParser
)
"""The output parser used to parse the output."""
return_reasoning: bool = False
"""Whether to return the reasoning along with the score."""
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def _tools_description(self) -> str:
"""Get the description of the agent tools.
Returns:
str: The description of the agent tools.
"""
if self.agent_tools is None:
return ""
return "\n\n".join(
[
f"""Tool {i}: {tool.name}
Description: {tool.description}"""
for i, tool in enumerate(self.agent_tools, 1)
]
)
@staticmethod
def get_agent_trajectory(
steps: Union[str, Sequence[Tuple[AgentAction, str]]]
) -> str:
"""Get the agent trajectory as a formatted string.
Args:
steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory.
Returns:
str: The formatted agent trajectory.
"""
if isinstance(steps, str):
return steps
return "\n\n".join(
[
f"""Step {i}:
Tool used: {action.tool}
Tool input: {action.tool_input}
Tool output: {output}"""
for i, (action, output) in enumerate(steps, 1)
]
)
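    # Illustrative sketch (added note, not part of the original module): for a single
    # intermediate step such as
    #     [(AgentAction(tool="geography_answers", tool_input="Argentina", log=""), "IDK")]
    # get_agent_trajectory renders:
    #     Step 1:
    #     Tool used: geography_answers
    #     Tool input: Argentina
    #     Tool output: IDK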
@staticmethod
def _format_reference(reference: Optional[str]) -> str:
"""Format the reference text.
Args:
reference (str): The reference text.
Returns:
str: The formatted reference text.
"""
if not reference:
return ""
return f"""
The following is the expected answer. Use this to measure correctness:
[GROUND_TRUTH]
{reference}
[END_GROUND_TRUTH]
"""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
agent_tools: Optional[Sequence[BaseTool]] = None,
output_parser: Optional[TrajectoryOutputParser] = None,
return_reasoning: bool = False,
**kwargs: Any,
) -> "TrajectoryEvalChain":
"""Create a TrajectoryEvalChain object from a language model chain.
Args:
llm (BaseChatModel): The language model chain.
agent_tools (Optional[Sequence[BaseTool]]): A list of tools
available to the agent.
output_parser (Optional[TrajectoryOutputParser]): The output parser
used to parse the chain output into a score.
return_reasoning (bool): Whether to return the
reasoning along with the score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
"""
if not isinstance(llm, BaseChatModel):
            raise NotImplementedError(
                "Only chat models are supported by the current trajectory eval"
            )
if agent_tools:
prompt = EVAL_CHAT_PROMPT
else:
prompt = TOOL_FREE_EVAL_CHAT_PROMPT
eval_chain = LLMChain(llm=llm, prompt=prompt)
return cls(
agent_tools=agent_tools,
return_reasoning=return_reasoning,
eval_chain=eval_chain,
output_parser=output_parser or TrajectoryOutputParser(),
**kwargs,
)
@property
def input_keys(self) -> List[str]:
"""Get the input keys for the chain.
Returns:
List[str]: The input keys.
"""
return ["question", "agent_trajectory", "answer", "reference"]
@property
def output_keys(self) -> List[str]:
"""Get the output keys for the chain.
Returns:
List[str]: The output keys.
"""
if self.return_reasoning:
return ["score", "reasoning"]
return ["score"]
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if "reference" not in inputs:
inputs["reference"] = self._format_reference(inputs.get("reference"))
return super().prep_inputs(inputs)
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input["tool_descriptions"] = self._tools_description
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
raw_output = self.eval_chain.run(
chain_input, callbacks=_run_manager.get_child()
)
parsed_output = self.output_parser.parse(raw_output)
if self.return_reasoning:
return {"score": parsed_output.score, "reasoning": parsed_output.reasoning}
return {"score": parsed_output.score}
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input["tool_descriptions"] = self._tools_description
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
raw_output = await self.eval_chain.arun(
chain_input, callbacks=_run_manager.get_child()
)
parsed_output = self.output_parser.parse(raw_output)
if self.return_reasoning:
return {"score": parsed_output.score, "reasoning": parsed_output.reasoning}
return {"score": parsed_output.score}
def _evaluate_agent_trajectory(
self,
*,
prediction: str,
input: str,
agent_trajectory: Sequence[Tuple[AgentAction, str]],
reference: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
"""Evaluate a trajectory.
Args:
prediction (str): The final predicted response.
input (str): The input to the agent.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
reference (Optional[str]): The reference answer.
callbacks (Callbacks): Callbacks to use for this chain run.
Returns:
dict: The evaluation result, which includes the score and optionally
the reasoning for reaching that.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": reference,
}
return self(inputs=inputs, callbacks=callbacks, **kwargs)
async def _aevaluate_agent_trajectory(
self,
*,
prediction: str,
input: str,
agent_trajectory: Sequence[Tuple[AgentAction, str]],
reference: Optional[str] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate a trajectory.
Args:
prediction (str): The final predicted response.
input (str): The input to the agent.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
reference (Optional[str]): The reference answer.
callbacks (Callbacks): Callbacks to use for this chain run.
Returns:
dict: The evaluation result, which includes the score and optionally
the reasoning for reaching that.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": reference,
}
return await self.acall(
inputs=inputs,
callbacks=callbacks,
**kwargs,
)
| [] |
2024-01-10 | theobjectivedad/langchain | langchain~client~runner_utils.py | """Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import logging
from datetime import datetime
from typing import (
Any,
Callable,
Coroutine,
Dict,
Iterator,
List,
Optional,
Sequence,
Union,
)
from langchainplus_sdk import LangChainPlusClient, RunEvaluator
from langchainplus_sdk.schemas import Example
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.manager import Callbacks
from langchain.callbacks.tracers.base import BaseTracer
from langchain.callbacks.tracers.evaluation import EvaluatorCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.chains.base import Chain
from langchain.chat_models.base import BaseChatModel
from langchain.llms.base import BaseLLM
from langchain.schema import (
ChatResult,
LLMResult,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.messages import (
BaseMessage,
HumanMessage,
get_buffer_string,
messages_from_dict,
)
logger = logging.getLogger(__name__)
MODEL_OR_CHAIN_FACTORY = Union[Callable[[], Chain], BaseLanguageModel]
class InputFormatError(Exception):
"""Raised when the input format is invalid."""
def _get_prompts(inputs: Dict[str, Any]) -> List[str]:
"""Get prompts from inputs.
Args:
inputs: The input dictionary.
Returns:
A list of prompts.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError("Inputs should not be empty.")
prompts = []
if "prompt" in inputs:
if not isinstance(inputs["prompt"], str):
raise InputFormatError(
"Expected string for 'prompt', got"
f" {type(inputs['prompt']).__name__}"
)
prompts = [inputs["prompt"]]
elif "prompts" in inputs:
if not isinstance(inputs["prompts"], list) or not all(
isinstance(i, str) for i in inputs["prompts"]
):
raise InputFormatError(
"Expected list of strings for 'prompts',"
f" got {type(inputs['prompts']).__name__}"
)
prompts = inputs["prompts"]
elif len(inputs) == 1:
prompt_ = next(iter(inputs.values()))
if isinstance(prompt_, str):
prompts = [prompt_]
elif isinstance(prompt_, list) and all(isinstance(i, str) for i in prompt_):
prompts = prompt_
else:
raise InputFormatError(f"LLM Run expects string prompt input. Got {inputs}")
else:
raise InputFormatError(
f"LLM Run expects 'prompt' or 'prompts' in inputs. Got {inputs}"
)
return prompts
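# Illustrative sketch (added note, not part of the original module) of the shapes accepted above:
#     _get_prompts({"prompt": "Hello"})            # -> ["Hello"]
#     _get_prompts({"prompts": ["Hi", "Bye"]})     # -> ["Hi", "Bye"]
#     _get_prompts({"question": "single value"})   # -> ["single value"] (single-key fallback)
# Anything else raises InputFormatError.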
def _get_messages(inputs: Dict[str, Any]) -> List[List[BaseMessage]]:
"""Get Chat Messages from inputs.
Args:
inputs: The input dictionary.
Returns:
A list of chat messages.
Raises:
InputFormatError: If the input format is invalid.
"""
if not inputs:
raise InputFormatError("Inputs should not be empty.")
if "messages" in inputs:
single_input = inputs["messages"]
elif len(inputs) == 1:
single_input = next(iter(inputs.values()))
else:
raise InputFormatError(f"Chat Run expects 'messages' in inputs. Got {inputs}")
if isinstance(single_input, list) and all(
isinstance(i, dict) for i in single_input
):
raw_messages = [single_input]
elif isinstance(single_input, list) and all(
isinstance(i, list) for i in single_input
):
raw_messages = single_input
else:
raise InputFormatError(
f"Chat Run expects List[dict] or List[List[dict]] 'messages'"
f" input. Got {inputs}"
)
return [messages_from_dict(batch) for batch in raw_messages]
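# Illustrative sketch (added note, not part of the original module) of the two shapes accepted above:
#     _get_messages({"messages": [{"type": "human", "data": {"content": "Hi"}}]})
#         -> a batch containing one conversation: [[HumanMessage(content="Hi")]]
#     _get_messages({"messages": [[...], [...]]})
#         -> a batch of conversations, one List[BaseMessage] per inner list
# Anything else raises InputFormatError.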
async def _arun_llm(
llm: BaseLanguageModel,
inputs: Dict[str, Any],
*,
tags: Optional[List[str]] = None,
callbacks: Callbacks = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[LLMResult, ChatResult]:
"""Asynchronously run the language model.
Args:
llm: The language model to run.
inputs: The input dictionary.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
input_mapper: Optional function to map inputs to the expected format.
Returns:
The LLMResult or ChatResult.
Raises:
ValueError: If the LLM type is unsupported.
InputFormatError: If the input format is invalid.
"""
if input_mapper is not None:
if not isinstance(llm, (BaseLLM, BaseChatModel)):
raise ValueError(f"Unsupported LLM type {type(llm).__name__}")
llm_output = await llm.agenerate(
input_mapper(inputs), callbacks=callbacks, tags=tags
)
elif isinstance(llm, BaseLLM):
try:
llm_prompts = _get_prompts(inputs)
llm_output = await llm.agenerate(
llm_prompts, callbacks=callbacks, tags=tags
)
except InputFormatError:
llm_messages = _get_messages(inputs)
buffer_strings = [get_buffer_string(messages) for messages in llm_messages]
llm_output = await llm.agenerate(
buffer_strings, callbacks=callbacks, tags=tags
)
elif isinstance(llm, BaseChatModel):
try:
messages = _get_messages(inputs)
llm_output = await llm.agenerate(messages, callbacks=callbacks, tags=tags)
except InputFormatError:
prompts = _get_prompts(inputs)
converted_messages: List[List[BaseMessage]] = [
[HumanMessage(content=prompt)] for prompt in prompts
]
llm_output = await llm.agenerate(
converted_messages, callbacks=callbacks, tags=tags
)
else:
raise ValueError(f"Unsupported LLM type {type(llm)}")
return llm_output
async def _arun_llm_or_chain(
example: Example,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
n_repetitions: int,
*,
tags: Optional[List[str]] = None,
callbacks: Optional[List[BaseCallbackHandler]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
"""Asynchronously run the Chain or language model.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
n_repetitions: The number of times to run the model on each example.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
input_mapper: Optional function to map the input to the expected format.
Returns:
A list of outputs.
"""
if callbacks:
previous_example_ids = [
getattr(tracer, "example_id", None) for tracer in callbacks
]
for tracer in callbacks:
if hasattr(tracer, "example_id"):
tracer.example_id = example.id
else:
previous_example_ids = None
outputs = []
for _ in range(n_repetitions):
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = await _arun_llm(
llm_or_chain_factory,
example.inputs,
tags=tags,
callbacks=callbacks,
input_mapper=input_mapper,
)
else:
chain = llm_or_chain_factory()
if input_mapper is not None:
inputs_ = input_mapper(example.inputs)
else:
inputs_ = example.inputs
if len(inputs_) == 1:
inputs_ = next(iter(inputs_.values()))
output = await chain.acall(inputs_, callbacks=callbacks, tags=tags)
outputs.append(output)
except Exception as e:
logger.warning(f"Chain failed for example {example.id}. Error: {e}")
outputs.append({"Error": str(e)})
if callbacks and previous_example_ids:
for example_id, tracer in zip(previous_example_ids, callbacks):
if hasattr(tracer, "example_id"):
tracer.example_id = example_id
return outputs
async def _gather_with_concurrency(
n: int,
initializer: Callable[[], Coroutine[Any, Any, Any]],
*async_funcs: Callable[
[Sequence[BaseCallbackHandler], Dict], Coroutine[Any, Any, Any]
],
) -> List[Any]:
"""Run coroutines with a concurrency limit.
Args:
n: The maximum number of concurrent tasks.
initializer: A coroutine that initializes shared resources for the tasks.
async_funcs: The async_funcs to be run concurrently.
Returns:
A list of results from the coroutines.
"""
semaphore = asyncio.Semaphore(n)
job_state = {"num_processed": 0}
callback_queue: asyncio.Queue[Sequence[BaseCallbackHandler]] = asyncio.Queue()
for _ in range(n):
callback_queue.put_nowait(await initializer())
async def run_coroutine_with_semaphore(
async_func: Callable[
[Sequence[BaseCallbackHandler], Dict], Coroutine[Any, Any, Any]
]
) -> Any:
async with semaphore:
callbacks = await callback_queue.get()
try:
result = await async_func(callbacks, job_state)
finally:
callback_queue.put_nowait(callbacks)
return result
results = await asyncio.gather(
*(run_coroutine_with_semaphore(function) for function in async_funcs)
)
while callback_queue:
try:
callbacks = callback_queue.get_nowait()
except asyncio.QueueEmpty:
break
for callback in callbacks:
if isinstance(callback, (LangChainTracer, EvaluatorCallbackHandler)):
callback.wait_for_futures()
return results
async def _callbacks_initializer(
project_name: Optional[str],
client: LangChainPlusClient,
run_evaluators: Sequence[RunEvaluator],
evaluation_handler_collector: List[EvaluatorCallbackHandler],
) -> List[BaseTracer]:
"""
Initialize a tracer to share across tasks.
Args:
project_name: The project name for the tracer.
client: The client to use for the tracer.
run_evaluators: The evaluators to run.
evaluation_handler_collector: A list to collect the evaluators.
Used to wait for the evaluators to finish.
Returns:
The callbacks for this thread.
"""
callbacks: List[BaseTracer] = []
if project_name:
callbacks.append(LangChainTracer(project_name=project_name))
evaluator_project_name = f"{project_name}-evaluators" if project_name else None
if run_evaluators:
callback = EvaluatorCallbackHandler(
client=client,
evaluators=run_evaluators,
# We already have concurrency, don't want to overload the machine
max_workers=1,
project_name=evaluator_project_name,
)
callbacks.append(callback)
evaluation_handler_collector.append(callback)
return callbacks
async def arun_on_examples(
examples: Iterator[Example],
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
concurrency_level: int = 5,
num_repetitions: int = 1,
project_name: Optional[str] = None,
verbose: bool = False,
client: Optional[LangChainPlusClient] = None,
tags: Optional[List[str]] = None,
run_evaluators: Optional[Sequence[RunEvaluator]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Asynchronously run the chain on examples and store traces
to the specified project name.
Args:
examples: Examples to run the model or chain over.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
concurrency_level: The number of async tasks to run concurrently.
num_repetitions: Number of times to run the model on each example.
This is useful when testing success rates or generating confidence
intervals.
project_name: Project name to use when tracing runs.
Defaults to {dataset_name}-{chain class name}-{datetime}.
verbose: Whether to print progress.
client: Client to use to read the dataset. If not provided, a new
client will be created using the credentials in the environment.
tags: Tags to add to each run in the project.
run_evaluators: Evaluators to run on the results of the chain.
        input_mapper: A function to map the inputs dictionary from an Example
to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary mapping example ids to the model outputs.
"""
project_name = _get_project_name(project_name, llm_or_chain_factory, None)
client_ = client or LangChainPlusClient()
results: Dict[str, List[Any]] = {}
async def process_example(
example: Example, callbacks: List[BaseCallbackHandler], job_state: dict
) -> None:
"""Process a single example."""
result = await _arun_llm_or_chain(
example,
llm_or_chain_factory,
num_repetitions,
tags=tags,
callbacks=callbacks,
input_mapper=input_mapper,
)
results[str(example.id)] = result
job_state["num_processed"] += 1
if verbose:
print(
f"Processed examples: {job_state['num_processed']}",
end="\r",
flush=True,
)
evaluation_handlers: List[EvaluatorCallbackHandler] = []
await _gather_with_concurrency(
concurrency_level,
functools.partial(
_callbacks_initializer,
project_name=project_name,
client=client_,
evaluation_handler_collector=evaluation_handlers,
run_evaluators=run_evaluators or [],
),
*(functools.partial(process_example, e) for e in examples),
)
for handler in evaluation_handlers:
handler.wait_for_futures()
return results
def run_llm(
llm: BaseLanguageModel,
inputs: Dict[str, Any],
callbacks: Callbacks,
*,
tags: Optional[List[str]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[LLMResult, ChatResult]:
"""
Run the language model on the example.
Args:
llm: The language model to run.
inputs: The input dictionary.
callbacks: The callbacks to use during the run.
tags: Optional tags to add to the run.
        input_mapper: A function to map the inputs dictionary from an Example to the format expected by the model.
Returns:
The LLMResult or ChatResult.
Raises:
ValueError: If the LLM type is unsupported.
InputFormatError: If the input format is invalid.
"""
if input_mapper is not None:
if not isinstance(llm, (BaseLLM, BaseChatModel)):
raise ValueError(f"Unsupported LLM type {type(llm).__name__}")
llm_output = llm.generate(input_mapper(inputs), callbacks=callbacks, tags=tags)
elif isinstance(llm, BaseLLM):
try:
llm_prompts = _get_prompts(inputs)
llm_output = llm.generate(llm_prompts, callbacks=callbacks, tags=tags)
except InputFormatError:
llm_messages = _get_messages(inputs)
buffer_strings = [get_buffer_string(messages) for messages in llm_messages]
llm_output = llm.generate(buffer_strings, callbacks=callbacks)
elif isinstance(llm, BaseChatModel):
try:
messages = _get_messages(inputs)
llm_output = llm.generate(messages, callbacks=callbacks, tags=tags)
except InputFormatError:
prompts = _get_prompts(inputs)
converted_messages: List[List[BaseMessage]] = [
[HumanMessage(content=prompt)] for prompt in prompts
]
llm_output = llm.generate(
converted_messages, callbacks=callbacks, tags=tags
)
else:
raise ValueError(f"Unsupported LLM type {type(llm)}")
return llm_output
def run_llm_or_chain(
example: Example,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
n_repetitions: int,
*,
tags: Optional[List[str]] = None,
callbacks: Optional[List[BaseCallbackHandler]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
"""
Run the Chain or language model synchronously.
Args:
example: The example to run.
llm_or_chain_factory: The Chain or language model constructor to run.
n_repetitions: The number of times to run the model on each example.
tags: Optional tags to add to the run.
callbacks: Optional callbacks to use during the run.
Returns:
Union[List[dict], List[str], List[LLMResult], List[ChatResult]]:
The outputs of the model or chain.
"""
if callbacks:
previous_example_ids = [
getattr(tracer, "example_id", None) for tracer in callbacks
]
for tracer in callbacks:
if hasattr(tracer, "example_id"):
tracer.example_id = example.id
else:
previous_example_ids = None
outputs = []
for _ in range(n_repetitions):
try:
if isinstance(llm_or_chain_factory, BaseLanguageModel):
output: Any = run_llm(
llm_or_chain_factory,
example.inputs,
callbacks,
tags=tags,
input_mapper=input_mapper,
)
else:
chain = llm_or_chain_factory()
if input_mapper is not None:
inputs_ = input_mapper(example.inputs)
else:
inputs_ = example.inputs
if len(inputs_) == 1:
inputs_ = next(iter(inputs_.values()))
output = chain(inputs_, callbacks=callbacks, tags=tags)
outputs.append(output)
except Exception as e:
logger.warning(f"Chain failed for example {example.id}. Error: {e}")
outputs.append({"Error": str(e)})
if callbacks and previous_example_ids:
for example_id, tracer in zip(previous_example_ids, callbacks):
if hasattr(tracer, "example_id"):
tracer.example_id = example_id
return outputs
def run_on_examples(
examples: Iterator[Example],
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
num_repetitions: int = 1,
project_name: Optional[str] = None,
verbose: bool = False,
client: Optional[LangChainPlusClient] = None,
tags: Optional[List[str]] = None,
run_evaluators: Optional[Sequence[RunEvaluator]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Run the Chain or language model on examples and store
traces to the specified project name.
Args:
examples: Examples to run the model or chain over.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
num_repetitions: Number of times to run the model on each example.
This is useful when testing success rates or generating confidence
intervals.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
verbose: Whether to print progress.
client: Client to use to access the dataset. If None, a new client
will be created using the credentials in the environment.
tags: Tags to add to each run in the project.
run_evaluators: Evaluators to run on the results of the chain.
        input_mapper: A function to map the inputs dictionary from an Example
to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary mapping example ids to the model outputs.
"""
results: Dict[str, Any] = {}
project_name = _get_project_name(project_name, llm_or_chain_factory, None)
client_ = client or LangChainPlusClient()
tracer = LangChainTracer(project_name=project_name)
evaluator_project_name = f"{project_name}-evaluators"
    evaluation_handler = EvaluatorCallbackHandler(
evaluators=run_evaluators or [],
client=client_,
project_name=evaluator_project_name,
)
    callbacks: List[BaseCallbackHandler] = [tracer, evaluation_handler]
for i, example in enumerate(examples):
result = run_llm_or_chain(
example,
llm_or_chain_factory,
num_repetitions,
tags=tags,
callbacks=callbacks,
input_mapper=input_mapper,
)
if verbose:
print(f"{i+1} processed", flush=True, end="\r")
results[str(example.id)] = result
tracer.wait_for_futures()
    evaluation_handler.wait_for_futures()
return results
def _get_project_name(
project_name: Optional[str],
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
dataset_name: Optional[str],
) -> str:
"""
Get the project name.
Args:
project_name: The project name if manually specified.
llm_or_chain_factory: The Chain or language model constructor.
dataset_name: The dataset name.
Returns:
The project name.
"""
if project_name is not None:
return project_name
current_time = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
if isinstance(llm_or_chain_factory, BaseLanguageModel):
model_name = llm_or_chain_factory.__class__.__name__
else:
model_name = llm_or_chain_factory().__class__.__name__
dataset_prefix = f"{dataset_name}-" if dataset_name else ""
return f"{dataset_prefix}{model_name}-{current_time}"
async def arun_on_dataset(
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
concurrency_level: int = 5,
num_repetitions: int = 1,
project_name: Optional[str] = None,
verbose: bool = False,
client: Optional[LangChainPlusClient] = None,
tags: Optional[List[str]] = None,
run_evaluators: Optional[Sequence[RunEvaluator]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Asynchronously run the Chain or language model on a dataset
and store traces to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
concurrency_level: The number of async tasks to run concurrently.
num_repetitions: Number of times to run the model on each example.
This is useful when testing success rates or generating confidence
intervals.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
verbose: Whether to print progress.
client: Client to use to read the dataset. If not provided, a new
client will be created using the credentials in the environment.
tags: Tags to add to each run in the session.
run_evaluators: Evaluators to run on the results of the chain.
        input_mapper: A function to map the inputs dictionary from an Example
to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary containing the run's project name and the resulting model outputs.
"""
client_ = client or LangChainPlusClient()
project_name = _get_project_name(project_name, llm_or_chain_factory, dataset_name)
dataset = client_.read_dataset(dataset_name=dataset_name)
examples = client_.list_examples(dataset_id=str(dataset.id))
results = await arun_on_examples(
examples,
llm_or_chain_factory,
concurrency_level=concurrency_level,
num_repetitions=num_repetitions,
project_name=project_name,
verbose=verbose,
client=client_,
tags=tags,
run_evaluators=run_evaluators,
input_mapper=input_mapper,
)
return {
"project_name": project_name,
"results": results,
}
def run_on_dataset(
dataset_name: str,
llm_or_chain_factory: MODEL_OR_CHAIN_FACTORY,
*,
num_repetitions: int = 1,
project_name: Optional[str] = None,
verbose: bool = False,
client: Optional[LangChainPlusClient] = None,
tags: Optional[List[str]] = None,
run_evaluators: Optional[Sequence[RunEvaluator]] = None,
input_mapper: Optional[Callable[[Dict], Any]] = None,
) -> Dict[str, Any]:
"""
Run the Chain or language model on a dataset and store traces
to the specified project name.
Args:
dataset_name: Name of the dataset to run the chain on.
llm_or_chain_factory: Language model or Chain constructor to run
over the dataset. The Chain constructor is used to permit
independent calls on each example without carrying over state.
num_repetitions: Number of times to run the model on each example.
This is useful when testing success rates or generating confidence
intervals.
project_name: Name of the project to store the traces in.
Defaults to {dataset_name}-{chain class name}-{datetime}.
verbose: Whether to print progress.
client: Client to use to access the dataset. If None, a new client
will be created using the credentials in the environment.
tags: Tags to add to each run in the session.
run_evaluators: Evaluators to run on the results of the chain.
        input_mapper: A function to map the inputs dictionary from an Example
to the format expected by the model to be evaluated. This is useful if
your model needs to deserialize more complex schema or if your dataset
has inputs with keys that differ from what is expected by your chain
or agent.
Returns:
A dictionary containing the run's project name and the resulting model outputs.
"""
client_ = client or LangChainPlusClient()
project_name = _get_project_name(project_name, llm_or_chain_factory, dataset_name)
dataset = client_.read_dataset(dataset_name=dataset_name)
examples = client_.list_examples(dataset_id=str(dataset.id))
results = run_on_examples(
examples,
llm_or_chain_factory,
num_repetitions=num_repetitions,
project_name=project_name,
verbose=verbose,
tags=tags,
run_evaluators=run_evaluators,
client=client_,
input_mapper=input_mapper,
)
return {
"project_name": project_name,
"results": results,
}
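# Illustrative usage sketch appended for this write-up (not part of the original module).
# It assumes a LangChainPlus/LangSmith dataset named "my-eval-dataset" already exists and
# that the required API credentials are available in the environment.
if __name__ == "__main__":
    from langchain.chains import LLMChain
    from langchain.chat_models import ChatOpenAI
    from langchain.prompts import PromptTemplate

    def chain_factory() -> Chain:
        # A fresh chain per example, so no state carries over between runs.
        prompt = PromptTemplate.from_template("Answer concisely: {question}")
        return LLMChain(llm=ChatOpenAI(temperature=0), prompt=prompt)

    summary = run_on_dataset("my-eval-dataset", chain_factory, verbose=True)
    print(summary["project_name"])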
| [
"['PLACEHOLDER']",
"[]"
] |
2024-01-10 | camillanapoles/cs-relatorios-repasses | app_ia_cia.py | import streamlit as st
import pandas as pd
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
# Set page configuration
st.set_page_config(
    page_title="App - Analise de Repasses",
page_icon=":sales:",
layout="wide",
initial_sidebar_state="expanded"
)
# Define styles for the app
styles = """
<style>
img {
max-width: 50%;
}
.sidebar .sidebar-content {
background-color: #f5f5f5;
}
</style>
"""
# Render styles
st.markdown(styles, unsafe_allow_html=True)
#image = Image.open(r"C:\Downloads\20-Easy-Call-Center-Sales-Tips-to-Increase-Sales-1024x536.png")
#image2 = Image.open(r"C:\Downloads\sales-prediction.jpg")
# Define header
header = st.container()
with header:
#st.image(image)
st.title("Cia do Sorriso - Analise de Repasses")
    st.markdown("presente de Camilla Napoles para Antonio Sa")
st.write("")
# Define main content
content = st.container()
with content:
# Load sales dataset
sale_file = st.file_uploader('Selecione seu CSV (fornecido por padrão)')
if sale_file is not None:
df = pd.read_csv(sale_file, encoding='latin-1')
else:
st.warning("Selecione um arquivo CSV para continuar.")
st.stop()
# Select x and y variables
st.subheader("Crie seu gráfico")
st.write("Selecione as variaveis x e y para criar um gráfico de dispersão.")
    col1, col2 = st.columns(2)
with col1:
selected_x_var = st.selectbox('X variable', ['data_virada', 'ultimo_fir_updated_at', 'data_pagamento'])
with col2:
selected_y_var = st.selectbox('Y variable', ['repasse_liberado', 'valor_pago_ati', 'valor_glosado_fir'])
# Create scatterplot
fig, ax = plt.subplots()
ax = sns.scatterplot(x = df[selected_x_var], y = df[selected_y_var], hue = df['dentistas'])
plt.xlabel(selected_x_var)
plt.ylabel(selected_y_var)
plt.title("Scatterplot of Sales")
st.pyplot(fig)
# Define sidebar
sidebar = st.sidebar
with sidebar:
    # st.image(image2)  # the image asset load above is commented out, so skip rendering it here
st.subheader("Obtenha insights sobre os dados")
st.write("Insira uma pergunta para gerar insights sobre os dados usando inteligencia artificial")
prompt = st.text_input("escreva aqui o que deseja:")
if prompt:
# Initialize PandasAI and OpenAI
llm = OpenAI()
pandas_ai = PandasAI(llm)
# Run PandasAI with user input prompt
        result = pandas_ai.run(df, prompt=prompt)
# Display result
if result is not None:
st.write("### Insights")
st.write(result)
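# To try the app locally (a note added here; assumes streamlit, pandasai and their
# dependencies are installed and an OpenAI key is available to PandasAI):
#   streamlit run app_ia_cia.py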
| [
"escreva aqui o que deseja:"
] |
2024-01-10 | camillanapoles/cs-relatorios-repasses | app_ai.py | import os
import streamlit as st
import pandas as pd
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
import matplotlib.pyplot as plt
from dotenv import load_dotenv
load_dotenv()
st.title("IA -ciadosorriso ")
st.write("Primeira interface de inteligência artificial da Cia do Sorriso")
st.write(
"Importe a arquivo csv"
)
openai_key = os.environ.get('OPENAI_KEY')
key = openai_key
st.session_state.openai_key = openai_key
#if "openai_key" not in st.session_state:
# with st.form("API key"):
# key = openai_key #st.text_input("OpenAI Key", value="", type="password")
#if st.form_submit_button("Submit"):
#st.session_state.openai_key = openapi_key
st.session_state.prompt_history = []
st.session_state.df = None
df = pd.read_parquet("../db/df_data_virada.parquet")
st.session_state.df = df
"""
if "openai_key" in st.session_state:
if st.session_state.df is None:
uploaded_file = st.file_uploader(
"Escolha um arquivo CSV. Deve estar em formato longo (um objeto por linha).",
type="csv",
)
if uploaded_file is not None:
#df = pd.read_csv(uploaded_file)
st.session_state.df = df
"""
with st.form("o que vc gostaria de saber?"):
question = st.text_input("o que vc gostaria de saber?", value="", type="default")
submitted = st.form_submit_button("Submit")
if submitted:
with st.spinner():
llm = OpenAI(api_token=st.session_state.openai_key)
pandas_ai = PandasAI(llm)
x = pandas_ai.run(st.session_state.df, prompt=question)
fig = plt.gcf()
if fig.get_axes():
st.pyplot(fig)
st.write(x)
st.session_state.prompt_history.append(question)
if st.session_state.df is not None:
st.subheader("tabela atual:")
st.write(st.session_state.df)
st.subheader("Prompt históricos:")
st.write(st.session_state.prompt_history)
if st.button("Clear"):
st.session_state.prompt_history = []
st.session_state.df = None
| [] |
2024-01-10 | lcmd65/ecommerce-app | app~fc.py | import openai
import streamlit as st
from PIL import Image
from streamlitextras.webutils import stxs_javascript
from typing import NoReturn
from pymongo import MongoClient
import json
import os
import warnings
warnings.filterwarnings("ignore")
def redirect(url: str="http://localhost:8081/") -> NoReturn:
stxs_javascript(f"window.location.href='{url}';")
openai_api_key = os.getenv("OPENAI_API_KEY")
if not openai_api_key:
with open("app/schema.json", "r") as file:
uri = json.load(file)
client = MongoClient(uri["mongo_uri"])
client.admin.command('ping')
db = client["Api"]
collection = db["api"]
documents = collection.find()
api_key = None
for item in documents:
if item['api'] == 'datathon-service':
api_key = item['api-key']
break
client.close()
openai_api_key = api_key
st.set_page_config(page_title="SUSBot", page_icon="app/static/images/logo.png")
st.button("Back", on_click=redirect)
col1, col2 = st.columns([1, 4])
with col1:
st.image(Image.open("app/static/images/logo.png"), width=100)
with col2:
st.title("SUSBot")
st.write("A fullscreen live demo for chatbot consultation")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
if msg["role"] == "assistant":
st.chat_message(msg["role"], avatar="app/static/images/icons-bot.png").write(msg["content"])
elif msg["role"] == "user":
st.chat_message(msg["role"], avatar="app/static/images/tux.png").write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
client = openai.OpenAI(api_key=openai_api_key)
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user", avatar="app/static/images/tux.png").write(prompt)
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=st.session_state.messages,
max_tokens=256,)
msg = response.choices[0].message.content
st.session_state.messages.append({"role": "assistant", "content": msg})
st.chat_message("assistant", avatar="app/static/images/icons-bot.png").write(msg) | [
"How can I help you?"
] |
2024-01-10 | naz2001r/pdf-qa-bot | backend~tests~test_vec_db.py | import os
import pytest
import pickle
from langchain.embeddings.openai import OpenAIEmbeddings
from backend.src.vec_db import VectorizeDB
from backend.src.pdf_loader import PdfToTextLoader
@pytest.fixture
def openai_key():
return os.environ['OPEN_AI_SECRET_KEY']
@pytest.fixture
def file_path():
return "./vectorized_db.pkl"
@pytest.fixture
def sample_pages():
loader = PdfToTextLoader("")
text = ["page1", "page2", "page3"]
result = loader.text_to_docs(text)
return result
def test_vectorize_db_init(openai_key):
vectorizer = VectorizeDB(openai_key)
assert isinstance(vectorizer.embeddings, OpenAIEmbeddings)
assert vectorizer._VectorizeDB__db is None
assert vectorizer._VectorizeDB__retriever is None
def test_vectorize_db_init_invalid_openai_key():
with pytest.raises(AssertionError):
VectorizeDB(123)
def test_vectorize_db_vectorize(openai_key, sample_pages):
vectorizer = VectorizeDB(openai_key)
vectorizer.vectorize(sample_pages)
assert vectorizer._VectorizeDB__db is not None
def test_vectorize_db_vectorize_invalid_pages(openai_key):
vectorizer = VectorizeDB(openai_key)
with pytest.raises(AssertionError):
vectorizer.vectorize("invalid_pages")
def test_vectorize_db_vectorize_invalid_extend(openai_key):
vectorizer = VectorizeDB(openai_key)
with pytest.raises(AssertionError):
vectorizer.vectorize([], extend="invalid_extend")
def test_vectorize_db_retriever_setter(openai_key, sample_pages):
vectorizer = VectorizeDB(openai_key)
vectorizer.vectorize(sample_pages)
vectorizer.retriever = 10
assert vectorizer._VectorizeDB__retriever is not None
def test_vectorize_db_retriever_setter_invalid_k(openai_key, sample_pages):
vectorizer = VectorizeDB(openai_key)
vectorizer.vectorize(sample_pages)
with pytest.raises(TypeError):
vectorizer.retriever = "invalid_k"
def test_vectorize_db_query(openai_key, sample_pages):
vectorizer = VectorizeDB(openai_key)
vectorizer.vectorize(sample_pages)
vectorizer.retriever = 5
result = vectorizer.query("query_text")
assert isinstance(result, list)
def test_vectorize_db_query_retriever_not_set(openai_key):
vectorizer = VectorizeDB(openai_key)
with pytest.raises(AssertionError):
vectorizer.query(123)
def test_vectorize_db_load_db(file_path, openai_key):
vectorizer = VectorizeDB(openai_key)
vectorizer.dump_db(file_path)
loaded_vectorizer = VectorizeDB.load_db(file_path)
assert isinstance(loaded_vectorizer, VectorizeDB)
def test_vectorize_db_load_db_invalid_file_name():
with pytest.raises(AssertionError):
VectorizeDB.load_db(123)
def test_vectorize_db_dump_db(file_path, openai_key):
vectorizer = VectorizeDB(openai_key)
vectorizer.dump_db(file_path)
loaded_vectorizer = pickle.load(open(file_path, 'rb'))
assert isinstance(loaded_vectorizer, VectorizeDB)
def test_vectorize_db_dump_db_invalid_file_name(openai_key):
vectorizer = VectorizeDB(openai_key)
with pytest.raises(AssertionError):
vectorizer.dump_db(123)
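# To run this suite locally (a note added here, not part of the original tests): the fixtures
# read OPEN_AI_SECRET_KEY from the environment and the embedding tests call the OpenAI API.
#   export OPEN_AI_SECRET_KEY=sk-...
#   pytest backend/tests/test_vec_db.py -q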
| [] |
2024-01-10 | naz2001r/pdf-qa-bot | backend~src~vec_db.py | import pickle
import logging
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
# Configure logging
logging.basicConfig(level=logging.INFO)
class VectorizeDB:
"""
A class for vectorizing datasets.
"""
def __init__(self, openai_key: str) -> None:
"""
Initialize a VectorizeDB object.
Args:
openai_key (str): OpenAI API key (default is an empty string).
"""
assert isinstance(openai_key, str), "openai_key must be a string"
self.embeddings = OpenAIEmbeddings(openai_api_key=openai_key)
self.__db = None
self.__retriever = None
def vectorize(self, pages: list, extend: bool = False) -> None:
"""
Vectorize a list of pages from pdf files and create a vector database.
Args:
pages (list): List of pages to vectorize.
extend (bool): If True, extend the existing database with new pages.
If False, create a new database (default is False).
"""
assert isinstance(pages, list), "pages must be a list"
assert isinstance(extend, bool), "extend must be a boolean"
logging.info("Vectorizing pages...")
if self.__db is not None and extend:
db_new = FAISS.from_documents(pages, self.embeddings)
            self.__db.merge_from(db_new)  # merge_from mutates the existing index in place
else:
self.__db = FAISS.from_documents(pages, self.embeddings)
@property
def retriever(self) -> object:
"""
Get the current retriever object.
Returns:
object: Retriever object.
"""
return self.__retriever
@retriever.setter
def retriever(self, k: int = 5) -> None:
"""
        Set the retriever object with the specified number of documents to return per query.
        Args:
            k (int): Number of documents to return per query (default is 5).
"""
if not isinstance(k, int):
raise TypeError(f"Type {type(k)} is not supported for the number of query output `k`")
logging.info(f"Setting retriever with k={k}...")
self.__retriever = self.__db.as_retriever(search_kwargs={"k": k})
def query(self, text: str) -> list:
"""
Query the vector database to retrieve relevant documents.
Args:
text (str): Text to query.
Returns:
list: List of relevant documents.
Raises:
TypeError: If the retriever object is not set.
"""
assert isinstance(text, str), "text must be a string"
if self.retriever:
logging.info(f"Querying with text: {text}")
return self.retriever.get_relevant_documents(text)
raise TypeError('Please set retriever before calling it.')
@classmethod
def load_db(cls, file_name: str) -> object:
"""
Load a VectorizeDB object from a pickle file.
Args:
file_name (str): Name of the pickle file.
Returns:
object: Loaded VectorizeDB object.
"""
assert isinstance(file_name, str), "file_name must be a string"
logging.info(f"Loading VectorizeDB from file: {file_name}")
return pickle.load(open(file_name, 'rb'))
def dump_db(self, file_name: str) -> None:
"""
Dump the VectorizeDB object to a pickle file.
Args:
file_name (str): Name of the pickle file.
"""
assert isinstance(file_name, str), "file_name must be a string"
logging.info(f"Dumping VectorizeDB to file: {file_name}")
pickle.dump(self, open(file_name, 'wb'))
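# Illustrative usage sketch appended for this write-up (not part of the original module).
# The key is a placeholder and the pages mirror what PdfToTextLoader.text_to_docs produces.
if __name__ == "__main__":
    from langchain.docstore.document import Document

    db = VectorizeDB(openai_key="sk-...")  # placeholder key
    db.vectorize([Document(page_content="page one"), Document(page_content="page two")])
    db.retriever = 2  # return the 2 closest chunks per query
    print(db.query("what is on page one?"))
    db.dump_db("vectorized_db.pkl")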
| [] |
2024-01-10 | naz2001r/pdf-qa-bot | backend~src~pdf_loader.py | import logging
import os
import re
from langchain.docstore.document import Document
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Configure logging
logging.basicConfig(level=logging.INFO)
class PdfToTextLoader:
"""
Class for loading pdfs and saving them as texts
"""
def __init__(self, pdf_path: str) -> None:
"""
Args:
pdf_path (str): path to pdf file
"""
if not isinstance(pdf_path, str):
raise TypeError(f"Type {type(pdf_path)} is not supported for `pdf_path`.")
self.pdf_path = pdf_path
self.file_name = os.path.basename(self.pdf_path)
def load_single_pdf(self) -> list:
"""
Loads pdf file and saves it as list of strings
Returns:
list: list of texts from pdf
"""
logging.info(f"Loading PDF: {self.pdf_path}")
pdf = PyPDFLoader(self.pdf_path)
output = []
for page in pdf.load_and_split():
text = page.page_content
# Merge hyphenated words
text = re.sub(r"(\w+)-\n(\w+)", r"\1\2", text)
# Fix newlines in the middle of sentences
text = re.sub(r"(?<!\n\s)\n(?!\s\n)", " ", text.strip())
output.append(text)
return output
def text_to_docs(self, text: str) -> list:
"""
Converts a string or list of strings to a list of Documents with metadata.
Args:
text (str|list): string or list of strings from pdf
Returns:
list: list of chunked Document
"""
assert isinstance(text, (str, list)), f"Type {type(text)} is not supported for `text`."
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=0,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"file": self.pdf_path, "page": doc.metadata["page"], "chunk": i}
)
# Add sources a metadata
doc.metadata["source"] = f"File:{self.file_name} Page:{doc.metadata['page']} Part:{doc.metadata['chunk']}."
doc_chunks.append(doc)
logging.info(f"Converted PDF to {len(doc_chunks)} document chunks.")
return doc_chunks
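# Illustrative usage sketch appended for this write-up (not part of the original module);
# the PDF path is a placeholder.
if __name__ == "__main__":
    loader = PdfToTextLoader("files/sample.pdf")
    pages = loader.load_single_pdf()    # cleaned text, one string per page
    docs = loader.text_to_docs(pages)   # chunked Documents with file/page/chunk metadata
    print(docs[0].metadata["source"])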
| [] |
2024-01-10 | abdibrokhim/CrunchGPT-API | chroma_cohere.py | import os
from langchain.vectorstores import Chroma
from langchain.embeddings import CohereEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import Cohere
from langchain.chains import VectorDBQA
from langchain.document_loaders import PyMuPDFLoader
from dotenv import load_dotenv
def generate_prompt(query, file_path="files/sample.pdf"):
load_dotenv()
COHERE_API_KEY=os.getenv("COHERE_API_KEY")
try:
print('query:', query)
print('file_path:', file_path)
        # Load the PDF and split it into ~1000-character chunks
        loader = PyMuPDFLoader(file_path)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
        texts = text_splitter.split_documents(documents)
        # Embed the chunks with Cohere and index them in an in-memory Chroma store
        embeddings = CohereEmbeddings(cohere_api_key=COHERE_API_KEY)
        vectordb = Chroma.from_documents(texts, embeddings)
        # Run a retrieval-augmented QA chain over the indexed chunks
        qa = VectorDBQA.from_chain_type(llm=Cohere(cohere_api_key=COHERE_API_KEY, truncate="END"), chain_type="stuff", vectorstore=vectordb)
        prompt = qa.run(query)
if prompt == "":
return ""
print('prompt:', prompt)
return prompt.strip()
except Exception as e:
print(e)
return "" | [] |
2024-01-10 | abdibrokhim/CrunchGPT-API | chroma_chatgpt.py | import os
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain.document_loaders import PyMuPDFLoader
from dotenv import load_dotenv
def generate_prompt(query, file_path="backend/files/sample.pdf"):
load_dotenv()
OPENAI_API_KEY=os.getenv("OPENAI_API_KEY")
try:
print('query:', query)
print('file_path:',file_path)
loader = PyMuPDFLoader(file_path)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
vectordb = Chroma.from_documents(texts, embeddings)
qa = VectorDBQA.from_chain_type(llm=OpenAI(openai_api_key=OPENAI_API_KEY), chain_type="stuff", vectorstore=vectordb)
prompt = qa.run(query)
if prompt == "":
return ""
print('prompt:', prompt)
return prompt.strip()
except Exception as e:
print(e)
return "" | [] |
2024-01-10 | alisawuffles/wanli | generation~gpt3_generation.py | import openai
from typing import List
from utils.constants import OPENAI_API_KEY
from tqdm import tqdm
import time
openai.api_key = OPENAI_API_KEY
def request(
prompt: str,
engine='ada',
max_tokens=60,
temperature=1.0,
top_p=1.0,
n=1,
stop='\n',
presence_penalty=0.0,
frequency_penalty=0.0,
):
# retry request (handles connection errors, timeouts, and overloaded API)
while True:
try:
response = openai.Completion.create(
engine=engine,
prompt=prompt,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
n=n,
stop=stop,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
)
break
except Exception as e:
tqdm.write(str(e))
tqdm.write("Retrying...")
generations = [gen['text'].lstrip() for gen in response['choices']]
if len(generations) == 1:
return generations[0]
return generations
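# Illustrative call appended for this write-up (not part of the original module); it assumes
# OPENAI_API_KEY is set via utils.constants and that the chosen engine is available.
if __name__ == "__main__":
    completion = request(
        "Premise: The dog slept on the porch.\nHypothesis:",
        engine="davinci",
        temperature=0.8,
    )
    print(completion)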
| [] |
2024-01-10 | Morinator/PDF_question_answering | PaperDistiller.py | import os
import pickledb
import pypdf
from langchain import FAISS, OpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import OpenAIEmbeddings
class PaperDistiller:
def __init__(self, paper_name):
self.name = paper_name
self.answers = {}
self.cached_answers = pickledb.load('distiller.db', auto_dump=False, sig=False)
def split_pdf(self, chunk_chars=4000, overlap=50):
pdf_file_obj = open("Papers/%s.pdf" % self.name, "rb")
pdf_reader = pypdf.PdfReader(pdf_file_obj)
splits = []
split = ""
for i, page in enumerate(pdf_reader.pages):
split += page.extract_text()
if len(split) > chunk_chars:
splits.append(split[:chunk_chars])
split = split[chunk_chars - overlap:]
pdf_file_obj.close()
print(f"Split into {len(splits)} chunks")
return splits
def read_or_create_index(self):
if os.path.isdir('Index/%s' % self.name):
print("Index Found!")
self.ix = FAISS.load_local('Index/%s' % self.name, OpenAIEmbeddings())
else:
print("Creating index!")
self.ix = FAISS.from_texts(self.split_pdf(), OpenAIEmbeddings())
# Save index to local (save cost)
self.ix.save_local('Index/%s' % self.name)
def query_and_distill(self, query):
# Answer already in memory
if query in self.answers:
print("Answer found!")
return self.answers[query]
# Answer cached (asked in the past) in pickledb
elif self.cached_answers.get(query + "-%s" % self.name):
print("Answered in the past!")
return self.cached_answers.get(query + "-%s" % self.name)
# Generate the answer
else:
print("Generating answer!")
query_results = self.ix.similarity_search(query, k=2)
chain = load_qa_chain(OpenAI(temperature=0.25), chain_type="stuff")
self.answers[query] = chain.run(input_documents=query_results, question=query)
self.cached_answers.set(query + "-%s" % self.name, self.answers[query])
return self.answers[query]
def cache_answers(self):
self.cached_answers.dump()
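
# A minimal usage sketch, assuming Papers/example_paper.pdf exists and an OpenAI API key is
# configured; the paper name and question are hypothetical placeholders.
if __name__ == "__main__":
    distiller = PaperDistiller("example_paper")
    distiller.read_or_create_index()
    print(distiller.query_and_distill("What is the main contribution of this paper?"))
    distiller.cache_answers()  # persist answers to distiller.db so repeated questions are free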
| [] |
2024-01-10 | Shawnzy/Lang-Chain-Experiments | OpenSource-Falcon-LangChain~models~falcon_model.py | import os
from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeLoader
import textwrap
# --------------------------------------------------------------
# Load the HuggingFaceHub API token from the .env file
# --------------------------------------------------------------
load_dotenv(find_dotenv())
HUGGINGFACEHUB_API_TOKEN = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# --------------------------------------------------------------
# Load the LLM model from the HuggingFaceHub
# --------------------------------------------------------------
repo_id = "tiiuae/falcon-7b-instruct" # See https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads for some other options
falcon_llm = HuggingFaceHub(
repo_id=repo_id, model_kwargs={"temperature": 0.1, "max_new_tokens": 500}
)
# --------------------------------------------------------------
# Create a PromptTemplate and LLMChain
# --------------------------------------------------------------
template = """Question: {question}
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=falcon_llm)
# --------------------------------------------------------------
# Run the LLMChain
# --------------------------------------------------------------
question = "How do I make a sandwich?"
response = llm_chain.run(question)
wrapped_text = textwrap.fill(
response, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load a video transcript from YouTube
# --------------------------------------------------------------
video_url = "https://www.youtube.com/watch?v=riXpu1tHzl0"
loader = YoutubeLoader.from_youtube_url(video_url)
transcript = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=3000)
docs = text_splitter.split_documents(transcript)
# --------------------------------------------------------------
# Summarization with LangChain
# --------------------------------------------------------------
# Add map_prompt and combine_prompt to the chain for custom summarization
chain = load_summarize_chain(falcon_llm, chain_type="map_reduce", verbose=True)
print(chain.llm_chain.prompt.template)
print(chain.combine_document_chain.llm_chain.prompt.template)
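
# A sketch of the custom summarization mentioned above: load_summarize_chain accepts map_prompt and
# combine_prompt templates; the bullet-point wording here is an illustrative assumption.
bullet_prompt = PromptTemplate(
    input_variables=["text"],
    template="Summarize this transcript chunk as short bullet points:\n\n{text}\n\nBULLET POINTS:",
)
merge_prompt = PromptTemplate(
    input_variables=["text"],
    template="Merge these bullet-point summaries into one concise summary:\n\n{text}\n\nSUMMARY:",
)
custom_chain = load_summarize_chain(
    falcon_llm,
    chain_type="map_reduce",
    map_prompt=bullet_prompt,
    combine_prompt=merge_prompt,
)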
# --------------------------------------------------------------
# Test the Falcon model with text summarization
# --------------------------------------------------------------
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
# --------------------------------------------------------------
# Load an OpenAI model for comparison
# --------------------------------------------------------------
openai_llm = OpenAI(
model_name="text-davinci-003", temperature=0.1, max_tokens=500
) # max token length is 4097
chain = load_summarize_chain(openai_llm, chain_type="map_reduce", verbose=True)
output_summary = chain.run(docs)
wrapped_text = textwrap.fill(
output_summary, width=100, break_long_words=False, replace_whitespace=False
)
print(wrapped_text)
| [
"question",
"Question: {question}\n\nAnswer: Let's think step by step."
] |
2024-01-10 | Shawnzy/Lang-Chain-Experiments | LangChain-App~src~models~quickstart_guide.py | from dotenv import find_dotenv, load_dotenv
from langchain import ConversationChain, PromptTemplate
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.agents.load_tools import get_all_tool_names
from langchain.chains import LLMChain
from langchain.llms import OpenAI
# Load environment variables
load_dotenv(find_dotenv())
# --------------------------------------------------------------
# LLMs: Get predictions from a language model
# --------------------------------------------------------------
llm = OpenAI(model_name="text-davinci-003")
prompt = "Write a poem about python and ai"
print(llm(prompt))
# --------------------------------------------------------------
# Prompt Templates: Manage prompts for LLMs
# --------------------------------------------------------------
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
prompt.format(product="Smart Apps using Large Language Models (LLMs)")
# --------------------------------------------------------------
# Chains: Combine LLMs and prompts in multi-step workflows
# --------------------------------------------------------------
llm = OpenAI()
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run("AI Chatbots for Dental Offices"))
# --------------------------------------------------------------
# Agents: Dynamically Call Chains Based on User Input
# --------------------------------------------------------------
llm = OpenAI()
get_all_tool_names()
tools = load_tools(["wikipedia", "llm-math"], llm=llm)
# Finally, let's initialize an agent with the tools, the language model, and the type of agent we want to use.
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
# Now let's test it out!
result = agent.run(
"In what year was python released and who is the original creator? Multiply the year by 3"
)
print(result)
# --------------------------------------------------------------
# Memory: Add State to Chains and Agents
# --------------------------------------------------------------
llm = OpenAI()
conversation = ConversationChain(llm=llm, verbose=True)
output = conversation.predict(input="Hi there!")
print(output)
output = conversation.predict(
input="I'm doing well! Just having a conversation with an AI."
)
print(output)
| [
"Write a poem about python and ai",
"What is a good name for a company that makes {product}?"
] |
2024-01-10 | Shawnzy/Lang-Chain-Experiments | LangChain-App~src~models~youtube_chat.py | import textwrap
from dotenv import find_dotenv, load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import YoutubeLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
load_dotenv(find_dotenv())
embeddings = OpenAIEmbeddings()
def create_db_from_youtube_video_url(video_url):
loader = YoutubeLoader.from_youtube_url(video_url)
transcript = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = text_splitter.split_documents(transcript)
db = FAISS.from_documents(docs, embeddings)
return db
def get_response_from_query(db, query, k=4):
"""
gpt-3.5-turbo can handle up to 4097 tokens. Setting the chunksize to 1000 and k to 4 maximizes
the number of tokens to analyze.
"""
docs = db.similarity_search(query, k=k)
docs_page_content = " ".join([d.page_content for d in docs])
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.2)
# Template to use for the system message prompt
template = """
You are a helpful assistant that that can answer questions about youtube videos
based on the video's transcript: {docs}
Only use the factual information from the transcript to answer the question.
If you feel like you don't have enough information to answer the question, say "I don't know".
Your answers should be verbose and detailed.
"""
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
# Human question prompt
human_template = "Answer the following question: {question}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages(
[system_message_prompt, human_message_prompt]
)
chain = LLMChain(llm=chat, prompt=chat_prompt)
response = chain.run(question=query, docs=docs_page_content)
response = response.replace("\n", "")
return response, docs
# Example usage:
video_url = "https://www.youtube.com/watch?v=L_Guz73e6fw"
db = create_db_from_youtube_video_url(video_url)
query = "What are they saying about Microsoft?"
response, docs = get_response_from_query(db, query)
print(textwrap.fill(response, width=50))
| [
"[PLACEHOLDER, PLACEHOLDER]",
"t have enough information to answer the question, say \"I don",
"\n You are a helpful assistant that that can answer questions about youtube videos \n based on the video's transcript: {docs}\n \n Only use the factual information from the transcript to answer the question.\n \n If you feel like you don't have enough information to answer the question, say \"I don't know\".\n \n Your answers should be verbose and detailed.\n ",
"Answer the following question: {question}"
] |
2024-01-10 | shresthakamal/transfer-transfo | interact.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
from argparse import ArgumentParser
from itertools import chain
from pprint import pformat
import warnings
import tensorflow as tf
import torch
import torch.nn.functional as F
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from train import SPECIAL_TOKENS, build_input_from_segments, add_special_tokens_
from utils import get_dataset, download_pretrained_model
def top_filtering(logits, top_k=0.0, top_p=0.9, threshold=-float("Inf"), filter_value=-float("Inf"), beam=True):
"""Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
            threshold: a minimal threshold to keep logits
            beam: if True, ignore top_k/top_p and keep only the 5 highest-scoring tokens (beam-search style)
    """
assert logits.dim() == 1
if beam == True:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
indices_to_remove = sorted_indices[5:]
logits[indices_to_remove] = filter_value
else:
if top_k > 0:
# Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits
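
# A toy illustration of the filtering above (values are made up and the helper is not called
# anywhere in this script): with beam=False and top_p=0.9, tokens outside the 0.9 nucleus are
# pushed to -inf, so sampling can only pick the head of the distribution.
def _top_filtering_demo():
    toy_logits = torch.tensor([2.0, 1.0, 0.5, 0.1, -1.0, -3.0])
    filtered = top_filtering(toy_logits.clone(), top_k=0, top_p=0.9, beam=False)
    probs = F.softmax(filtered, dim=-1)
    return torch.multinomial(probs, num_samples=1)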
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(args.max_length):
instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
outputs = model.generate(
input_ids,
max_length=input_ids.shape[-1] + 40,
num_beams=10,
num_return_sequences=1,
no_repeat_ngram_size=1,
remove_invalid_values=True,
)
return outputs[0].tolist()
# print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
# logits = model(input_ids, token_type_ids=token_type_ids)
# beam_output = model.generate(input_ids, max_length=40, num_beams=5)
# return beam_output[0].tolist()
# if isinstance(logits, tuple): # for gpt2 and maybe others
# logits = logits[0]
# logits = logits[0, -1, :] / args.temperature
# logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
# probs = F.softmax(logits, dim=-1)
# prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
# if i < args.min_length and prev.item() in special_tokens_ids:
# while prev.item() in special_tokens_ids:
# if probs.max().item() == 1:
# warnings.warn("Warning: model generating special token with probability 1.")
# break # avoid infinitely looping over special token
# prev = torch.multinomial(probs, num_samples=1)
# if prev.item() in special_tokens_ids:
# break
# current_output.append(prev.item())
# return current_output
## ADDED CODE
def get_data():
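    # The parsing below assumes the ConvAI2 text layout, illustrated by these made-up lines:
    #   1 your persona: i like hiking.
    #   2 your persona: i have two dogs.
    #   3 hi , how are you ?<TAB>i am great , just back from a hike .
    # persona lines contain "your persona:", dialogue lines hold a prompt and its reply
    # separated by a tab, and every line starts with a turn number.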
datapath = (
"/raid/cs18resch11003/aditi/transfer-learning-conv-ai/ParlAI/data/ConvAI2/test_self_original_no_cands.txt"
)
data = open(datapath, "r")
lines = data.readlines()
personality = []
persona = []
for idx, line in enumerate(lines):
if "your persona:" in line:
line = line.strip()
line = line.replace("your persona:", "")
line = line[3:]
persona.append(line)
else:
if len(persona) != 0:
personality.append(persona)
persona = []
conversations = []
conversation = []
for idx, line in enumerate(lines):
if "your persona:" not in line:
line = line.strip()
line = line[2:]
conversation.append(line)
else:
if len(conversation) != 0:
conversations.append(conversation)
conversation = []
conversations.append(conversation)
prompts = []
replies = []
for conversation in conversations:
temp_prompt = []
temp_replies = []
for turn in conversation:
utterance1, utterance2 = turn.split("\t")
utterance1 = utterance1.strip()
utterance2 = utterance2.strip()
temp_prompt.append(utterance1)
temp_replies.append(utterance2)
prompts.append(temp_prompt)
replies.append(temp_replies)
assert len(personality) == (len(prompts) + len(replies)) / 2
return personality, prompts, replies
def run():
parser = ArgumentParser()
parser.add_argument(
"--dataset_path", type=str, default="", help="Path or url of the dataset. If empty download from S3."
)
parser.add_argument(
"--dataset_cache", type=str, default="./dataset_cache", help="Path or url of the dataset cache"
)
parser.add_argument(
"--model",
type=str,
default="openai-gpt",
help="Model type (openai-gpt or gpt2)",
choices=["openai-gpt", "gpt2"],
) # anything besides gpt2 will load openai-gpt
parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
parser.add_argument(
"--max_history", type=int, default=2, help="Number of previous utterances to keep in history"
)
parser.add_argument(
"--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)"
)
parser.add_argument("--no_sample", action="store_true", help="Set to use greedy decoding instead of sampling")
parser.add_argument("--max_length", type=int, default=20, help="Maximum length of the output utterances")
parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
parser.add_argument("--seed", type=int, default=0, help="Seed")
parser.add_argument("--temperature", type=float, default=0.7, help="Sampling softmax temperature")
parser.add_argument(
"--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)"
)
parser.add_argument(
"--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)"
)
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
logger.info(pformat(args))
if args.model_checkpoint == "":
if args.model == "gpt2":
raise ValueError("Interacting with GPT2 requires passing a finetuned model_checkpoint")
else:
args.model_checkpoint = download_pretrained_model()
if args.seed != 0:
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
logger.info("Get pretrained model and tokenizer")
tokenizer_class, model_class = (
(GPT2Tokenizer, GPT2LMHeadModel) if args.model == "gpt2" else (OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)
)
tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)
model = model_class.from_pretrained(args.model_checkpoint)
model.to(args.device)
add_special_tokens_(model, tokenizer)
####################
# logger.info("Sample a personality")
# dataset = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)
# personalities = [dialog["personality"] for dataset in dataset.values() for dialog in dataset]
# personality = random.choice(personalities)
# logger.info("Selected personality: %s", tokenizer.decode(chain(*personality)))
## ADDED CODE
personality, prompts, replies = get_data()
tokens_personality = []
for persona in personality:
temp_p = []
for p in persona:
temp_p.append(tokenizer.convert_tokens_to_ids(tokenizer.tokenize(p)))
tokens_personality.append(temp_p)
history = []
count = 0
for idx, (tp, p, r) in enumerate(zip(tokens_personality, prompts, replies)):
print(f"Personality: {personality[idx]}")
for _p, _r in zip(p, r):
print(f"Prompt: {_p}")
print(f"Original: {_r}")
history.append(tokenizer.encode(_p))
with torch.no_grad():
out_ids = sample_sequence(tp, history, tokenizer, model, args)
history.append(out_ids)
history = history[-(2 * args.max_history + 1) :]
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
print(f"Generated: {out_text}\n")
count += 1
if count == 50:
exit(0)
# while True:
# raw_text = input(">>> ")
# while not raw_text:
# print("Prompt should not be empty!")
# raw_text = input(">>> ")
# history.append(tokenizer.encode(raw_text))
# with torch.no_grad():
# out_ids = sample_sequence(personality, history, tokenizer, model, args)
# history.append(out_ids)
# history = history[-(2 * args.max_history + 1) :]
# out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
# print(out_text)
if __name__ == "__main__":
run()
| [
"[]"
] |
2024-01-10 | play-with-chatgpt/wpr_chatgpt | scripts~gpt3_node.py | #!/usr/bin/env python3
# coding=utf-8
import rospy
from std_msgs.msg import String
from urllib import response
import openai
# Callback for receiving the question string
def cbQuestion(msg):
rospy.loginfo("--------------------")
rospy.loginfo(msg.data)
global api_key
openai.api_key = api_key
prompt = msg.data
global model_engine
completion = openai.Completion.create(
engine = model_engine,
prompt = prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
if 'choices' in completion:
if len(completion['choices'])>0:
response= completion['choices'][0]['text']
global response_pub
answer_msg = String()
answer_msg.data = response
response_pub.publish(answer_msg)
else:
response = None
else:
response = None
rospy.logwarn(response)
# Main function
if __name__ == "__main__":
rospy.init_node("gpt3_node")
    # Read the API Key parameter
api_key = rospy.get_param('~openai/api_key')
model_engine = rospy.get_param('~openai/model' , "davinci-instruct-beta-v3")
rospy.logwarn("GPT-3: 当前使用模型 %s",model_engine)
    # Subscribe to questions asked from outside
question_sub = rospy.Subscriber("/wpr_ask", String, cbQuestion, queue_size=1)
    # Publish the result returned by ChatGPT
response_pub = rospy.Publisher("/chatspt_answer", String, queue_size=1)
rospy.logwarn("GPT-3: 我已经准备好了!向我提问吧 ^_^")
rospy.spin() | [] |
2024-01-10 | SAint7579/auto_negotiator | Utilities~get_Similarity.py | import openai
import chromadb
from chromadb.utils import embedding_functions
import json
import pandas as pd
vendor = pd.read_csv("C:/Users/vishw/OneDrive/Desktop/Projects/auto_negotiator/Utilities/vendors.csv")
with open('C:/Users/vishw/OneDrive/Desktop/Projects/daemon-dialoguers/openAI_api.json') as f:
    key = json.load(f)

# Chroma embedding function used by get_Similarity below (it was referenced but never defined;
# the empty api_key mirrors the placeholder used in text_Embedding).
openai_ef = embedding_functions.OpenAIEmbeddingFunction(api_key='', model_name="text-embedding-ada-002")
def text_Embedding(text):
response = openai.OpenAI(api_key='')
response = response.embeddings.create(model="text-embedding-ada-002", input=text)
return response.data[0].embedding
def get_Similarity(query):
client = chromadb.Client()
collection = client.get_or_create_collection("vendor",embedding_function=openai_ef)
docs=vendor["description"].tolist()
ids= [str(x) for x in vendor.index.tolist()]
collection.add(
documents=docs,
ids=ids
)
vector=text_Embedding(query)
results=collection.query(
query_embeddings=vector,
n_results=5,
include=["documents"])
lis=[]
for i in results['ids'][0]:
lis.append(vendor.iloc[(int(i))]["id"])
return lis | [] |
2024-01-10 | SAint7579/auto_negotiator | Utilities~vendor_chat.py | from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
import os
from openai import OpenAI
os.environ['OPENAI_API_KEY'] = ''
import pandas as pd
import numpy as np
function_json = {
"name": "get_lowest_offer",
"parameters": {
"type": "object",
"properties": {
"final_cost": {
"type": "number",
"description": "What is the best offer/lowest cost the vendor can offer."
},
"final_days": {
"type": "number",
"description": "What is the best offer/minimum number of days the vendor can deliver in."
}
},
"required": [
"final_cost",
"final_days"
]
},
"description": "Collect the cost and delivery days from the vendor after the negotiations."
}
def initiate_client(their_offer, our_offer):
client = OpenAI(api_key="")
for i in [i.id for i in client.beta.assistants.list().data if i.name == "Summarization_Assistant_ani"]:
client.beta.assistants.delete(i)
assistant = client.beta.assistants.create(
name="Summarization_Assistant_ani",
instructions=f"You are an AI assistant who is supposed to negotiate with the vendor. The vendors best offer is {their_offer}. We want to negotiate it down to {our_offer}. You are supposed to collect the counter offer from the vendor: Can the lowest cost or time be met, if not, what is the lowest they can offer. Do not write them a new counter offer. Collect the information and invoke the function. Always write your responses in the form of a mail on behalf of Vishwa Singh.",
model="gpt-4-1106-preview",
tools=[
{"type": "function", "function": function_json},
],
)
MATH_ASSISTANT_ID = assistant.id
thread = client.beta.threads.create()
return client, MATH_ASSISTANT_ID, thread
def displayMinVendor(ven):
ven["Unfulfilled_len"] = ven['Unfulfilled'].apply(lambda x: len(str(x).split(';')) if pd.notna(x) else 0)
ven['Cost' + '_normalized'] = normalize_by_sum(ven['Cost'])
ven['Days' + '_normalized'] = normalize_by_sum(ven['Days'])
ven['Unfulfilled_len' + '_normalized'] = normalize_by_sum(ven['Unfulfilled_len'])
ven['Overall'] = ven.apply(lambda row: 0.4 * row['Cost_normalized'] +
0.35 * row['Days_normalized'] +
0.25 * row['Unfulfilled_len_normalized'], axis=1)
matching_row = ven[(ven['Cost'] == ven['Cost'].min()) & (ven['Days'] == ven['Days'].min())]
if not matching_row.empty:
# If a matching row exists, print the row
print("Matching Row:")
print(matching_row)
return matching_row[['VendorID', 'Cost', 'Days', 'Unfulfilled']], (matching_row['Cost'].min(), matching_row['Days'].min())
else:
# If no matching row exists, print the minimum values
min_values_Cost = ven['Cost'].min()
min_values_Days = ven['Days'].min()
print(f"Minimum Cost Offered: {min_values_Cost}, Mimimum Days Offered: {min_values_Days}")
min_values = ven['Overall'].nsmallest(1)
result_rows = ven[ven['Overall'].isin(min_values)]
return result_rows[['VendorID', 'Cost', 'Days', 'Unfulfilled']], (min_values_Cost, min_values_Days)
def normalize_by_sum(column):
normalized_column = column / column.sum()
return normalized_column
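
# Worked example of the scoring above with made-up numbers: for two vendors with costs [4, 6],
# days [10, 30] and unfulfilled counts [0, 2], the sum-normalized columns are [0.4, 0.6],
# [0.25, 0.75] and [0.0, 1.0], so Overall = 0.4*0.4 + 0.35*0.25 + 0.25*0.0 = 0.2475 for the first
# vendor and 0.7525 for the second; the smallest Overall score wins.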
def get_counter_offer(offer):
ven = pd.read_csv("C:/VS code projects/Road to Hack/auto_negotiator/Utilities/Vendor_yes.csv")
## Check if the requirements are satisfied
if offer["requirements_satisfied"]:
## Convert offer to dataframe
dataframe = pd.DataFrame(np.array([['NEW'] + list(offer.values())])[:,[0,-2,-1,1,2]], columns=['VendorID', 'Cost', 'Days', 'CanFulfill', 'Unfulfilled'],index=[len(ven)])
#Replace True with T and False with F
dataframe['CanFulfill'] = dataframe['CanFulfill'].astype(str).str[0].str.upper()
## Append the offer to the vendor dataframe
ven = ven.append(dataframe, ignore_index=True)
## Convert cost and days to numeric
ven['Cost'] = pd.to_numeric(ven['Cost'])
ven['Days'] = pd.to_numeric(ven['Days'])
## Get minimum vendor
min_vendor, min_values = displayMinVendor(ven)
if min_vendor['VendorID'].iloc[0] == 'NEW':
## Check if min_vendor is better than min_values
if min_vendor['Cost'].iloc[0] > min_values[0] or min_vendor['Days'].iloc[0] > min_values[1]:
## Create a new offer in {'new_cost':3,'new_days':14} format
return True, {'new_cost':min(min_vendor['Cost'].iloc[0], min_values[0]),'new_days':min(min_vendor['Days'].iloc[0], min_values[1])}, {'previous_cost':min_vendor['Cost'].iloc[0],'previous_days':min_vendor['Days'].iloc[0]}
else:
return False, {}
else:
            return False, {}
def gpt_negotiation_mail(their_offer, our_offer, vendor_name):
# Create a GPT prompt
prompt = f"Write a mail to a vendor named '{vendor_name}' on behalf of Vishwa Mohan Singh (salutations), asking them to negotiate from:/nprevious cost: {their_offer['previous_cost']} Euros/n and previous days required {their_offer['previous_days']} to new offer:from:/nprevious cost: {our_offer['new_cost']} Euros/n and previous days required {our_offer['new_days']}"
mail_assistant = ChatOpenAI()
messages = [
SystemMessage(
content="You are an AI assistant that is supposed to write a mail to the vendor negotiating for a reduced cost and reduced time of delivery."
),
HumanMessage(content=prompt),
]
response = mail_assistant(messages)
return response.content | [
"Write a mail to a vendor named 'PLACEHOLDER' on behalf of Vishwa Mohan Singh (salutations), asking them to negotiate from:/nprevious cost: PLACEHOLDER Euros/n and previous days required PLACEHOLDER to new offer:from:/nprevious cost: PLACEHOLDER Euros/n and previous days required PLACEHOLDER",
"You are an AI assistant that is supposed to write a mail to the vendor negotiating for a reduced cost and reduced time of delivery."
] |
2024-01-10 | SAint7579/auto_negotiator | Interface~Procurement_chat.py | import streamlit as st
from openai import OpenAI
import json
import sys
sys.path.append('C:/VS code projects/Road to Hack/auto_negotiator/Utilities/')
from negotiation_utils import *
from negotiation_utils import gpt_draft_mail
from negotiation_utils import submit_message
from negotiation_utils import get_response
from negotiation_utils import pretty_print
from negotiation_utils import wait_on_run
from negotiation_utils import check_response
from negotiation_utils import MATH_ASSISTANT_ID
from negotiation_utils import thread
from negotiation_utils import client
print(thread.id)
st.title("Procurement Bot")
# Path to requirements file
requirements_path = 'C:/VS code projects/Road to Hack/auto_negotiator/Utilities/procurement.json'
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# Display initial response
initialsiation = True
if initialsiation:
specification = json.loads(json.load(open('C:/VS code projects/Road to Hack/auto_negotiator/Utilities/requirements.json','r')))
response = gpt_draft_mail(specification, 'ABC Company')
st.session_state.initialised = True
# Display assistant response
with st.chat_message("assistant"):
st.markdown(response)
# React to user input
if prompt := st.chat_input("What is up?"):
# Display user message in chat message container
st.chat_message("user").markdown(prompt)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
# Send user message to assistant and display assistant response
run = submit_message(client, MATH_ASSISTANT_ID, thread, prompt)
run = wait_on_run(client, run, thread)
completion = False
if run.status == 'requires_action':
response,completion = check_response(client,thread,run)
else:
response = pretty_print(get_response(client,thread))
# Display assistant response in chat message container
with st.chat_message("assistant"):
st.markdown(response)
# Add assistant response to chat history
if completion is True:
print('done')
print(run.status)
json_data = run.required_action.submit_tool_outputs.tool_calls[0].function.arguments
with open(requirements_path, 'w') as json_file:
json.dump(json_data, json_file)
st.json(json_data)
else:
print('not done')
print(run.status)
st.session_state.messages.append({"role": "assistant", "content": response})
| [] |
2024-01-10 | SAint7579/auto_negotiator | Interface~Request_chat.py | import streamlit as st
from openai import OpenAI
import json
import sys
sys.path.append('C:/VS code projects/Road to Hack/auto_negotiator/Utilities/')
from request_functions import show_json
from request_functions import submit_message
from request_functions import get_response
from request_functions import pretty_print
from request_functions import wait_on_run
from request_functions import check_response
from request_functions import function_json
from request_functions import MATH_ASSISTANT_ID
from request_functions import thread
from request_functions import client
print(thread.id)
st.title("Request Bot")
requirements_path = 'C:/VS code projects/Road to Hack/auto_negotiator/Utilities/requirements.json'
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# React to user input
if prompt := st.chat_input("What is up?"):
# Display user message in chat message container
st.chat_message("user").markdown(prompt)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
## Send user message to assistant and display assistant response
run = submit_message(client,MATH_ASSISTANT_ID, thread, prompt)
run = wait_on_run(client,run, thread)
completion = False
if run.status == 'requires_action':
response,completion = check_response(client,thread,run)
else:
response = pretty_print(get_response(client,thread))
response = f"{response}"
# Display assistant response in chat message container
with st.chat_message("assistant"):
st.markdown(response)
# Add assistant response to chat history
if completion is True:
print('done')
json_data = run.required_action.submit_tool_outputs.tool_calls[0].function.arguments
with open(requirements_path, 'w') as json_file:
json.dump(json_data, json_file)
st.json(json_data)
else:
print('not done')
st.session_state.messages.append({"role": "assistant", "content": response})
| [
"response57270828-f375-4bd1-a53d-1492c061dc58"
] |
2024-01-10 | SAint7579/auto_negotiator | Utilities~request_functions.py | import numpy as np
import pandas as pd
import json
from openai import OpenAI
import streamlit as st
def show_json(obj):
    # display() lives in IPython; import it here so the helper also works outside a notebook session
    from IPython.display import display
    display(json.loads(obj.model_dump_json()))
responses = ['false']
function_json = {
"name": "get_specifications",
"parameters": {
"type": "object",
"properties": {
"specifications": {
"type": "string",
"description": "Specifications of the merchandise like name/type, size, color, material etc."
},
"quantity": {
"type": "number",
"description": "Total required quantity of the merchandise. Needs to be greater than 0."
},
"price": {
"type": "number",
"description": "Price per unit of the required merchandise. Needs to be greater than 0"
},
"num_days": {
"type": "number",
"description": "Number of days to fulfill the order.Needs to be greater than 0"
},
"need_logo": {
"type": "string",
"description": "Description of the logo required on the merchandise. This should include position of the logo, size of the logo, printing method of logo ,color of the logo, etc. Make it NA if no logo is required."
}
},
"required": [
"specifications",
"quantity",
"price",
"num_days",
"need_logo"
]
},
"description": "Extract all the specifications of the merchandise from the user"
}
## initiate client, assistant and thread
client = OpenAI(api_key="")
for i in [i.id for i in client.beta.assistants.list().data if i.name == "Summarization_Assistant_ani"]:
client.beta.assistants.delete(i)
assistant = client.beta.assistants.create(
name="Summarization_Assistant_ani",
instructions="You are an AI assistant that is taking in procurement request from the users. There are usually for merchandise like hoodies, shirts, mugs and bottles.Your job is to get all the specifications of the merchandize from the user",
model="gpt-4-1106-preview",
tools=[
{"type": "function", "function": function_json},
],
)
MATH_ASSISTANT_ID = assistant.id
thread = client.beta.threads.create()
## necessary functions
def submit_message(client,assistant_id, thread, user_message):
client.beta.threads.messages.create(
thread_id=thread.id, role="user", content=user_message
)
return client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant_id,
)
def get_response(client,thread):
return client.beta.threads.messages.list(thread_id=thread.id, order="asc")
import time
# # Pretty printing helper
# def pretty_print(messages):
# print("# Messages")
# for m in messages:
# print(f"{m.role}: {m.content[0].text.value}")
# print()
def pretty_print(messages):
result = ""
for m in messages.data[-1:]:
result += f"{m.content[0].text.value}\n"
result += "\n"
return result
# Waiting in a loop
def wait_on_run(client,run, thread):
while run.status == "queued" or run.status == "in_progress":
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id,
)
time.sleep(0.5)
return run
def check_response(client,thread,run):
# Extract single tool call
tool_call = run.required_action.submit_tool_outputs.tool_calls[0]
name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
responses = ['true'] if arguments['quantity'] > 0 and arguments['price'] > 0 and arguments['num_days'] > 0 else ['false']
run = client.beta.threads.runs.submit_tool_outputs(
thread_id=thread.id,
run_id=run.id,
tool_outputs=[
{
"tool_call_id": tool_call.id,
"output": json.dumps(responses),
}
],
)
run = wait_on_run(client,run, thread)
print(run.status)
completion = True if responses[0] == 'true' else False
return pretty_print(get_response(client,thread)), completion
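
# A successfully extracted tool call carries arguments shaped like this hypothetical example
# (the values are illustrative, not real data); quantity, price and num_days must be > 0 for the
# run to be accepted above.
EXAMPLE_ARGUMENTS = {
    "specifications": "200 navy-blue cotton hoodies, sizes S-XL",
    "quantity": 200,
    "price": 15,
    "num_days": 21,
    "need_logo": "small white chest logo, screen printed",
}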
| [] |
2024-01-10 | SAint7579/auto_negotiator | Utilities~negotiation_utils.py | from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage
import os
import json
from openai import OpenAI
import time
import numpy as np
os.environ['OPENAI_API_KEY'] = ''
function_json = {
"name": "get_vendor_quotation",
"parameters": {
"type": "object",
"properties": {
"requirements_satisfied": {
"type": "boolean",
"description": "Can the requirements be satisfied by the vendor? Make it true even if some requirements can be satisfied."
},
"unfullfiled_requirements": {
"type": "string",
"description": "What requirements cannot be satisfied by the vendor? Separate requirements with ';'. NA if there are no missing requirements."
},
"quotation": {
"type": "number",
"description": "Price per unit of the product. Only set to NA if the requirements cannot be satisfied at all. Otherwise ask the user for the exact price."
},
"procurement_days": {
"type": "number",
"description": "Number of days to fullfil the requirements. Only set to NA if the requirements cannot be satisfied at all. Otherwise ask the user for the days"
}
},
"required": [
"requirements_satisfied",
"unfullfiled_requirements",
"quotation",
"procurement_days"
]
},
"description": "Check if the vendor can fullfil the requirements and get the vendor quotation and procurement days from the mail."
}
specification = json.loads(json.load(open('C:/VS code projects/Road to Hack/auto_negotiator/Utilities/requirements.json','r')))
client = OpenAI(api_key="")
for i in [i.id for i in client.beta.assistants.list().data if i.name == "Summarization_Assistant_ani"]:
client.beta.assistants.delete(i)
assistant = client.beta.assistants.create(
name="Summarization_Assistant_ani",
instructions=f"You are an AI assistant who is supposed to get the quotation and time of delivery from the vendor. These are the requirements:{specification} You are supposed to collect the three main information: Can the requirements be met, what is the price and how long will it take to deliver (in days). All these requirements need to be provided by the vendor in chat. Do not assume zero. Always write your responses in form of a mail on behalf of Vishwa Singh.",
model="gpt-4-1106-preview",
tools=[
{"type": "function", "function": function_json},
],
)
MATH_ASSISTANT_ID = assistant.id
thread = client.beta.threads.create()
def gpt_draft_mail(specification, vendor_name):
# Create a GPT prompt
prompt = f"Write a mail to a vendor named '{vendor_name} on behalf of Vishwa Mohan Singh (salutations), asking for a quotation for the following specifications:\nSpecifications: {specification['specifications']}\nQuantity: {specification['quantity']}\nOur Price: {specification['price']} Euros\nNumber of days Required: {specification['num_days']}\nNeed logo: {specification['need_logo']}\n\nMail:"
mail_assistant = ChatOpenAI()
messages = [
SystemMessage(
content="You are an AI assistant that is supposed to write a mail to the vendor asking for a quotation and time of delivery. Specify our required price and days as well."
),
HumanMessage(content=prompt),
]
response = mail_assistant(messages)
return response.content
## necessary functions
def submit_message(client,assistant_id, thread, user_message):
client.beta.threads.messages.create(
thread_id=thread.id, role="user", content=user_message
)
return client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant_id,
)
def get_response(client,thread):
return client.beta.threads.messages.list(thread_id=thread.id, order="asc")
import time
# # Pretty printing helper
# def pretty_print(messages):
# print("# Messages")
# for m in messages:
# print(f"{m.role}: {m.content[0].text.value}")
# print()
def pretty_print(messages):
result = ""
for m in messages.data[-1:]:
result += f"{m.content[0].text.value}\n"
result += "\n"
return result
# Waiting in a loop
def wait_on_run(client,run, thread):
while run.status == "queued" or run.status == "in_progress":
run = client.beta.threads.runs.retrieve(
thread_id=thread.id,
run_id=run.id,
)
time.sleep(0.5)
return run
def check_response(client,thread,run):
# Extract single tool call
tool_call = run.required_action.submit_tool_outputs.tool_calls[0]
name = tool_call.function.name
arguments = json.loads(tool_call.function.arguments)
responses = ['true'] if (arguments['quotation'] == 'NA' and arguments['procurement_days']== 'NA') or (arguments['quotation'] > 0 and arguments['procurement_days'] > 0) else ['false']
run = client.beta.threads.runs.submit_tool_outputs(
thread_id=thread.id,
run_id=run.id,
tool_outputs=[
{
"tool_call_id": tool_call.id,
"output": json.dumps(responses),
}
],
)
completion = True if responses[0] == 'true' else False
run = wait_on_run(client,run, thread)
print(run.status)
return pretty_print(get_response(client,thread)), completion
| [
"You are an AI assistant that is supposed to write a mail to the vendor asking for a quotation and time of delivery. Specify our required price and days as well.",
"Write a mail to a vendor named 'PLACEHOLDER on behalf of Vishwa Mohan Singh (salutations), asking for a quotation for the following specifications:\nSpecifications: PLACEHOLDER\nQuantity: PLACEHOLDER\nOur Price: PLACEHOLDER Euros\nNumber of days Required: PLACEHOLDER\nNeed logo: PLACEHOLDER\n\nMail:"
] |
2024-01-10 | SAint7579/auto_negotiator | Interface~Negotiation_chat.py | import streamlit as st
from openai import OpenAI
import json
import sys
sys.path.append('C:/VS code projects/Road to Hack/auto_negotiator/Utilities/')
st.title("Negotiation Bot")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# # Display initial response
# initialsiation = True
# if initialsiation:
# response = 'Subject: Request for Negotiation: Reduced Cost and Time of Delivery\n\nDear ABC Company,\n\nI hope this email finds you well. My name is Vishwa Mohan Singh, and I am writing to discuss a potential negotiation regarding the cost and time of delivery for our recent orders with ABC Corporation.\n\nFirstly, I would like to express my appreciation for the quality of products and services that ABC Corporation has consistently provided us in the past. Our business relationship has been mutually beneficial, and I am confident that we can continue to strengthen it further.\n\nHowever, I would like to bring to your attention a concern regarding the current cost and time of delivery for our orders. The previous cost per unit was set at 5 Euros, and the delivery time was 20 days. While we understand the value of your products, we believe that there may be room for negotiation in these areas.\n\nIn light of this, I would like to propose a revised offer for our future orders. We kindly request that the cost per unit be reduced to 3 Euros, while also aiming to significantly reduce the time of delivery to 4 days. We believe that these changes will not only benefit our organization by reducing costs and improving efficiency but will also be advantageous for ABC Corporation by increasing the volume of our orders.\n\nIt is important to note that we highly value the quality and reliability of your products, and any cost reduction should not compromise the standards we have come to expect from ABC Corporation. Furthermore, we understand that reducing the time of delivery may require adjustments to your internal processes, and we are open to discussing any feasible solutions that can help achieve this goal.\n\nI would appreciate if you could review our proposal and provide your feedback at the earliest. If you require any additional information or would like to discuss this matter further, please do not hesitate to contact me directly at [Your Contact Details].\n\nThank you for your attention to this matter, and I look forward to your prompt response. I am optimistic that we can reach a mutually beneficial agreement that will further strengthen our business partnership.\n\nBest regards,\n\nVishwa Mohan Singh'
# st.session_state.initialised = True
# # Display assistant response
# with st.chat_message("assistant"):
# # print(response)
# st.markdown(response)
# React to user input
if prompt := st.chat_input("What is up?"):
# Display user message in chat message container
st.chat_message("user").markdown(prompt)
# Add user message to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
string = 'Dear ABC Company, Thank you for getting back to us with your counteroffer. We appreciate your willingness to accommodate the new cost of 3 euros, and we understand your position regarding the delivery timeline. Your offer has been noted, and we will review it internally to determine if the suggested 18-day timeline will be suitable for our needs. Thank you once again for your cooperation and flexibility during this negotiation process. We will be in touch soon with our decision. Best Regards, Vishwa Singh'
# Display assistant response in chat message container
jh = {"final_cost":3,"final_days":18}
with st.chat_message("assistant"):
st.json(jh)
st.session_state.messages.append({"role": "assistant", "content": prompt}) | [] |
2024-01-10 | ai-ld/DB-GPT | pilot~embedding_engine~chn_document_splitter.py | import re
from typing import List
from langchain.text_splitter import CharacterTextSplitter
class CHNDocumentSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
super().__init__(**kwargs)
self.pdf = pdf
self.sentence_size = sentence_size
def split_text(self, text: str) -> List[str]:
if self.pdf:
text = re.sub(r"\n{3,}", r"\n", text)
text = re.sub("\s", " ", text)
text = re.sub("\n\n", "", text)
text = re.sub(r"([;;.!?。!?\?])([^”’])", r"\1\n\2", text)
text = re.sub(r'(\.{6})([^"’”」』])', r"\1\n\2", text)
text = re.sub(r'(\…{2})([^"’”」』])', r"\1\n\2", text)
text = re.sub(r'([;;!?。!?\?]["’”」』]{0,2})([^;;!?,。!?\?])', r"\1\n\2", text)
text = text.rstrip()
ls = [i for i in text.split("\n") if i]
for ele in ls:
if len(ele) > self.sentence_size:
ele1 = re.sub(r'([,,.]["’”」』]{0,2})([^,,.])', r"\1\n\2", ele)
ele1_ls = ele1.split("\n")
for ele_ele1 in ele1_ls:
if len(ele_ele1) > self.sentence_size:
ele_ele2 = re.sub(
r'([\n]{1,}| {2,}["’”」』]{0,2})([^\s])', r"\1\n\2", ele_ele1
)
ele2_ls = ele_ele2.split("\n")
for ele_ele2 in ele2_ls:
if len(ele_ele2) > self.sentence_size:
ele_ele3 = re.sub(
'( ["’”」』]{0,2})([^ ])', r"\1\n\2", ele_ele2
)
ele2_id = ele2_ls.index(ele_ele2)
ele2_ls = (
ele2_ls[:ele2_id]
+ [i for i in ele_ele3.split("\n") if i]
+ ele2_ls[ele2_id + 1 :]
)
ele_id = ele1_ls.index(ele_ele1)
ele1_ls = (
ele1_ls[:ele_id]
+ [i for i in ele2_ls if i]
+ ele1_ls[ele_id + 1 :]
)
id = ls.index(ele)
ls = ls[:id] + [i for i in ele1_ls if i] + ls[id + 1 :]
return ls
| [] |
2024-01-10 | jakubbares/Brander | API~runner.py | from langchain import PromptTemplate
from console_input import return_for_console_input
from data import TEST_PROMPT_FULL, TEST_PROMPT_HUMAN
from prompt.post_analysis import PostAnalysis
from api.openai import OpenAIAPI
import os
from prompt.prompt_generator import PromptGenerator
print("\n\n\n\n\n")
prompt = input("What topic do you want to mention: ")
res = return_for_console_input(prompt)
print(res)
"""
generator = PromptGenerator()
analysis = PostAnalysis()
api = OpenAIAPI()
res = api.basic_prompt_response(TEST_PROMPT_FULL)
print(res)
print(1000)
#res = generator.generate_brand_context_response(input_parameters=None, human_template="Write me a post about buying a big mac")
#print(res)
#res = generator.generate_brand_context_response(human_template=TEST_PROMPT_HUMAN, input_parameters=None)
#print(res)
res = api.chat_prompt_response(system_template="You are a helpful assistant that translates {input_language} to {output_language}.",
human_template="{text}",
input_language="English", output_language="French", text="I love programming.")
#res = analysis.extract_topics()
#print(res)
"""
| [
"What topic do you want to mention: "
] |
2024-01-10 | jakubbares/Brander | API~console_input.py | from flask import Flask, jsonify
from flask import g
from flask_restful import Api, Resource, marshal, reqparse
from flask_cors import CORS
import os
from common.logger import Logger
from endpoints.content_strategy import ContentStrategy
from endpoints.media_post_generation import MediaPostGeneration
from api.openai import OpenAIAPI
context = """
Hey, try to imagine you are the president of the Czech Republic Petr Pavel and you are writing the post about {prompt}. Now you are writing the post about it on Facebook. The post length is between 50 to 100 words. Write it according to all these specifications but do not express them explicitly. Take into account mainly his tone of voice, personality and characteristics but again do not express them explicitly just behave accordingly. Just act accordingly:
Insight: Followers of the president are happy that the president is already someone who represents the country so none has to be ashamed.
Vision: I want to make Czech republic ambitious and confident country where the people want to live in.
Mission: By representing our country with dignity and also by using the authority and possibilities of the head of state to promote important topics and solutions that will move our country in the right direction.
Solution: Listening to people, prudent decisions, respect for opponents, friendly attitude, respected personality at the international level.
Values: independence, fair-play, transparent, empathy, respect, good will
Target audience: men & women in the age between 20 - 40 years old, who are following the president for the emotional and personal reasons
Personality: He is competent - reliable, efficient, and effective. Often associated with qualities such as intelligence, professionalism, and expertise.
Tone of voice: formal, deliberate, respectful, matter-of-fact,
Characteristics: Trustworthy, professional, pragmatic, smart, patient, conventional
Communication pillars: politics, presidential agenda, motivational speeches
Never use hashtags.
Translate it to Czech
"""
api = OpenAIAPI()
def return_for_console_input(prompt):
final_prompt = context.format(prompt=prompt)
print("\n\n")
#print("\n\n" + final_prompt + "\n\n")
return api.basic_prompt_response(final_prompt)
| [
"\n Hey, try to imagine you are the president of the Czech Republic Petr Pavel and you are writing the post about PLACEHOLDER. Now you are writing the post about it on Facebook. The post length is between 50 to 100 words. Write it according to all these specifications but do not express them explicitly. Take into account mainly his tone of voice, personality and characteristics but again do not express them explicitly just behave accordingly. Just act accordingly:\n Insight: Followers of the president are happy that the president is already someone who represents the country so none has to be ashamed.\n Vision: I want to make Czech republic ambitious and confident country where the people want to live in.\n Mission: By representing our country with dignity and also by using the authority and possibilities of the head of state to promote important topics and solutions that will move our country in the right direction.\n Solution: Listening to people, prudent decisions, respect for opponents, friendly attitude, respected personality at the international level.\n Values: independence, fair-play, transparent, empathy, respect, good will\n Target audience: men & women in the age between 20 - 40 years old, who are following the president for the emotional and personal reasons\n Personality: He is competent - reliable, efficient, and effective. Often associated with qualities such as intelligence, professionalism, and expertise.\n Tone of voice: formal, deliberate, respectful, matter-of-fact,\n Characteristics: Trustworthy, professional, pragmatic, smart, patient, conventional\n Communication pillars: politics, presidential agenda, motivational speeches\n Never use hashtags.\n Translate it to Czech\n "
] |
2024-01-10 | nickbray01/ai-career-impact-web-app | server~gptAccess.py | from openai import OpenAI
from server.prompts import formatValidateOccupationPrompt, formatFillParagraph, getPromptBySection
def ask_ai(prompt, maxTokens=50, model="gpt-3.5-turbo"):
client = OpenAI()
messages = [{"role": "user", "content": prompt}]
response = client.chat.completions.create(
model=model,
messages=messages,
max_tokens=maxTokens,
temperature=0,
)
return response.choices[0].message.content
def validateOccupation(occupation):
prompt = formatValidateOccupationPrompt(occupation)
return(ask_ai(prompt))
def fillParagraph(occupation, section):
sectionPrompt = getPromptBySection(occupation, section)
prompt = formatFillParagraph(sectionPrompt)
return(ask_ai(prompt))
| [] |
2024-01-10 | zlw9161/PKC | mvrss~utils~functions.py | """A lot of functions used in our pipelines"""
import json
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from mvrss.utils import MVRSS_HOME
from mvrss.losses.soft_dice import SoftDiceLoss
from mvrss.losses.coherence import CoherenceLoss
from mvrss.losses.soft_coherence import SoftCoherenceLoss
from mvrss.losses.sparse_coherence import SparseCoherenceLoss
from mvrss.losses.smospa_coherence import SmoSpaCoherenceLoss
from mvrss.losses.distribution_coherence import DistributionCoherenceLoss
from mvrss.losses.denoise_coherence import DenoiseCoherenceLoss
from mvrss.loaders.dataloaders import Rescale, Flip, HFlip, VFlip
def get_class_weights(signal_type):
"""Load class weights for custom loss
PARAMETERS
----------
signal_type: str
Supported: 'range_doppler', 'range_angle'
RETURNS
-------
weights: numpy array
"""
weight_path = MVRSS_HOME / 'config_files'
    if signal_type == 'range_angle':
        file_name = 'ra_weights.json'
    elif signal_type == 'range_doppler':
file_name = 'rd_weights.json'
else:
raise ValueError('Signal type {} is not supported.'.format(signal_type))
file_path = weight_path / file_name
with open(file_path, 'r') as fp:
weights = json.load(fp)
weights = np.array([weights['background'], weights['pedestrian'],
weights['cyclist'], weights['car']])
weights = torch.from_numpy(weights)
return weights
# zlw@20220302 Balance the RD and RA Losses
def get_loss_weight(signal_type):
"""Load weight for rd and ra loss
PARAMETERS
----------
signal_type: str
Supported: 'range_doppler', 'range_angle'
RETURNS
-------
weight: numpy float
"""
    if signal_type == 'range_angle':
        weight = 2.0
    elif signal_type == 'range_doppler':
weight = 1.0
else:
raise ValueError('Signal type {} is not supported.'.format(signal_type))
return weight
def transform_masks_viz(masks, nb_classes):
"""Used for visualization"""
masks = masks.unsqueeze(1)
masks = (masks.float()/nb_classes)
return masks
def get_metrics(metrics, loss, losses=None):
"""Structure the metric results
PARAMETERS
----------
metrics: object
Contains statistics recorded during inference
loss: tensor
Loss value
losses: list
List of loss values
RETURNS
-------
metrics_values: dict
"""
metrics_values = dict()
metrics_values['loss'] = loss.item()
if isinstance(losses, list):
metrics_values['loss_ce'] = losses[0].item()
metrics_values['loss_dice'] = losses[1].item()
acc, acc_by_class = metrics.get_pixel_acc_class() # harmonic_mean=True)
prec, prec_by_class = metrics.get_pixel_prec_class()
recall, recall_by_class = metrics.get_pixel_recall_class() # harmonic_mean=True)
miou, miou_by_class = metrics.get_miou_class() # harmonic_mean=True)
dice, dice_by_class = metrics.get_dice_class()
metrics_values['acc'] = acc
metrics_values['acc_by_class'] = acc_by_class.tolist()
metrics_values['prec'] = prec
metrics_values['prec_by_class'] = prec_by_class.tolist()
metrics_values['recall'] = recall
metrics_values['recall_by_class'] = recall_by_class.tolist()
metrics_values['miou'] = miou
metrics_values['miou_by_class'] = miou_by_class.tolist()
metrics_values['dice'] = dice
metrics_values['dice_by_class'] = dice_by_class.tolist()
return metrics_values
def normalize(data, signal_type, norm_type='local'):
"""
Method to normalise the radar views
PARAMETERS
----------
data: numpy array
Radar view (batch)
signal_type: str
Type of radar view
Supported: 'range_doppler', 'range_angle' and 'angle_doppler'
norm_type: str
Type of normalisation to apply
Supported: 'local', 'tvt'
RETURNS
-------
norm_data: numpy array
normalised radar view
"""
    if norm_type == 'local':
min_value = torch.min(data)
max_value = torch.max(data)
norm_data = torch.div(torch.sub(data, min_value), torch.sub(max_value, min_value))
return norm_data
elif signal_type == 'range_doppler':
if norm_type == 'tvt':
file_path = MVRSS_HOME / 'config_files' / 'rd_stats_all.json'
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
with open(file_path, 'r') as fp:
rd_stats = json.load(fp)
min_value = torch.tensor(rd_stats['min_val'])
max_value = torch.tensor(rd_stats['max_val'])
elif signal_type == 'range_angle':
if norm_type == 'tvt':
file_path = MVRSS_HOME / 'config_files' / 'ra_stats_all.json'
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
with open(file_path, 'r') as fp:
ra_stats = json.load(fp)
min_value = torch.tensor(ra_stats['min_val'])
max_value = torch.tensor(ra_stats['max_val'])
elif signal_type == 'angle_doppler':
if norm_type == 'tvt':
file_path = MVRSS_HOME / 'config_files' / 'ad_stats_all.json'
else:
raise TypeError('Global type {} is not supported'.format(norm_type))
with open(file_path, 'r') as fp:
ad_stats = json.load(fp)
min_value = torch.tensor(ad_stats['min_val'])
max_value = torch.tensor(ad_stats['max_val'])
else:
raise TypeError('Signal {} is not supported.'.format(signal_type))
norm_data = torch.div(torch.sub(data, min_value),
torch.sub(max_value, min_value))
return norm_data
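# Usage sketch (illustrative, not part of the original file): 'local' rescales each
# batch by its own min/max, while 'tvt' uses the dataset-wide statistics stored in the
# config_files/*_stats_all.json files, e.g.
#   rd_view = normalize(rd_view, 'range_doppler', norm_type='tvt')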
def define_loss(signal_type, custom_loss, device):
"""
Method to define the loss to use during training
PARAMETERS
----------
signal_type: str
Type of radar view
Supported: 'range_doppler', 'range_angle' or 'angle_doppler'
    custom_loss: str
Short name of the custom loss to use
Supported: 'wce', 'sdice', 'wce_w10sdice' or 'wce_w10sdice_w5col'
Default: Cross Entropy is used for any other str
    device: str
Supported: 'cuda' or 'cpu'
"""
if custom_loss == 'wce':
weights = get_class_weights(signal_type)
loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
elif custom_loss == 'sdice':
loss = SoftDiceLoss()
elif custom_loss == 'wce_w10sdice':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss, SoftDiceLoss(global_weight=10.)]
elif custom_loss == 'wce_w10sdice_w5col':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss, SoftDiceLoss(global_weight=10.), CoherenceLoss(global_weight=5.)]
# zlw@20220302
elif custom_loss == 'wce_w10sdice_w5col_sig_blnc':
weights = get_class_weights(signal_type)
loss_weight = get_loss_weight(signal_type)
weights = loss_weight*weights
        ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss,
SoftDiceLoss(global_weight=10.*loss_weight),
CoherenceLoss(global_weight=5.)]
# zlw@20220304
elif custom_loss == 'wce_w10sdice_w5sofcol':
weights = get_class_weights(signal_type)
        ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss,
SoftDiceLoss(global_weight=10.),
SoftCoherenceLoss(global_weight=5., relax_factor=0.2, margin=0.01)]
# zlw@20220321
elif custom_loss == 'wce_w10sdice_w5spacol':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss,
SoftDiceLoss(global_weight=10.),
SparseCoherenceLoss(global_weight=5.)]
# zlw@20220322
elif custom_loss == 'wce_w10sdice_w5smospacol':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss,
SoftDiceLoss(global_weight=10.),
SmoSpaCoherenceLoss(global_weight=5.)]
# zlw@20220322
elif custom_loss == 'wce_w10sdice_w5discol':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss,
SoftDiceLoss(global_weight=10.),
DistributionCoherenceLoss(global_weight=5.)]
# zlw@20220324
elif custom_loss == 'wce_w10sdice_w5dnscol':
weights = get_class_weights(signal_type)
ce_loss = nn.CrossEntropyLoss(weight=weights.to(device).float())
loss = [ce_loss,
SoftDiceLoss(global_weight=10.),
DenoiseCoherenceLoss(global_weight=5.)]
else:
loss = nn.CrossEntropyLoss()
return loss
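# Usage sketch (illustrative, not part of the original file): depending on `custom_loss`
# the function returns either a single criterion or a list of criteria that the training
# loop is expected to evaluate and combine, e.g.
#   criterion = define_loss('range_doppler', 'wce_w10sdice', device)
#   criteria = criterion if isinstance(criterion, list) else [criterion]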
def get_transformations(transform_names, split='train', sizes=None):
"""Create a list of functions used for preprocessing
PARAMETERS
----------
transform_names: list
List of str, one for each transformation
split: str
Split currently used
sizes: int or tuple (optional)
Used for rescaling
Default: None
"""
transformations = list()
if 'rescale' in transform_names:
transformations.append(Rescale(sizes))
if 'flip' in transform_names and split == 'train':
transformations.append(Flip(0.5))
if 'vflip' in transform_names and split == 'train':
transformations.append(VFlip())
if 'hflip' in transform_names and split == 'train':
transformations.append(HFlip())
return transformations
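# Usage sketch (illustrative, not part of the original file; the sizes value is a
# placeholder): the returned transform objects are applied sequentially by the
# dataloaders, e.g.
#   transforms = get_transformations(['rescale', 'flip'], split='train', sizes=(256, 64))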
def mask_to_img(mask):
"""Generate colors per class, only 3 classes are supported"""
mask_img = np.zeros((mask.shape[0],
mask.shape[1], 3), dtype=np.uint8)
mask_img[mask == 1] = [255, 255, 0]
mask_img[mask == 2] = [0, 255, 255]
mask_img[mask == 3] = [255, 0, 0]
mask_img = Image.fromarray(mask_img)
return mask_img
def get_qualitatives(outputs, masks, paths, seq_name, quali_iter, signal_type=None):
"""
Method to get qualitative results
PARAMETERS
----------
outputs: torch tensor
Predicted masks
masks: torch tensor
Ground truth masks
paths: dict
seq_name: str
quali_iter: int
Current iteration on the dataset
signal_type: str
RETURNS
-------
quali_iter: int
"""
if signal_type:
folder_path = paths['logs'] / signal_type / seq_name[0]
else:
folder_path = paths['logs'] / seq_name[0]
folder_path.mkdir(parents=True, exist_ok=True)
outputs = torch.argmax(outputs, axis=1).cpu().numpy()
masks = torch.argmax(masks, axis=1).cpu().numpy()
for i in range(outputs.shape[0]):
mask_img = mask_to_img(masks[i])
mask_path = folder_path / 'mask_{}.png'.format(quali_iter)
mask_img.save(mask_path)
output_img = mask_to_img(outputs[i])
output_path = folder_path / 'output_{}.png'.format(quali_iter)
output_img.save(output_path)
quali_iter += 1
return quali_iter
def count_params(model):
"""Count trainable parameters of a PyTorch Model"""
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
nb_params = sum([np.prod(p.size()) for p in model_parameters])
return nb_params
| [] |
2024-01-10 | sung8/simple-langchain-test | gorilla_of.py | # Import Chat completion template and set-up variables
import openai
import urllib.parse
import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
## SETUP
openai.api_key = "EMPTY" # Key is ignored and does not matter
openai.api_base = "http://zanino.millennium.berkeley.edu:8000/v1"
# Alternate mirrors
# openai.api_base = "http://34.132.127.197:8000/v1"
## EXAMPLE FROM GORILLA INTERFACE COLAB
# GitHub: https://github.com/ShishirPatil/gorilla/blob/main/inference/README.md#inference-using-cli
# Colab: https://colab.research.google.com/drive/1DEBPsccVLF_aUnmD0FwPeHFrtdC0QIUP?usp=sharing
# Report issues
def raise_issue(e, model, prompt):
issue_title = urllib.parse.quote("[bug] Hosted Gorilla: <Issue>")
issue_body = urllib.parse.quote(f"Exception: {e}\nFailed model: {model}, for prompt: {prompt}")
issue_url = f"https://github.com/ShishirPatil/gorilla/issues/new?assignees=&labels=hosted-gorilla&projects=&template=hosted-gorilla-.md&title={issue_title}&body={issue_body}"
print(f"An exception has occurred: {e} \nPlease raise an issue here: {issue_url}")
## Query Gorilla server
def get_gorilla_response(prompt="I would like to translate from English to French.", model="gorilla-7b-hf-v1"):
try:
completion = openai.ChatCompletion.create(
model=model,
messages=[{"role": "user", "content": prompt}]
)
return completion.choices[0].message.content
except Exception as e:
raise_issue(e, model, prompt)
# # Gorilla `gorilla-mpt-7b-hf-v1` with code snippets
# # Translation
prompt = "I would like to translate 'I feel very good today.' from English to Chinese."
print(get_gorilla_response(prompt, model="gorilla-7b-hf-v1"))
######## OPEN FUNCTIONS ########
# # open functions 1
# ## DOES NOT WORK
# # source: https://github.com/ShishirPatil/gorilla/tree/main/openfunctions
# def get_gorilla_response2(prompt="Call me an Uber ride type \"Plus\" in Berkeley at zipcode 94704 in 10 minutes", model="gorilla-openfunctions-v0", functions=[]):
# openai.api_key = "EMPTY"
# openai.api_base = "http://luigi.millennium.berkeley.edu:8000/v1"
# try:
# completion = openai.ChatCompletion.create(
# model="gorilla-openfunctions-v1",
# temperature=0.0,
# messages=[{"role": "user", "content": prompt}],
# functions=functions,
# )
# return completion.choices[0].message.content
# except Exception as e:
# print(e, model, prompt)
#
#
# query = "Call me an Uber ride type \"Plus\" in Berkeley at zipcode 94704 in 10 minutes"
# functions = [
# {
# "name": "Uber Carpool",
# "api_name": "uber.ride",
# "description": "Find suitable ride for customers given the location, type of ride, and the amount of time the customer is willing to wait as parameters",
# "parameters": [{"name": "loc", "description": "location of the starting place of the uber ride"}, {"name":"type", "enum": ["plus", "comfort", "black"], "description": "types of uber ride user is ordering"}, {"name": "time", "description": "the amount of time in minutes the customer is willing to wait"}]
# }
# ]
# get_gorilla_response2(query, functions=functions)
# open functions 2
# def get_prompt(user_query, functions=[]):
# if len(functions) == 0:
# return f"USER: <<question>> {user_query}\nASSISTANT: "
# functions_string = json.dumps(functions)
# return f"USER: <<question>> {user_query} <<function>> {functions_string}\nASSISTANT: "
# def get_prompt(user_query: str, functions: list = []) -> str:
# """
# Generates a conversation prompt based on the user's query and a list of functions.
#
# Parameters:
# - user_query (str): The user's query.
# - functions (list): A list of functions to include in the prompt.
#
# Returns:
# - str: The formatted conversation prompt.
# """
# if len(functions) == 0:
# return f"USER: <<question>> {user_query}\nASSISTANT: "
# functions_string = json.dumps(functions)
# return f"USER: <<question>> {user_query} <<function>> {functions_string}\nASSISTANT: "
#
# # Device setup
# device : str = "cuda:0" if torch.cuda.is_available() else "cpu"
# torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
#
# # Model and tokenizer setup
# model_id : str = "gorilla-llm/gorilla-openfunctions-v1"
# tokenizer = AutoTokenizer.from_pretrained(model_id)
# model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True)
#
# # Move model to device
# model.to(device)
#
# # Pipeline setup
# pipe = pipeline(
# "text-generation",
# model=model,
# tokenizer=tokenizer,
# max_new_tokens=128,
# batch_size=16,
# torch_dtype=torch_dtype,
# device=device,
# )
#
# # Example usage
# query: str = "Call me an Uber ride type \"Plus\" in Berkeley at zipcode 94704 in 10 minutes"
# functions = [
# {
# "name": "Uber Carpool",
# "api_name": "uber.ride",
# "description": "Find suitable ride for customers given the location, type of ride, and the amount of time the customer is willing to wait as parameters",
# "parameters": [
# {"name": "loc", "description": "Location of the starting place of the Uber ride"},
# {"name": "type", "enum": ["plus", "comfort", "black"], "description": "Types of Uber ride user is ordering"},
# {"name": "time", "description": "The amount of time in minutes the customer is willing to wait"}
# ]
# }
# ]
#
# # Generate prompt and obtain model output
# prompt = get_prompt(query, functions=functions)
# output = pipe(prompt)
#
# print(output)
# open function 3
# # Example dummy function hard coded to return the same weather
# # In production, this could be your backend API or an external API
# def get_current_weather(location, unit="fahrenheit"):
# """Get the current weather in a given location"""
# weather_info = {
# "location": location,
# "temperature": "72",
# "unit": unit,
# "forecast": ["sunny", "windy"],
# }
# return json.dumps(weather_info)
#
# def run_conversation():
# # Step 1: send the conversation and available functions to GPT
# messages = [{"role": "user", "content": "What's the weather like in Boston?"}]
# functions = [
# {
# "name": "get_current_weather",
# "description": "Get the current weather in a given location",
# "parameters": {
# "type": "object",
# "properties": {
# "location": {
# "type": "string",
# "description": "The city and state, e.g. San Francisco, CA",
# },
# "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
# },
# "required": ["location"],
# },
# }
# ]
# openai.api_key = "EMPTY" # Hosted for free with ❤️ from UC Berkeley
# openai.api_base = "http://luigi.millennium.berkeley.edu:8000/v1"
# response = openai.ChatCompletion.create(
# # model="gpt-3.5-turbo-0613",
# model='gorilla-openfunctions-v0',
# messages=messages,
# functions=functions,
# function_call="auto", # auto is default, but we'll be explicit
# )
# response_message = response["choices"][0]["message"]
# print(response_message)
#
#
#
# run_conversation()
| [
"I would like to translate 'I feel very good today.' from English to Chinese."
] |
2024-01-10 | holynull/starloom | yahoo_finance_news.py | from typing import Iterable, Optional
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.document_loaders.web_base import WebBaseLoader
from langchain.schema import Document
from langchain.tools.base import BaseTool
class YahooFinanceNewsTool(BaseTool):
"""Tool that searches financial news on Yahoo Finance."""
name: str = "yahoo_finance_news"
description: str = (
"Useful for when you need to find financial news "
"about a public company. "
"Input should be a company ticker. "
"For example, AAPL for Apple, MSFT for Microsoft."
)
top_k: int = 10
"""The number of results to return."""
def _run(
self,
tool_input: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Yahoo Finance News tool."""
try:
import yfinance
except ImportError:
raise ImportError(
"Could not import yfinance python package. "
"Please install it with `pip install yfinance`."
)
company = yfinance.Ticker(tool_input)
try:
if company.isin is None:
return f"Company ticker {tool_input} not found."
except (HTTPError, ReadTimeout, ConnectionError):
return f"Company ticker {tool_input} not found."
links = []
try:
links = [n["link"] for n in company.news if n["type"] == "STORY"]
except (HTTPError, ReadTimeout, ConnectionError):
if not links:
return f"No news found for company that searched with {tool_input} ticker."
if not links:
return f"No news found for company that searched with {tool_input} ticker."
loader = WebBaseLoader(web_paths=links)
docs = loader.load()
result = self._format_results(docs, tool_input)
if not result:
return f"No news found for company that searched with {tool_input} ticker."
return result
@staticmethod
def _format_results(docs: Iterable[Document], tool_input: str) -> str:
doc_strings = [
"\n".join([doc.metadata["title"], doc.metadata["description"]])
for doc in docs
if tool_input in doc.metadata["description"] or tool_input in doc.metadata["title"]
]
return "\n\n".join(doc_strings)
| [] |
2024-01-10 | thxxx/strserver | setting.py | import openai
import deepl
import time
import json
from dotenv import load_dotenv
import os
load_dotenv()
AZURE_API_KEY = os.getenv("AZURE_API_KEY")
API_BASE = os.getenv("API_BASE")
api_key = os.getenv("DEEPL_API_KEY")
translator = deepl.Translator(api_key)
def translate(text:str) -> str:
result = translator.translate_text(text, source_lang="EN", target_lang="KO")
return result.text
openai.api_type = "azure"
openai.api_key = AZURE_API_KEY
openai.api_base = API_BASE
openai.api_version = "2023-05-15"
def generate(prompt:str, systemMessage:str = "", model:str = "chatgpt", keys=[]) -> str:
print("입력이 들어옴")
response = openai.ChatCompletion.create(
engine = model,
messages=[
{"role": "system", "content": f"You a helpful assistant, {systemMessage}"},
{"role": "user", "content": prompt},
],
)
print("입력이 들어옴", response)
if keys == []:
res = response['choices'][0]['message']['content']
else:
res = format_check(response['choices'][0]['message']['content'], keys)
response_body = {
'data': res,
"prompt_tokens": response['usage']['prompt_tokens'],
"completion_tokens": response['usage']['completion_tokens'],
"total_tokens": response['usage']['total_tokens']
}
return response_body
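# Example usage (illustrative, not part of the original file; assumes the Azure
# deployment named "chatgpt" and the environment variables above are configured):
#   result = generate(
#       prompt="Summarise the document below as JSON.",
#       systemMessage="You are a careful summariser.",
#       keys=["summary", "keywords"],
#   )
#   print(result["data"], result["total_tokens"])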
def modify_the_output(prompt:str) -> str:
response = openai.ChatCompletion.create(
engine = "chatgpt",
messages=[
{"role": "system", "content": "You a helpful format modifier"},
{"role": "user", "content": prompt},
],
)
return response['choices'][0]['message']['content']
def format_check(output:str, keys:list = []):
# Find the index of the first "{" and the last "}"
first_curly_brace = output.find("{")
last_curly_brace = output.rfind("}")
if first_curly_brace != -1 and last_curly_brace != -1:
processed = output[first_curly_brace:last_curly_brace+1]
else:
processed = output
print(processed)
try:
output_json = json.loads(processed)
print("안 맞는다고? ", set(list(output_json.keys()), set(keys)))
assert set(keys) == set(list(output_json.keys()))
return output_json
except:
        # ask the model to repair the format, then parse the output
        st = time.time()
        print("Output format mismatch; asking the model to fix it")
format_prompt = f"""
Modify below data to the python json format with keys : {keys}.
{processed}
"""
response = modify_the_output(format_prompt)
first_curly_brace = response.find("{")
last_curly_brace = response.rfind("}")
if first_curly_brace != -1 and last_curly_brace != -1:
processed_response = response[first_curly_brace:last_curly_brace+1]
else:
processed_response = response
        return json.loads(processed_response)
| [
"\n Modify below data to the python json format with keys : PLACEHOLDER.\n\n PLACEHOLDER\n ",
"You a helpful assistant, PLACEHOLDER",
"You a helpful format modifier"
] |
2024-01-10 | BubbleJoe-BrownU/TransformerHub | train_simplified.py | """
This simplified training script runs as a single process on one GPU (or CPU) in debug
mode; the distributed data parallel (DDP) launch paths of the full training script are
not wired up here.
To run on a single GPU, example:
$ python train_simplified.py --batch_size=32 --compile=False
"""
import os
from contextlib import nullcontext
import time
import math
import pickle
import numpy as np
import torch
import sys
sys.path.insert(0, "models")
from DecoderModels import MiniGPT, GPTConfig
# -----------------------------------------------------------------------------
# default config values designed to train a gpt2 (124M) on OpenWebText
# I/O
out_dir = 'out'
eval_interval = 2000
log_interval = 1
eval_iters = 200
eval_only = False # if True, script exits right after the first eval
always_save_checkpoint = True # if True, always save a checkpoint after each eval
init_from = 'scratch' # 'scratch' or 'resume' or 'gpt2*'
# wandb logging
wandb_log = False # disabled by default
wandb_project = 'owt'
wandb_run_name = 'gpt2' # 'run' + str(time.time())
# data
dataset = 'openwebtext'
gradient_accumulation_steps = 5 * 8 # used to simulate larger batch sizes
batch_size = 12 # if gradient_accumulation_steps > 1, this is the micro-batch size
ctx_length = 1024
# model
num_layers = 12
num_heads = 12
embed_size = 768
dropout = 0.0 # for pretraining 0 is good, for finetuning try 0.1+
bias = False # do we use bias inside LayerNorm and Linear layers?
# adamw optimizer
learning_rate = 6e-4 # max learning rate
max_iters = 600000 # total number of training iterations
weight_decay = 1e-1
beta1 = 0.9
beta2 = 0.95
grad_clip = 1.0 # clip gradients at this value, or disable if == 0.0
# learning rate decay settings
decay_lr = True # whether to decay the learning rate
warmup_iters = 2000 # how many steps to warm up for
lr_decay_iters = 600000 # should be ~= max_iters per Chinchilla
min_lr = 6e-5 # minimum learning rate, should be ~= learning_rate/10 per Chinchilla
# DDP settings
backend = 'nccl' # 'nccl', 'gloo', etc.
# system
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1' etc., or try 'mps' on macbooks
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32', 'bfloat16', or 'float16', the latter will auto implement a GradScaler
compile = True # use PyTorch 2.0 to compile the model to be faster
# -----------------------------------------------------------------------------
config_keys = [k for k,v in globals().items() if not k.startswith('_') and isinstance(v, (int, float, bool, str))]
exec(open('configurator.py').read()) # overrides from command line or config file
config = {k: globals()[k] for k in config_keys} # will be useful for logging
# -----------------------------------------------------------------------------
# if not ddp, we are running on a single gpu, and one process
master_process = True
seed_offset = 0
ddp_world_size = 1
tokens_per_iter = gradient_accumulation_steps * ddp_world_size * batch_size * ctx_length
print(f"tokens per iteration will be : {tokens_per_iter:,}")
os.makedirs(out_dir, exist_ok=True)
torch.manual_seed(1337 + seed_offset)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cuda.allow_tf32 = True # allow tf32 on cudnn
device_type = 'cuda' if 'cuda' in device else 'cpu'
# note: float16 data type will automatically use a GradScaler
ptdtype = {
'float32': torch.float32,
'bfloat16': torch.bfloat16,
'float16': torch.float16
}[dtype]
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
# poor man's data loader
data_dir = os.path.join('data', dataset)
train_data = np.memmap(os.path.join(data_dir, 'train.bin'), dtype=np.uint16, mode='r')
val_data = np.memmap(os.path.join(data_dir, 'val.bin'), dtype=np.uint16, mode='r')
def get_batch(split):
data = train_data if split == 'train' else val_data
ix = torch.randint(len(data) - ctx_length, (batch_size,))
x = torch.stack([torch.from_numpy((data[i:i+ctx_length]).astype(np.int64)) for i in ix])
y = torch.stack([torch.from_numpy((data[i+1:i+1+ctx_length]).astype(np.int64)) for i in ix])
if device == 'cuda':
# pin arrays x,y, which allows us to move them to GPU asynchronously (non_blocking=True)
x, y = x.pin_memory().to(device, non_blocking=True), y.pin_memory().to(device, non_blocking=True)
else:
x, y = x.to(device), y.to(device)
return x, y
# init these up here, can override if init_from='resume' (i.e. from a checkpoint)
iter_num = 0
best_val_loss = 1e9
# check if metadata exists and read vocab size from it
meta_path = os.path.join(data_dir, 'meta.pkl')
meta_vocab_size = None
if os.path.exists(meta_path):
with open(meta_path, 'rb') as f:
meta = pickle.load(f)
meta_vocab_size = meta['vocab_size']
print(f"found vocab size = {meta_vocab_size} inside {meta_path}")
# model init
# start with model_args from command line
model_args = dict(
num_layers = num_layers,
num_heads = num_heads,
embed_size = embed_size,
ctx_length = ctx_length,
bias = bias,
vocab_size = None,
dropout = dropout
)
if init_from == 'scratch':
# init a new model from scratch
print('Initializing a new model from scratch')
# determine the vocab size we will use for from-scratch training
if meta_vocab_size is None:
print("defaulting to vocab_size of GPT-2 to 50304 (50257 rounded up for efficiency)")
model_args['vocab_size'] = meta_vocab_size if meta_vocab_size is not None else 50304
# gptconfg = GPTConfig(**model_args)
model = MiniGPT(**model_args)
elif init_from == 'resume':
print(f"Resuming training from {out_dir}")
ckpt_path = os.path.join(out_dir, 'ckpt.pt')
checkpoint = torch.load(ckpt_path, map_location=device)
checkpoint_model_args = checkpoint['model_args']
for k in ['num_layers', 'num_heads', 'embed_size', 'ctx_length', 'bias', 'vocab_size']:
model_args[k] = checkpoint_model_args[k]
# create the model
# gptconf = GPTConfig(**model_args)
model = MiniGPT(**model_args)
# model = GPT(gptconf)
state_dict = checkpoint['model']
# fix the keys of the state dictionary
# honestly no idea how checkpoints sometimes get this prefix, have to debug more
unwanted_prefix = '_orig_mod.'
for k, v in list(state_dict.items()):
if k.startswith(unwanted_prefix):
state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
model.load_state_dict(state_dict)
iter_num = checkpoint['iter_num']
best_val_loss = checkpoint['best_val_loss']
elif init_from.startswith('gpt2'):
print(f"Initializing from OpenAI GPT-2 weights: {init_from}")
override_args = dict(dropout=dropout)
# initialize from OpenAI GPT-2 weights
model = GPT.from_pretrained(init_from, override_args)
# read off the created config params, so we can store them into checkpoint correctly
for k in ["num_layers", "num_heads", "embed_size", "ctx_length", "bias", "vocab_size"]:
        model_args[k] = getattr(model.config, k)
# crop down the model block size if desired, using model surgery
if ctx_length < model.ctx_length:
model.crop_block_size(ctx_length)
model_args['ctx_length'] = ctx_length
model.to(device)
# initialize a GradScaler. If enabled-False scaler is a no-op
scaler = torch.cuda.amp.GradScaler(enabled=(dtype=='float16'))
# optimizer
optimizer = model.configure_optimizers(
weight_decay, learning_rate, (beta1, beta2), device_type
)
if init_from == "resume":
optimizer.load_state_dict(checkpoint['optimizer'])
checkpoint = None # free up memory
# compile the model
if compile:
print("compiling the model ... (takes a ~minutes)")
unoptimized_model = model
model = torch.compile(model) # requires PyTorch 2.0
# help estimate an arbitrarily accurate loss over either split using many batches
@torch.no_grad()
def estimate_loss():
out = {}
model.eval()
for split in ['train', 'val']:
losses = torch.zeros(eval_iters)
for k in range(eval_iters):
X, Y = get_batch(split)
            with ctx:
                logits, loss = model(X, Y)
losses[k] = loss.item()
out[split] = losses.mean()
model.train()
return out
# learning rate decay scheduler (cosine with warmup)
def get_lr(it):
# 1) linear warmup for warmup_iters steps
if it < warmup_iters:
return learning_rate * it / warmup_iters
# 2) if it > lr_decay_iters, return min learning rate
if it > lr_decay_iters:
return min_lr
# 3) in between, use cosine decay down to min learning rate
decay_ratio = (it - warmup_iters) / (lr_decay_iters - warmup_iters)
assert 0 <= decay_ratio <= 1
coeff = 0.5 * (1.0 + math.cos(math.pi * decay_ratio))
return min_lr + coeff * (learning_rate - min_lr)
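# Worked example (illustrative, using the defaults above: warmup_iters=2000,
# lr_decay_iters=600000, learning_rate=6e-4, min_lr=6e-5):
#   get_lr(1000)   -> 3e-4    (halfway through the linear warmup)
#   get_lr(2000)   -> 6e-4    (warmup done, cosine decay starts at the full rate)
#   get_lr(301000) -> ~3.3e-4 (decay_ratio = 0.5, so coeff = 0.5)
#   get_lr(700000) -> 6e-5    (past lr_decay_iters, clamped to min_lr)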
# logging
if wandb_log:
import wandb
wandb.init(project=wandb_project, name=wandb_run_name, config=config)
# training loop
X, Y = get_batch('train')
t0 = time.time()
raw_model = model
while True:
# determine and set the learning rate for this iteration
lr = get_lr(iter_num) if decay_lr else learning_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# evaluate the loss on train/val sets and write checkpoints
if iter_num % eval_interval == 0:
losses = estimate_loss()
print(f"step {iter_num}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}")
if wandb_log:
wandb.log({
"iter": iter_num,
"train/loss": losses['train'],
"val/liss": losses['val'],
"lr": lr,
})
if losses['val'] < best_val_loss or always_save_checkpoint:
best_val_loss = losses['val']
if iter_num > 0:
checkpoint = {
'model': raw_model.state_dict(),
'optimizer': optimizer.state_dict(),
'model_args': model_args,
'iter_num': iter_num,
'best_val_loss': best_val_loss,
}
print(f"saving checkpoint to {out_dir}")
torch.save(checkpoint, os.path.join(out_dir, 'ckpt.pt'))
if iter_num == 0 and eval_only:
break
# forward and backward update, with optional gradient accumulation to simulate larger batch size
# and using the GradScaler if data type is float16
for micro_step in range(gradient_accumulation_steps):
        with ctx:
            logits, loss = model(X, Y)
# scale the loss to account for gradient accumulation
loss = loss / gradient_accumulation_steps
# immediately async prefetch next batch while model is doing the forward pass on the GPU
X, Y = get_batch('train')
# backward pass, with gradient scaling if training in fp16
scaler.scale(loss).backward()
# clip the gradient
# avoid gradient explosion destroying our model
if grad_clip != 0.0:
        # unscale the gradients before any operation on them
        # (they would otherwise be implicitly unscaled inside scaler.step())
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
# step the optimizer and scaler
scaler.step(optimizer)
scaler.update()
# flush the gradients as soon as we can, no need for this memory anymore
optimizer.zero_grad(set_to_none=True)
# timing and logging
t1 = time.time()
dt = t1 - t0
t0 = t1
if iter_num % log_interval == 0:
# get loss as float. note: this is a CPU-GPU sync point
# scale up to undo the division above, approximating the true total loss (exact would have been a sum)
lossf = loss.item() * gradient_accumulation_steps
print(f"iter {iter_num}: loss {lossf:.4f}, time {dt*1000:.2f}ms")
iter_num += 1
# termination conditions
if iter_num > max_iters:
break
| [] |
2024-01-10 | danfred360/what_did_i_sign_up_for | api~src~vectordb~loader.py | import os
from langchain.document_loaders import AsyncHtmlLoader
# from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_transformers import BeautifulSoupTransformer
from .provider import VectorDBProvider, RecordNotFound
from .llm import LLMProvider
class DocumentLoader():
def __init__(self):
self.vectordb = VectorDBProvider()
self.llm = LLMProvider()
self.file_path = os.environ.get('INPUT_FILES_DIRECTORY')
def load_document(self, file_path: str, file_id: int, name: str, description: str, url: str, generate_embeddings: bool = True):
if not os.path.exists(file_path):
raise Exception(f'File {file_path} not found')
with open(file_path, 'r') as f:
contents = f.read()
try:
self.vectordb.connect()
document = self.vectordb.create_file_document(
file_id,
name,
description,
contents,
url
)
except Exception as e:
self.vectordb.disconnect()
raise e
self.vectordb.disconnect()
try:
self.llm.generate_segments_for_document(document['id'], generate_embeddings)
except Exception as e:
raise e
return document
def load_documents_from_input_files_dir(self, generate_embeddings=False):
loaded_documents = []
collection_directories = os.listdir(self.file_path)
for collection_name in collection_directories:
if collection_name == 'README.md':
continue
try:
self.vectordb.connect()
collection = self.vectordb.get_collection_by_name(collection_name)
except RecordNotFound:
try:
collection = self.vectordb.create_collection(
collection_name,
'Generated by file document loader',
)
except Exception as e:
raise e
except Exception as e:
raise e
finally:
self.vectordb.disconnect()
file_class_directories = os.listdir(os.path.join(self.file_path, collection_name))
for file_class_name in file_class_directories:
if file_class_name == 'terms_of_service':
file_class_id = 1
elif file_class_name == 'privacy_policy':
file_class_id = 2
else:
file_class_id = 3
file_names = os.listdir(os.path.join(self.file_path, collection_name, file_class_name))
for file_name in file_names:
try:
self.vectordb.connect()
file = self.vectordb.get_file_by_name(file_name)
except RecordNotFound:
try:
file = self.vectordb.create_file(
collection['id'],
file_class_id,
file_name,
'Generated by file document loader',
'foo.com'
)
except Exception as e:
raise e
except Exception as e:
raise e
finally:
self.vectordb.disconnect()
document_names = os.listdir(os.path.join(self.file_path, collection_name, file_class_name, file_name))
for document_name in document_names:
document_path = os.path.join(self.file_path, collection_name, file_class_name, file_name, document_name)
try:
self.vectordb.connect()
self.vectordb.get_file_document_by_name(file['id'], document_name)
except RecordNotFound:
document = self.load_document(
document_path,
file['id'],
document_name,
'Generated by file document loader',
'foo.com',
generate_embeddings
)
loaded_documents.append({'id': document['id'], 'name': document['name']})
return loaded_documents
def load_file_from_url(self, user_id: str, url: str, collection_id: int = 1, file_class_id = 3, generate_embeddings: bool = False):
loader = AsyncHtmlLoader(url)
docs = loader.load()
bs_transformer = BeautifulSoupTransformer()
tags_to_extract = [
"title",
"p",
"li",
"div",
"a",
"span"
]
docs_transformed = bs_transformer.transform_documents(docs, tags_to_extract=tags_to_extract)
doc = docs_transformed[0]
# running into rate limiting issues when extracting metadata
# schema = {
# "properties": {
# "document_name": {"type": "string"},
# "document_summary": {"type": "string"}
# },
# "required": ["document_name", "document_summary"]
# }
# splitter = RecursiveCharacterTextSplitter()
# splits = splitter.split_documents(docs_transformed)
# extracted_content = self.llm.extract_document_metadata(
# schema=schema,
# contenst=splits[0].page_content
# )
try:
title = url
description = "Generated by url document loader"
self.vectordb.connect()
file = self.vectordb.create_file(
collection_id,
file_class_id,
title or url,
description,
url
)
document = self.vectordb.create_file_document(
file['id'],
title or url,
description,
doc.page_content,
url
)
except Exception as e:
raise Exception(f'Failed to load file from url {url} with error: {e}')
finally:
self.vectordb.disconnect()
try:
self.llm.generate_segments_for_document(document['id'], generate_embeddings)
except Exception as e:
raise Exception(f'Failed to generate segments for document {document["id"]} with error: {e}')
return file
def process_file(self, file_id: int, generate_embeddings: bool = False):
try:
self.vectordb.connect()
file = self.vectordb.get_file(file_id)
except RecordNotFound:
raise Exception(f'File with id {file_id} not found')
finally:
self.vectordb.disconnect()
loader = AsyncHtmlLoader(file['url'])
docs = loader.load()
bs_transformer = BeautifulSoupTransformer()
tags_to_extract = [
"title",
"p",
"li",
"div",
"a",
"span"
]
docs_transformed = bs_transformer.transform_documents(docs, tags_to_extract=tags_to_extract)
doc = docs_transformed[0]
try:
title = file['url']
description = "Generated by url document loader"
self.vectordb.connect()
document = self.vectordb.create_file_document(
file_id,
title,
description,
doc.page_content,
file['url']
)
updated_file = self.vectordb.update_file(
file_id
)
except Exception as e:
raise Exception(f'Failed to process file with id {file_id} with error: {e}')
finally:
self.vectordb.disconnect()
try:
self.llm.generate_segments_for_document(document['id'], generate_embeddings)
except Exception as e:
raise Exception(f'Failed to generate segments for document {document["id"]} with error: {e}')
return updated_file
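# Example usage (illustrative, not part of the original module; assumes the vector
# database, INPUT_FILES_DIRECTORY and LLM credentials referenced above are configured,
# and that the URL is a placeholder):
#   loader = DocumentLoader()
#   file = loader.load_file_from_url(
#       user_id="demo-user",
#       url="https://example.com/terms",
#       generate_embeddings=True,
#   )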
| [] |
2024-01-10 | hachreak/zenodo | zenodo~modules~deposit~receivers.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo Deposit module receivers."""
from __future__ import absolute_import, print_function
from flask import current_app
from zenodo.modules.deposit.tasks import datacite_register
from zenodo.modules.openaire.tasks import openaire_direct_index
def datacite_register_after_publish(sender, action=None, pid=None,
deposit=None):
"""Mind DOI with DataCite after the deposit has been published."""
if action == 'publish' and \
current_app.config['DEPOSIT_DATACITE_MINTING_ENABLED']:
recid_pid, record = deposit.fetch_published()
datacite_register.delay(recid_pid.pid_value, str(record.id))
def openaire_direct_index_after_publish(sender, action=None, pid=None,
deposit=None):
"""Send published record for direct indexing at OpenAIRE."""
if current_app.config['OPENAIRE_DIRECT_INDEXING_ENABLED']:
_, record = deposit.fetch_published()
        if action == 'publish':
openaire_direct_index.delay(record_uuid=str(record.id))
| [] |
2024-01-10 | treerootboy/langchain | mrkl.py | from langchain import OpenAI, LLMMathChain, SerpAPIWrapper
from langchain.agents import initialize_agent, Tool, AgentExecutor
from langchain.chat_models import ChatOpenAI
import os
import chainlit as cl
@cl.on_chat_start
def start():
llm = ChatOpenAI(temperature=0, streaming=True)
llm1 = OpenAI(temperature=0, streaming=True)
# search = SerpAPIWrapper()
llm_math_chain = LLMMathChain.from_llm(llm=llm, verbose=True)
tools = [
Tool(
name="Calculator",
func=llm_math_chain.run,
description="useful for when you need to answer questions about math",
),
]
agent = initialize_agent(
tools, llm1, agent="chat-zero-shot-react-description", verbose=True
)
cl.user_session.set("agent", agent)
@cl.on_message
async def main(message):
agent = cl.user_session.get("agent") # type: AgentExecutor
cb = cl.LangchainCallbackHandler(stream_final_answer=True)
await cl.make_async(agent.run)(message, callbacks=[cb])
| [] |
2024-01-10 | treerootboy/langchain | simpledoc.py | from langchain.document_loaders import PyPDFLoader
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
loader = PyPDFLoader("/Users/tr/Downloads/数字化转型:可持续的进化历程 - 埃森哲.pdf")
pages = loader.load_and_split()
docsearch = Chroma.from_documents(pages, OpenAIEmbeddings())
qa = RetrievalQA.from_chain_type(llm=ChatOpenAI(), chain_type="stuff", retriever=docsearch.as_retriever())
qa.run('Who is the author of this book?')
| [] |
2024-01-10 | afiaka87/tortoise-tts | tortoise~utils~diffusion.py | """
This is an almost carbon copy of gaussian_diffusion.py from OpenAI's ImprovedDiffusion repo,
which itself started out as a PyTorch port of Ho et al's diffusion models:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py
Docstrings have been added, as well as DDIM sampling and a new collection of beta schedules.
"""
import enum
import math
import numpy as np
import torch
import torch as th
from tqdm import tqdm
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
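# For reference (comment added for clarity, not in the original): the expression above is
#   KL(N(mean1, exp(logvar1)) || N(mean2, exp(logvar2)))
#     = 0.5 * (logvar2 - logvar1 + exp(logvar1 - logvar2)
#              + (mean1 - mean2)^2 * exp(-logvar2) - 1)
# evaluated element-wise, in nats.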
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that this was uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
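# Illustrative example (not part of the original file): the "cosine" schedule returned
# by get_named_beta_schedule() is built with this helper, e.g.
#   betas = betas_for_alpha_bar(
#       1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
#   )
#   # -> a length-1000 numpy array of betas, each capped at max_beta=0.999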
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = 'previous_x' # the model predicts x_{t-1}
START_X = 'start_x' # the model predicts x_0
EPSILON = 'epsilon' # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = 'learned'
FIXED_SMALL = 'fixed_small'
FIXED_LARGE = 'fixed_large'
LEARNED_RANGE = 'learned_range'
class LossType(enum.Enum):
MSE = 'mse' # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = 'rescaled_mse' # use raw MSE loss (with RESCALED_KL when learning variances)
KL = 'kl' # use the variational lower-bound
RESCALED_KL = 'rescaled_kl' # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Ported directly from here, and then adapted over time to further experimentation.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
:param model_mean_type: a ModelMeanType determining what the model outputs.
:param model_var_type: a ModelVarType determining how variance is output.
:param loss_type: a LossType determining the loss function to use.
:param rescale_timesteps: if True, pass floating point timesteps into the
model so that they are always scaled like in the
original paper (0 to 1000).
"""
def __init__(
self,
*,
betas,
model_mean_type,
model_var_type,
loss_type,
rescale_timesteps=False,
conditioning_free=False,
conditioning_free_k=1,
ramp_conditioning_free=True,
):
self.model_mean_type = ModelMeanType(model_mean_type)
self.model_var_type = ModelVarType(model_var_type)
self.loss_type = LossType(loss_type)
self.rescale_timesteps = rescale_timesteps
self.conditioning_free = conditioning_free
self.conditioning_free_k = conditioning_free_k
self.ramp_conditioning_free = ramp_conditioning_free
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# log calculation clipped because the posterior variance is 0 at the
# beginning of the diffusion chain.
self.posterior_log_variance_clipped = np.log(
np.append(self.posterior_variance[1], self.posterior_variance[1:])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev)
* np.sqrt(alphas)
/ (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
)
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(
self.log_one_minus_alphas_cumprod, t, x_start.shape
)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
* noise
)
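    # In closed form q_sample() draws x_t = sqrt(alpha_bar_t) * x_start
    # + sqrt(1 - alpha_bar_t) * noise with noise ~ N(0, I). Illustrative call
    # (not part of the original file):
    #   t = th.randint(0, diffusion.num_timesteps, (x_start.shape[0],), device=x_start.device)
    #   x_t = diffusion.q_sample(x_start, t)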
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(
self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None
):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, self._scale_timesteps(t), **model_kwargs)
if self.conditioning_free:
model_output_no_conditioning = model(x, self._scale_timesteps(t), conditioning_free=True, **model_kwargs)
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
if self.conditioning_free:
model_output_no_conditioning, _ = th.split(model_output_no_conditioning, C, dim=1)
if self.model_var_type == ModelVarType.LEARNED:
model_log_variance = model_var_values
model_variance = th.exp(model_log_variance)
else:
min_log = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x.shape
)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# The model_var_values is [-1, 1] for [min_var, max_var].
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
if self.conditioning_free:
if self.ramp_conditioning_free:
assert t.shape[0] == 1 # This should only be used in inference.
cfk = self.conditioning_free_k * (1 - self._scale_timesteps(t)[0].item() / self.num_timesteps)
else:
cfk = self.conditioning_free_k
model_output = (1 + cfk) * model_output - cfk * model_output_no_conditioning
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
pred_xstart = process_xstart(
self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)
)
model_mean = model_output
elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]:
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(
x_start=pred_xstart, x_t=x, t=t
)
else:
raise NotImplementedError(self.model_mean_type)
assert (
model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
)
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
def _predict_xstart_from_xprev(self, x_t, t, xprev):
assert x_t.shape == xprev.shape
return ( # (xprev - coef2*x_t) / coef1
_extract_into_tensor(1.0 / self.posterior_mean_coef1, t, x_t.shape) * xprev
- _extract_into_tensor(
self.posterior_mean_coef2 / self.posterior_mean_coef1, t, x_t.shape
)
* x_t
)
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _scale_timesteps(self, t):
if self.rescale_timesteps:
return t.float() * (1000.0 / self.num_timesteps)
return t
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, self._scale_timesteps(t), **model_kwargs)
new_mean = (
p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
)
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(
x, self._scale_timesteps(t), **model_kwargs
)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(
x_start=out["pred_xstart"], x_t=x, t=t
)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_{t-1}.
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(
cond_fn, out, x, t, model_kwargs=model_kwargs
)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
for i in tqdm(indices, disable=not progress):
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma ** 2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_next)
+ th.sqrt(1 - alpha_bar_next) * eps
)
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices, disable=not progress)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(
self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None
):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(
true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]
)
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
# TODO: support multiple model outputs for this mode.
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_outputs = model(x_t, self._scale_timesteps(t), **model_kwargs)
if isinstance(model_outputs, tuple):
model_output = model_outputs[0]
terms['extra_outputs'] = model_outputs[1:]
else:
model_output = model_outputs
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
target = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0]
                x_start_pred = th.zeros_like(x_start)  # predicting x_start is not supported for PREVIOUS_X; zero placeholder
elif self.model_mean_type == ModelMeanType.START_X:
target = x_start
x_start_pred = model_output
elif self.model_mean_type == ModelMeanType.EPSILON:
target = noise
x_start_pred = self._predict_xstart_from_eps(x_t, t, model_output)
else:
raise NotImplementedError(self.model_mean_type)
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
terms["x_start_predicted"] = x_start_pred
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def autoregressive_training_losses(self, model, x_start, t, model_output_keys, gd_out_key, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
assert False # not currently supported for this type of diffusion.
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_outputs = model(x_t, x_start, self._scale_timesteps(t), **model_kwargs)
terms.update({k: o for k, o in zip(model_output_keys, model_outputs)})
model_output = terms[gd_out_key]
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C, 2, *x_t.shape[2:])
model_output, model_var_values = model_output[:, :, 0], model_output[:, :, 1]
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
if self.model_mean_type == ModelMeanType.PREVIOUS_X:
target = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0]
                x_start_pred = th.zeros_like(x_start)  # predicting x_start is not supported for PREVIOUS_X; zero placeholder
elif self.model_mean_type == ModelMeanType.START_X:
target = x_start
x_start_pred = model_output
elif self.model_mean_type == ModelMeanType.EPSILON:
target = noise
x_start_pred = self._predict_xstart_from_eps(x_t, t, model_output)
else:
raise NotImplementedError(self.model_mean_type)
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
terms["x_start_predicted"] = x_start_pred
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(
mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0
)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
beta_start = scale * 0.0001
beta_end = scale * 0.02
return np.linspace(
beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64
)
elif schedule_name == "cosine":
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def autoregressive_training_losses(
self, model, *args, **kwargs
): # pylint: disable=signature-differs
return super().autoregressive_training_losses(self._wrap_model(model, True), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model, autoregressive=False):
if isinstance(model, _WrappedModel) or isinstance(model, _WrappedAutoregressiveModel):
return model
mod = _WrappedAutoregressiveModel if autoregressive else _WrappedModel
return mod(
model, self.timestep_map, self.rescale_timesteps, self.original_num_steps
)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there's 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(
f"cannot create exactly {num_timesteps} steps with an integer stride"
)
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(
f"cannot divide section of {size} steps into {section_count}"
)
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
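# Worked example: space_timesteps(100, [10]) keeps the 10 evenly spaced steps
# {0, 11, 22, ..., 99}, while space_timesteps(100, "ddim25") keeps every 4th
# step {0, 4, 8, ..., 96}.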
class _WrappedModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
class _WrappedAutoregressiveModel:
def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps):
self.model = model
self.timestep_map = timestep_map
self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, x0, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
if self.rescale_timesteps:
new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, x0, new_ts, **kwargs)
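# Note: both wrapper classes above translate the spaced process's timestep
# indices back to the original process's indices via timestep_map (optionally
# rescaling them to a 0..1000 range) before calling the underlying model.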
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape) | [] |
2024-01-10 | elisa97/IR-tip-of-the-tongue | tomt-query-reduction~chat-gpt~query-variants-in-progress~query-expansion-chatpgt.py | #!/usr/bin/python3
import json
target_file = 'query-expansions-from-chatgpt-raw.json'
prompts = json.load(open('query-expansion-prompts.json'))
queries = []
for i in ['../../tot-dev/', '../../tot-train/', '../../tot-test/']:
with open(f'{i}/queries.jsonl') as f:
for l in f:
l = json.loads(l)
queries += [l['query']]
def process_query(query):
import openai
print(f'Process Query: {query}')
request_prompt = "1"
request = prompts[request_prompt].replace('<ORIGINAL_QUERY>', query)
ret = {'request': request, 'request_prompt': request_prompt}
ret['gpt-3.5-turbo-response'] = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
print(f'Response: {ret}')
return ret
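# Note (assumption about the pre-1.0 OpenAI response shape): the generated
# expansion text can later be read back from
# ret['gpt-3.5-turbo-response']['choices'][0]['message']['content'].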
def main(num=10):
performed = 0
ret = json.load(open(target_file))
for query in queries:
if query in ret.keys():
continue
try:
ret[query] = process_query(query)
performed += 1
except Exception as e:
print(e)
break
if performed > num:
break
json.dump(ret, open(target_file, 'w'))
if __name__ == '__main__':
main(1000)
| [
"query-expansion-prompts.json",
"1",
"You are a helpful assistant."
] |
2024-01-10 | sfs999/win_chatgpt | chatwx.py | #-*- coding: GBK-*-
import time
from wxauto import *
import openai
import os
# Proxy port for reaching the OpenAI API
os.environ['HTTP_PROXY'] = 'http://127.0.0.1:7890'
os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:7890'
#https://platform.openai.com/overview
openai.api_key="your_key"
def chatretern(prompt,moudel_engine="gpt-3.5-turbo"):
cmpletion=openai.ChatCompletion.create(
model=moudel_engine,
messages=[{"role":"user","content":prompt}]
)
return cmpletion
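# Note: the main loop below polls the WeChat window roughly once per second and
# replies only when a whitelisted sender's message contains the trigger
# nickname; the first 7 characters (assumed to be the trigger prefix) are
# stripped before the text is sent to the model.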
if __name__ == '__main__':
    who = '文件传输助手' # chat target: WeChat contact or group name (here the built-in "File Transfer Assistant")
    nickname = 'chatgpt' # keyword that triggers a chatgpt reply
    speakList = ['帆'] # whitelist of senders allowed to ask questions
wx = WeChat()
wx.ChatWith(who)
print("开始监控win微信程序")
while True:
msgobject1 = wx.GetLastMessage
speaker1, msgcontent, speakerid1 = msgobject1
time.sleep(1)
        # If the message contains the chatgpt nickname and the sender is whitelisted:
if nickname in msgcontent and speaker1 in speakList:
wx.SendMsg('已收到 %s 的问题:' % (speaker1) + msgcontent[7:])
print("已收到",'%s' % (speaker1),"的问题")
sccess = False
while not sccess:
try:
ai_response = chatretern(msgcontent[7:])
returnMessage="sumtoken:"+str(ai_response.usage.total_tokens)+" "+ai_response.choices[0].message['content']
sccess = True
except:
wx.SendMsg('error! retrying...')
time.sleep(1)
wx.SendMsg('@%s' % (speaker1) + returnMessage)
print("已回复",'%s' % (speaker1),"的问题")
continue
| [] |
2024-01-10 | chirag127/Log-Parsing-and-Errors | im.py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import re
# Load the log data from the CSV file (replace with your own dataset path)
log_data = pd.read_csv("/workspaces/Log-Parsing-and-Errors/error.csv")
# Keep only the '_raw' column
log_data = log_data[["_raw"]]
# # for each log message, remove the log messages which don't contain "ERROR" in the first 100 characters
# for i in range(len(log_data)):
# if "ERROR" not in log_data["_raw"][i][:100]:
# log_data = log_data.drop(i)
# Reset the index
log_data = log_data.reset_index(drop=True)
# for each log message, remove the everything before the third :
for i in range(len(log_data)):
log_data["_raw"][i] = log_data["_raw"][i].split(":", 3)[-1]
# for each log message, truncate the log messages after the first 100 characters
for i in range(len(log_data)):
log_data["_raw"][i] = log_data["_raw"][i][:500]
# for each log message, remove everything after the first line break character
for i in range(len(log_data)):
log_data["_raw"][i] = log_data["_raw"][i].split("\n", 1)[0]
# Data preprocessing: remove non-alphanumeric characters and convert to lowercase
log_data["_raw"] = log_data["_raw"].str.replace("[^a-zA-Z0-9 ]", "").str.lower()
# Define a regular expression pattern to match URLs
url_pattern = (
r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
# Function to remove URLs from a text
def remove_urls(text):
return re.sub(url_pattern, "", text)
# Apply the remove_urls function to each log message
log_data["_raw"] = log_data["_raw"].apply(remove_urls)
data = {
"log_message": log_data["_raw"].tolist(),
}
df = pd.DataFrame(data)
# Vectorize log messages using TF-IDF
vectorizer = TfidfVectorizer(max_features=500, stop_words="english")
log_vectors = vectorizer.fit_transform(df["log_message"])
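# Note: max_features=500 caps the TF-IDF vocabulary, so each log message is
# represented as a sparse 500-dimensional vector before K-Means clustering.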
# k_range = range(1, 20)
# sse = []
# for k in k_range:
# kmeans = KMeans(n_clusters=k)
# kmeans.fit(log_vectors)
# sse.append(kmeans.inertia_)
# plt.figure(figsize=(8, 6))
# plt.plot(k_range, sse, marker='o', linestyle='-')
# plt.xlabel('Number of Clusters (K)')
# plt.ylabel('Sum of Squared Distances')
# plt.title('Elbow Method for Optimal K')
# plt.grid(True)
# plt.show()
# # Initialize an empty list to store the silhouette scores
# silhouette_scores = []
# # Iterate through each value of K
# for k in k_range:
# kmeans = KMeans(n_clusters=k)
# kmeans.fit(log_vectors)
# # Check if the number of unique labels is less than 2
# if len(np.unique(kmeans.labels_)) < 2:
# silhouette_avg = -1 # Set a placeholder score (e.g., -1) for such cases
# else:
# labels = kmeans.labels_
# silhouette_avg = silhouette_score(log_vectors, labels)
# silhouette_scores.append(silhouette_avg)
# # Plot the Silhouette Score graph
# plt.figure(figsize=(8, 6))
# plt.plot(k_range, silhouette_scores, marker='o', linestyle='-')
# plt.xlabel('Number of Clusters (K)')
# plt.ylabel('Silhouette Score')
# plt.title('Silhouette Score for Optimal K')
# plt.grid(True)
# plt.show()
# Train a K-Means clustering model
n_clusters = 7 # You can adjust the number of clusters as needed
kmeans = KMeans(n_clusters=n_clusters)
kmeans.fit(log_vectors)
# Extract keywords from cluster centroids
cluster_keywords = {}
for cluster_id in range(n_clusters):
# Get indices of log messages in this cluster
cluster_indices = kmeans.labels_ == cluster_id
# Extract keywords from the log messages in this cluster
keywords = []
for log_message in df.loc[cluster_indices, "log_message"]:
keywords += re.findall(r"\b\w+\b", log_message)
# Count and rank keyword occurrences
keyword_counts = pd.Series(keywords).value_counts()
# Store the top keywords for this cluster
cluster_keywords[cluster_id] = keyword_counts.index.tolist()[
:5
] # Adjust the number of keywords as needed
# Print the identified patterns (top keywords for each cluster)
print("Identified Patterns:")
for cluster_id, keywords in cluster_keywords.items():
print(f"Pattern {cluster_id + 1}: {', '.join(keywords)}")
# make a list of one message per cluster
cluster_messages = []
for cluster_id in range(n_clusters):
# Get indices of log messages in this cluster
cluster_indices = kmeans.labels_ == cluster_id
# Extract keywords from the log messages in this cluster
keywords = []
for log_message in df.loc[cluster_indices, "log_message"]:
keywords += re.findall(r"\b\w+\b", log_message)
# Count and rank keyword occurrences
keyword_counts = pd.Series(keywords).value_counts()
# Store the top keywords for this cluster
cluster_messages.append(df.loc[cluster_indices, "log_message"].tolist()[0])
# print(cluster_messages)
for message in cluster_messages:
print(f"cluster number: {cluster_messages.index(message) + 1} \n {message} \n")
# Replace 'YOUR_API_KEY' with your CohereAI API key
import cohere
co = cohere.Client(
"PQ50WPjjMsFSzUhZlMQaGTlS30MyRs9YkbuKfhHh"
) # This is your trial API key
def generate_patterns(log_messages, num_patterns=5):
patterns = []
# Iterate through log messages
for message in log_messages:
response = co.chat(
model="command-nightly",
message=f"Please generate a regex pattern to match the following log message: {message}\n\nonly give regex and nothing else strictly",
temperature=0.3,
# chat_history=[{"user_name": "User", "message": f"Please generate a regex pattern to match the following log message: {message}\n\nonly give regex and nothing else strictly"}],
prompt_truncation="auto",
# stream=True,
citation_quality="accurate",
connectors=[{"id": "web-search"}],
)
# print(response)
generated_patterns = response.text
print("Prediction: {}".format(generated_patterns))
# Extract generated patterns from the response
patterns.append({"log_message": message, "patterns": generated_patterns})
return patterns
# # Generate patterns for each cluster
# cluster_patterns = generate_patterns(cluster_messages)
# # Print the identified patterns (top keywords for each cluster)
# print("Identified Patterns:")
# for cluster_id, patterns in enumerate(cluster_patterns):
# print(f"Pattern {cluster_id + 1}: {patterns['patterns']}")
| [
"Please generate a regex pattern to match the following log message: PLACEHOLDER\n\nonly give regex and nothing else strictly"
] |
2024-01-10 | ForceMultiplierAI/AgentWorker | examples~custom_agent~custom_llm_chat_agent_openai.py | # Working
# OpenAI
# almost working: WizzardLM
# export OPENAI_API_BASE=https://api.openai.com/v1
# export OPENAI_API_BASE=http://chat.forcemultiplier.ai
import pprint
from langchain.utilities import SearxSearchWrapper
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import BaseChatPromptTemplate
from langchain import SerpAPIWrapper, LLMChain
from langchain.chat_models import ChatOpenAI
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish, HumanMessage
import re
search = SearxSearchWrapper(searx_host="http://127.0.0.1:8080")
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
)
]
# Set up the base template
template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: Search # Action: the action to take, Search example should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
# Set up a prompt template
class CustomPromptTemplate(BaseChatPromptTemplate):
# The template to use
template: str
# The list of tools available
tools: List[Tool]
def format_messages(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
formatted = self.template.format(**kwargs)
return [HumanMessage(content=formatted)]
prompt = CustomPromptTemplate(
template=template,
tools=tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
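# Note: the parser above relies on the LLM reproducing the literal "Action:" /
# "Action Input:" lines from the template; generation is stopped at
# "\nObservation:" (see the agent definition below) so the executor can insert
# the real tool output rather than letting the model invent one.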
output_parser = CustomOutputParser()
llm = ChatOpenAI(temperature=0)
# LLM chain consisting of the LLM and a prompt
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
# agent_executor.run("How many people live in canada as of 2023?")
agent_executor.run("What country did Russia invade?")
| [
"Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: Search # Action: the action to take, Search example should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nBegin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n\nQuestion: {input}\n{agent_scratchpad}",
"input",
"intermediate_steps"
] |
2024-01-10 | ForceMultiplierAI/AgentWorker | examples~custom_agent~wizardlm.py | # Working
# OpenAI
# almost working: WizzardLM
# export OPENAI_API_BASE=https://api.openai.com/v1
# export OPENAI_API_BASE=http://chat.forcemultiplier.ai
import pprint
from langchain.utilities import SearxSearchWrapper
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import BaseChatPromptTemplate
from langchain import SerpAPIWrapper, LLMChain
from langchain.chat_models import ChatOpenAI
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish, HumanMessage
import re
search = SearxSearchWrapper(searx_host="http://127.0.0.1:8080")
tools = [
Tool(
name = "Search",
func=search.run,
description="useful for when you need to answer questions about current events"
)
]
# Set up the base template
template = """Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: Search # Action: the action to take, Search example should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
{agent_scratchpad}"""
# Set up a prompt template
class CustomPromptTemplate(BaseChatPromptTemplate):
# The template to use
template: str
# The list of tools available
tools: List[Tool]
def format_messages(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
thoughts += action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n".join([f"{tool.name}: {tool.description}" for tool in self.tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
formatted = self.template.format(**kwargs)
return [HumanMessage(content=formatted)]
prompt = CustomPromptTemplate(
template=template,
tools=tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
output_parser = CustomOutputParser()
llm = ChatOpenAI(temperature=0)
# LLM chain consisting of the LLM and a prompt
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
# agent_executor.run("How many people live in canada as of 2023?")
agent_executor.run("What country did Russia invade?")
| [
"Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: Search # Action: the action to take, Search example should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n\nBegin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n\nQuestion: {input}\n{agent_scratchpad}",
"input",
"intermediate_steps"
] |
2024-01-10 | yuqisun/Hackathon_PAYouthVote | semantic_search~vector_store~vs_tool.py | from typing import List, Tuple
import numpy as np
from langchain import FAISS
from langchain.docstore.document import Document
from langchain.embeddings import HuggingFaceEmbeddings
# model_path = "./model/"
embeddings_model = "GanymedeNil/text2vec-large-chinese"
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model, model_kwargs={'device': 'cpu'})
CHUNK_SIZE = 250
def seperate_list(ls: List[int]) -> List[List[int]]:
lists = []
ls1 = [ls[0]]
for i in range(1, len(ls)):
if ls[i - 1] + 1 == ls[i]:
ls1.append(ls[i])
else:
lists.append(ls1)
ls1 = [ls[i]]
lists.append(ls1)
return lists
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4,
) -> List[Tuple[Document, float]]:
scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
docs = []
id_set = set()
store_len = len(self.index_to_docstore_id)
for j, i in enumerate(indices[0]):
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
id_set.add(i)
docs_len = len(doc.page_content)
for k in range(1, max(i, store_len-i)):
break_flag = False
for l in [i + k, i - k]:
if 0 <= l < len(self.index_to_docstore_id):
_id0 = self.index_to_docstore_id[l]
doc0 = self.docstore.search(_id0)
if docs_len + len(doc0.page_content) > self.chunk_size:
break_flag=True
break
elif doc0.metadata["source"] == doc.metadata["source"]:
docs_len += len(doc0.page_content)
id_set.add(l)
if break_flag:
break
id_list = sorted(list(id_set))
id_lists = seperate_list(id_list)
for id_seq in id_lists:
for id in id_seq:
if id == id_seq[0]:
_id = self.index_to_docstore_id[id]
doc = self.docstore.search(_id)
else:
_id0 = self.index_to_docstore_id[id]
doc0 = self.docstore.search(_id0)
doc.page_content += doc0.page_content
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
docs.append((doc, doc_score))
return docs
def get_docs_with_score(docs_with_score):
docs = []
for doc, score in docs_with_score:
doc.metadata["score"] = score
docs.append(doc)
return docs
def get_ref_docs_from_vs(query, vs_path, embeddings=embeddings):
vector_store = FAISS.load_local(vs_path, embeddings)
FAISS.similarity_search_with_score_by_vector = similarity_search_with_score_by_vector
vector_store.chunk_size = CHUNK_SIZE
related_docs_with_score = vector_store.similarity_search_with_score(query, 20)
related_docs = get_docs_with_score(related_docs_with_score)
related_docs.sort(key=lambda x: x.metadata['score'], reverse=True)
return related_docs | [] |
2024-01-10 | N3RDIUM/ExpoBot | Chatbot~__chatbot.py | import random
import json
import inflect
import fuzzywuzzy.fuzz as fuzz
import spacy
import hashlib
import tqdm
import yaml
import os
p = inflect.engine()
import logging
logging.basicConfig(level=logging.INFO)
THRESHOLD = 2 # Similarity threshold
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
import openai
import json
# if not "USELOCAL" in json.load(open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../config.json"))):
# logging.log(logging.INFO, "[CHAT] Using OpenAI API")
import os
import openai
openai.api_base = "http://localhost:8080/v1"
openai.api_key = "sx-xxx"
OPENAI_API_KEY = "sx-xxx"
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
logging.log(logging.INFO, "[CHAT] Using local OpenAI API")
# Don't ask me why I wrote these prompts, I don't know either. I just know that they work.
# Maybe they're just there to make the chatbot seem more human, and respond better to questions.
messages = [
{"role": "system", "content": "You are a intelligent assistant. Speak in English only. Give short responses, enough to be spoken in 5 to 8 senconds. you are a chatbot at a science fair. When someone asks you where a person is, respond according to the data given below in json (just tell the place their project is at)."},
{"role": "system", "content": "The speech recognition systems used in this project are not perfect. Allow for some errors in the speech recognition. For example, Vrunda may be recognized as Brenda or Vrinda. You can use the json data given below to figure it out. If you are not sure, just say you don't know."},
{"role": "system", "content": "If there are multiple projects with the same name, just say a random one, then tell them that there are multiple projects with the same name, and ask them to be more specific, then tell them where the project theyre talking about is located."},
# {"role": "system", "content": "You are a chatbot at a science fair. When someone asks you where a person is, respond according to the data given below in json."},
# {"role": "system", "content": "You must be able to answer questions about the science fair from the json data given below:"},
]
# with open("Chatbot/expo_data.json", "r") as f:
# expo_data = json.load(f)
# # Append all of them one by one
# for floor in expo_data["projects"]:
# messages.append({"role": "system", "content": "The project {} is located on the {} floor, made by {}, of class {}.".format(
# floor["title"],
# floor["floor"],
# floor["roomNumber"],
# str(floor["members"]).replace("'", "").replace("[", "").replace("]", ""),
# floor["class"]
# )})
def get_response(message):
global messages
messages.append(
{"role": "user", "content": message},
)
completion = openai.ChatCompletion.create(
model="koala-7B.ggmlv3.q2_K.bin",
messages=messages,
max_tokens=16,
temperature=0.7,
)
ret = str(completion.choices[0].message.content)
if ":" in ret:
ret = ret.split(":")[1]
return ret
try:
get_response("What is AI?")
logging.log(logging.INFO, "[CHAT] OpenAI API is working!")
except Exception as e:
logging.log(logging.INFO, "[CHAT] OpenAI API is NOT working! {}".format(e))
class ChatBot:
def __init__(self, speaker=None):
logging.log(logging.INFO, "[CHAT] ChatBot __init__")
self.conversation_data = []
self.fallbacks = []
        self.cache = {}  # used by answer() / save_cache()
        self.nlp_cache = {}  # used by calculate_similarity_better()
# if not os.path.exists("./cache.json"):
# with open("./cache.json", "w") as f:
# f.write("{}")
self.loader = yaml.SafeLoader
self.speaker = speaker
self.nlp = spacy.load("en_core_web_lg")
def train(self, conversation_data):
logging.log(logging.INFO, f"[CHAT] Training chatbot on {len(conversation_data)} conversation data points...")
self.conversation_data += conversation_data
        self.save_hash = hashlib.sha256(str(conversation_data).encode()).hexdigest()
def create_offline_cache(self):
logging.log(logging.INFO, "[CHAT] Creating text to speech cache...")
for data in tqdm.tqdm(self.conversation_data, desc="Creating tts cache for train"):
for utterance in data:
if self.speaker and not self.is_question(utterance):
self.speaker.create_speech_cache(utterance)
for utterance in tqdm.tqdm(self.fallbacks, desc="Creating tts cache for fallbacks"):
if self.speaker and not self.is_question(utterance):
self.speaker.create_speech_cache(utterance)
def is_question(self, utterance):
return "?" in utterance
def train_fallbacks(self, fallbacks):
logging.log(logging.INFO, f"[CHAT] Training chatbot on {len(fallbacks)} fallback data points...")
self.fallbacks += fallbacks
    def calculate_similarity_dirty(self, a, b):
        # fuzz.ratio is 0-100 (higher means more similar); keep scores above the
        # threshold so answer() can pick the best match, and zero out the rest.
        val = self.fuzz_ratio(a.lower(), b.lower())
        if val > THRESHOLD:
            return val
        return 0
def calculate_similarity_better(self, a, b):
if not a in self.nlp_cache:
self.nlp_cache[a] = self.nlp(' '.join([str(token) for token in self.nlp(a.lower()) if not token.is_stop]))
if not b in self.nlp_cache:
self.nlp_cache[b] = self.nlp(' '.join([str(token) for token in self.nlp(b.lower()) if not token.is_stop]))
return self.nlp_cache[a].similarity(self.nlp_cache[b])
def calculate_similarity(self, query, conversation_entry):
similarity_scores = []
for utterance in conversation_entry:
similarity_score = self.calculate_similarity_dirty(query, utterance) #+ self.calculate_similarity_better(query, utterance)
# TODO: Make nlp similarity better
similarity_scores.append(similarity_score)
return similarity_scores
def answer(self, query):
if query == "":
return ""
logging.log(logging.INFO, f"[CHAT] Answering query: {query}")
logging.log(logging.INFO, "[CHAT] Calculating similarities...")
similarities = []
for conversation_entry in tqdm.tqdm(self.conversation_data, desc="Calculating similarities"):
similarities.append(self.calculate_similarity(query, conversation_entry))
logging.log(logging.INFO, "[CHAT] Similarities calculated. Linearizing...")
linear_similarities = []
for i, similarity_scores in enumerate(similarities):
for j, score in enumerate(similarity_scores):
if score > THRESHOLD:
linear_similarities.append((score, (i, j)))
logging.log(logging.INFO, "[CHAT] Linearized. Sorting...")
self.cache[query] = linear_similarities
self.save_cache()
try:
logging.log(logging.INFO, "[CHAT] Sorted matches. Finding max...")
max_similarity = max(i[0] for i in linear_similarities)
max_similarity_index = next(i[1] for i in linear_similarities if i[0] == max_similarity)
logging.log(logging.INFO, f"[CHAT] Max found to be {max_similarity} at index {max_similarity_index}")
global messages
messages.append({"role": "user", "content": query})
messages.append({"role": "system", "content": self.conversation_data[max_similarity_index[0]][max_similarity_index[1]]})
return self.conversation_data[max_similarity_index[0]][max_similarity_index[1] + 1]
except:
try:
logging.log(logging.INFO, "[CHAT] No matches found. Trying ChatGPT...")
return get_response(query)
except Exception as e:
logging.log(logging.INFO, f"[CHAT] ChatGPT failed with {e}. Using random fallback...")
return self.random_fallback()
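    # Note: answer() falls through three stages: a fuzzy match against the
    # trained conversation data, then the OpenAI-compatible endpoint via
    # get_response(), and finally a random canned fallback if both fail.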
def random_fallback(self):
logging.log(logging.INFO, "[CHAT] Random fallback!")
return random.choice(self.fallbacks)
def train_expo_data(self, expo_data):
with open(expo_data, "r") as f:
expo_data = json.load(f)
data = []
logging.log(logging.INFO, "[CHAT] Training chatbot on categories...")
# When there are more that 2 projects with the same name
found = {}
for project in expo_data["projects"]:
try:
found[project["title"]] += 1
except KeyError:
found[project["title"]] = 1
found_exceptions = [i for i in found if found[i] > 1]
for exception in found_exceptions:
print(exception)
# Questions about projects
for project in expo_data["projects"]:
# Where is project X?
whereis_questions = [
"Where is project {}?".format(project["title"]),
"Where is {}?".format(project["title"]),
"Where is {} located?".format(project["title"]),
"Where is {} located at?".format(project["title"]),
"Where is {} located in the expo?".format(project["title"]),
"Where can I find {}?".format(project["title"]),
"Where can I find {} in the expo?".format(project["title"]),
"Where can I find {} located?".format(project["title"]),
"Where can I find {} located at?".format(project["title"]),
"Where is the project {}?".format(project["title"]),
"Where is the {}?".format(project["title"]),
"Where is the {} located?".format(project["title"]),
"Where is the {} located at?".format(project["title"]),
]
for question in whereis_questions:
data.append([
question,
"The project {} is located on the {} floor, room {}.".format(
project["title"],
self.numerify(project["floor"]),
self.number_to_speech(project["roomNumber"])
)
])
# Who made project X?
whois_questions = [
"Who made project {}?",
"Who made {}?",
"Who made {} project?",
"Who made the project {}?",
"Who made the {}?",
"Who made the {} project?",
]
for question in whois_questions:
data.append([
question,
"The project {} was made by {}.".format(
project["title"],
self.mems2str(project["members"])
)
])
# Student X made what project?
for member in project["members"]:
studentmade_questions = [
"{} made what project?",
"{} made what?",
"{} made what project?",
"{} made what project?",
"What project did {} make?",
"What did {} make?",
]
for question in studentmade_questions:
data.append([
question.format(member),
"{} made the project {}.".format(
member,
project["title"]
)
])
# Where is student X?
for member in project["members"]:
whereis_questions = [
"Where is {}?",
"Where can I find {}?",
"I want to meet {}",
]
for question in whereis_questions:
data.append([
question.format(member),
"The student {} is located on the {} floor, room {}.".format(
member,
self.numerify(project["floor"]),
self.number_to_speech(project["roomNumber"])
)
])
# Where is project X by student Y?
for member in project["members"]:
whereis_questions = [
"Where is {}'s project?",
"Where can I find {}'s project?",
"{}'s project"
]
for question in whereis_questions:
data.append([
question.format(member),
"The project {} is located on the {} floor, room {}.".format(
project["title"],
self.numerify(project["floor"]),
self.number_to_speech(project["roomNumber"])
)
])
# Which class made project X?
if len(project["class"]) > 1:
whichclass_questions = [
"Which class made project {}?",
"Which class made {}?",
"Which class made {} project?",
"Which class made the project {}?",
"Which class made the {}?",
"Which class made the {} project?",
]
for question in whichclass_questions:
try:
data.append([
question,
"The project {} was made by {}.".format(
project["title"],
self.number_to_speech(project["class"].split(" ")[0]) + " " + project["class"].split(" ")[1]
)
])
except IndexError:
pass
# Who is X?
for member in project["members"]:
whois_questions = [
"Who is {}?",
"Who is {}?",
"Who is {}?",
"Who is {}?",
]
for question in whois_questions:
data.append([
question.format(member),
"{} is a member of the project {}.".format(
member,
project["title"]
)
])
# What are the projects in room X?
for room in expo_data["rooms"]:
whatare_questions = [
"What are the projects in room {}?",
"What are the other projects in room {}?",
"What can I find in room {}?",
"What can I see in room {}?",
"What else is in room {}?",
"What else can I see in room {}?",
"What else can I find in room {}?",
]
found = []
for project in expo_data["projects"]:
if project["roomNumber"] == room:
found.append(project["title"])
for question in whatare_questions:
data.append([
question.format(self.number_to_speech(room)),
"The projects in room {} are: {}".format(
self.number_to_speech(room),
self.mems2str(found)
)
])
self.train(data)
def mems2str(self, members):
if len(members) == 1:
return members[0]
elif len(members) == 2:
return members[0] + " and " + members[1]
else:
return ", ".join(members[:-1]) + ", and " + members[-1]
def numerify(self, number):
if number == str(1):
return "ground"
elif number == str(2):
return "first"
elif number == str(3):
return "second"
elif number == str(4):
return "third"
def number_to_speech(self, number):
"""
        Spell a number out digit by digit, e.g. 16 -> "one six".
"""
number = str(number)
number = list(number)
for i in range(len(number)):
number[i] = p.number_to_words(number[i])
return " ".join(number)
def get_category_index(self, expo_data, category):
for i in range(len(expo_data["categories"])):
if expo_data["categories"][i]["title"] == category:
return i
return None
def fuzz_ratio(self, a, b):
return fuzz.ratio(a, b)
def load_cache(self):
logging.log(logging.INFO, "[CHAT] Loading cache...")
try:
with open("cache.json", "r") as f:
self.cache = json.load(f)
try:
if self.cache["train_data_hash"] != self.save_hash:
self.cache = {
"train_data_hash": self.save_hash,
}
self.save_cache()
except KeyError:
self.cache = {
"train_data_hash": self.save_hash,
}
self.save_cache()
except FileNotFoundError:
self.cache["train_data_hash"] = ""
self.save_cache()
def save_cache(self):
logging.log(logging.INFO, "[CHAT] Saving cache...")
try:
with open("cache.json", "w") as f:
if not "train_data_hash" in self.cache:
self.cache["train_data_hash"] = self.save_hash
json.dump(self.cache, f)
except FileNotFoundError:
self.cache = {}
def train_from_yaml(self, yaml_file):
with open(yaml_file, "r") as f:
data = yaml.load(f, Loader=self.loader)["conversations"]
logging.log(logging.INFO, "Training chatbot on yaml file: {}".format(yaml_file))
self.train(data)
def train_from_corpus(self, corpus_dir, include=["*"]):
logging.log(logging.INFO, "[CHAT] Training chatbot on corpus directory: {}".format(corpus_dir))
for filename in os.listdir(corpus_dir):
if include[0] == "*":
if filename.endswith(".yml"):
self.train_from_yaml(os.path.join(corpus_dir, filename))
else:
if filename.split(".")[0] in include:
self.train_from_yaml(os.path.join(corpus_dir, filename)) | [
"The speech recognition systems used in this project are not perfect. Allow for some errors in the speech recognition. For example, Vrunda may be recognized as Brenda or Vrinda. You can use the json data given below to figure it out. If you are not sure, just say you don't know.",
"You are a intelligent assistant. Speak in English only. Give short responses, enough to be spoken in 5 to 8 senconds. you are a chatbot at a science fair. When someone asks you where a person is, respond according to the data given below in json (just tell the place their project is at).",
"If there are multiple projects with the same name, just say a random one, then tell them that there are multiple projects with the same name, and ask them to be more specific, then tell them where the project theyre talking about is located."
] |
2024-01-10 | N3RDIUM/ExpoBot | ___.py | import os
import openai
openai.api_base = "http://localhost:8080/v1"
openai.api_key = "sx-xxx"
OPENAI_API_KEY = "sx-xxx"
os.environ['OPENAI_API_KEY'] = OPENAI_API_KEY
completion = openai.ChatCompletion.create(
model="koala-7B.ggmlv3.q2_K.bin",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "How are you?"}
],
max_tokens=16,
)
print(completion.choices[0].message.content)
| [
"You are a helpful assistant.",
"How are you?"
] |
2024-01-10 | wongbeishan/python_projects | voice_control~voice_control.py | import whisper
import openai
import gradio as gr
import librosa
from dotenv import load_dotenv
load_dotenv()
model = whisper.load_model("base")
def load_audio(file_path, target_sr = 1000):
    try:
        # librosa.load returns a (waveform, sample_rate) tuple, so unpack it here
        audio, _ = librosa.load(file_path, sr=target_sr)
        if len(audio.shape) > 1:
            audio = audio[0]
        return audio
    except Exception as e:
        print(f"Error loading audio file: {e}")
        return None
def transcribe(file):
# print(file)
audio = load_audio(file)
if audio is None:
print("Error loading audio file.")
return "Error loading audio file."
transcription = model.transcribe(file)
return transcription
def generate_answer(messages):
response = openai.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages
)
return response.choices[0].message.content
prompts = {
    "START": "Classify the intent of the next input. "
             "Is it: WRITE_EMAIL, QUESTION, OTHER ? Only answer one word.",
    "QUESTION": "If you can answer the question: ANSWER, "
                "if you need more information: MORE, "
                "if you cannot answer: OTHER. Only answer one word.",
    "ANSWER": "Now answer the question",
    "MORE": "Now ask for more information",
    "OTHER": "Now tell me you cannot answer the question or do the action",
    "WRITE_EMAIL": 'If the subject or recipient or message is missing, '
                   'answer "MORE". Else if you have all the information, '
                   'answer "ACTION_WRITE_EMAIL | subject:subject, recipient:recipient, message:message".',
}
actions = {
"ACTION_WRITE_EMAIL": "The mail has been sent. \
Now tell me the action is done in natural language."
}
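# Note on the two tables above (comment added for clarity): discussion() below first checks
# whether the model's reply is a key in `prompts` (a state to transition to), then whether
# it is a key in `actions` (a side effect to run via do_action); any other reply is returned
# to the user as the final answer.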
def start(user_input):
messages = [{"role": "user", "content": prompts["START"]}]
messages.append({"role": "user", "content": user_input})
return discussion(messages, "")
def discussion(messages, last_step):
answer = generate_answer(messages)
if answer in prompts.keys():
messages.append({"role": "assistant", "content": answer})
messages.append({"role": "user", "content": prompts[answer]})
return discussion(messages, answer)
elif answer in actions.keys():
do_action(answer)
else:
if last_step != "MORE":
messages = []
last_step = "END"
return answer
def do_action(action):
print("Doing action " + action)
return ("I did the action " + action)
def start_chat(file):
input = transcribe(file)
return start(input)
gr.Interface(
fn=start_chat,
live=True,
inputs=gr.Audio(sources="microphone"),
outputs="text"
).launch()
| [
"{'START': 'Classify the intent of the next input. Is it: WRITE_EMAIL, QUESTION, OTHER ? Only answer one word.', 'QUESTION': 'If you can answer the question: ANSWER, if you need more information: MORE, if you cannot answer: OTHER. Only answer one word.', 'ANSWER': 'Now answer the question', 'MORE': 'Now ask for more information', 'OTHER': 'Now tell me you cannot answer the question or do the action', 'WRITE_EMAIL': 'If the subject or recipient or message is missing, answer \"MORE\". Else if you have all the information, \\\\ answer \"ACTION_WRITE_EMAIL | subject:subject, recipient:recipient, message:message\".'}"
] |
2024-01-10 | WindRoseR/VAND-APRIL-GAN | open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, DistillClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, tokenize
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(
config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
img_size: int,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
require_pretrained: bool = False,
):
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
pretrained_cfg = config['preprocess_cfg']
model_cfg = config['model_cfg']
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
pretrained_cfg = {}
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg['vision_cfg']['image_size'] != img_size:
model_cfg['vision_cfg']['image_size'] = img_size
cast_dtype = get_cast_dtype(precision)
model_pre = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
state_dict = model_pre.state_dict()
# to always output dict even if it is clip
if output_dict and hasattr(model_pre, "output_dict"):
model_pre.output_dict = True
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=True)
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
else:
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
else:
model_cfg = model_cfg or get_model_config(model_name)
model_cfg['vision_cfg']['image_size'] = img_size
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
pass
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
if custom_text:
if is_hf_model:
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
if "coca" in model_name:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
pretrained_loaded = False
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}.'
f'Available pretrained tags ({list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
pretrained_loaded = True
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
pretrained_loaded = True
if require_pretrained and not pretrained_loaded:
# callers of create_model_from_pretrained always expect pretrained weights
raise RuntimeError(
f'Pretrained weights were required for (model: {model_name}, pretrained: {pretrained}) but not loaded.')
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image / mean metadata from pretrained_cfg if available, or use default
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
return model
def create_loss(args):
if args.distill:
return DistillClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
elif "coca" in args.model.lower():
return CoCaLoss(
caption_loss_weight=args.coca_caption_loss_weight,
clip_loss_weight=args.coca_contrastive_loss_weight,
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
img_size: int,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
model = create_model(
model_name,
img_size,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
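# Illustrative call for this fork's signature (commented out; the model name, image size and
# pretrained tag below are placeholders rather than values taken from this repository):
#   model, preprocess_train, preprocess_val = create_model_and_transforms(
#       "ViT-B-16", img_size=240, pretrained="openai", device="cuda")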
def create_model_from_pretrained(
        model_name: str,
        img_size: int,
        pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
):
model = create_model(
        model_name,
        img_size,
        pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
cache_dir=cache_dir,
require_pretrained=True,
)
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess
| [] |
2024-01-10 | rkmt/summarize_arxv | mkmd.py | import os
import io
import sys
import time
import arxiv
import openai
import random
import fitz
from xml.dom import minidom
import xmltodict
import dicttoxml
import json
import glob
import xmltodict
from PIL import Image
print(fitz.__doc__)
if not tuple(map(int, fitz.version[0].split("."))) >= (1, 18, 18):
raise SystemExit("require PyMuPDF v1.18.18+")
def recoverpix(doc, item):
xref = item[0] # xref of PDF image
smask = item[1] # xref of its /SMask
# special case: /SMask or /Mask exists
if smask > 0:
pix0 = fitz.Pixmap(doc.extract_image(xref)["image"])
if pix0.alpha: # catch irregular situation
pix0 = fitz.Pixmap(pix0, 0) # remove alpha channel
mask = fitz.Pixmap(doc.extract_image(smask)["image"])
try:
pix = fitz.Pixmap(pix0, mask)
except: # fallback to original base image in case of problems
pix = fitz.Pixmap(doc.extract_image(xref)["image"])
if pix0.n > 3:
ext = "pam"
else:
ext = "png"
return { # create dictionary expected by caller
"ext": ext,
"colorspace": pix.colorspace.n,
"image": pix.tobytes(ext),
}
# special case: /ColorSpace definition exists
# to be sure, we convert these cases to RGB PNG images
if "/ColorSpace" in doc.xref_object(xref, compressed=True):
pix = fitz.Pixmap(doc, xref)
pix = fitz.Pixmap(fitz.csRGB, pix)
return { # create dictionary expected by caller
"ext": "png",
"colorspace": 3,
"image": pix.tobytes("png"),
}
return doc.extract_image(xref)
def extract_images_from_pdf(fname, imgdir="./output", min_width=400, min_height=400, relsize=0.05, abssize=2048, max_ratio=8, max_num=5):
'''
dimlimit = 0 # 100 # each image side must be greater than this
relsize = 0 # 0.05 # image : image size ratio must be larger than this (5%)
abssize = 0 # 2048 # absolute image size limit 2 KB: ignore if smaller
'''
if not os.path.exists(imgdir): # make subfolder if necessary
os.mkdir(imgdir)
t0 = time.time()
doc = fitz.open(fname)
page_count = doc.page_count # number of pages
xreflist = []
imglist = []
images = []
for pno in range(page_count):
if len(images) >= max_num:
break
print(f"extract images {pno+1}/{page_count}")
il = doc.get_page_images(pno)
imglist.extend([x[0] for x in il])
for img in il:
xref = img[0]
if xref in xreflist:
continue
width = img[2]
height = img[3]
print(f"{width}x{height}")
if width < min_width and height < min_height:
continue
image = recoverpix(doc, img)
n = image["colorspace"]
imgdata = image["image"]
if len(imgdata) <= abssize:
continue
if width / height > max_ratio or height/width > max_ratio:
print(f"max_ration {width/height} {height/width} {max_ratio}")
continue
print("*")
imgname = "img%02d_%05i.%s" % (pno+1, xref, image["ext"])
images.append((imgname, pno+1, width, height))
imgfile = os.path.join(imgdir, imgname)
fout = open(imgfile, "wb")
fout.write(imgdata)
fout.close()
xreflist.append(xref)
t1 = time.time()
imglist = list(set(imglist))
print(len(set(imglist)), "images in total")
print(len(xreflist), "images extracted")
print("total time %g sec" % (t1 - t0))
return xreflist, imglist, images
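# Illustrative helper (added for clarity, not part of the original script; the path, output
# directory and limit below are assumptions): pull a few large figures out of a single PDF.
def _demo_extract_figures(pdf_path="paper.pdf", out_dir="./output"):
    _, _, images = extract_images_from_pdf(pdf_path, imgdir=out_dir, max_num=3)
    for imgname, page_no, width, height in images:
        print(f"{imgname}: page {page_no}, {width}x{height}px")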
def get_half(fname):
# Open the PDF file
pdf_file = fitz.open(fname)
# Get the first page
page = pdf_file[0]
# Get the page as a whole image
mat = fitz.Matrix(2, 2) # zoom factor 2 in each direction
pix = page.get_pixmap(matrix=mat)
# Convert to a PIL Image
im = Image.open(io.BytesIO(pix.tobytes()))
# Get the dimensions of the image
width, height = im.size
# Define the box for the upper half (left, upper, right, lower)
box = (0, height // 20, width, (height // 2) + (height // 20))
# Crop the image to this box
im_cropped = im.crop(box)
return im_cropped
def make_md(f, dirname, filename, nimages=3, keywords=[]):
path = f"{dirname}/{filename}"
with open(path, "r") as fin:
xml = fin.read()
xml_lower = xml.lower()
if (keywords is not None) and not(any([k.lower() in xml_lower for k in keywords])):
return
dict = xmltodict.parse(xml)['paper']
print(dict)
f.write('\n---\n')
f.write('<!-- _class: title -->\n')
f.write(f"# {dict['title_jp']}\n")
f.write(f"{dict['title']}\n")
#authors = ",".join(dict['authors']['item'])
#f.write(f"{authors}\n")
f.write(f"[{dict['year']}] {dict['keywords']} {dict['entry_id']}\n")
f.write(f"__課題__ {dict['problem']}\n")
f.write(f"__手法__ {dict['method']}\n")
f.write(f"__結果__ {dict['result']}\n")
pdfname = f"{dirname}/paper.pdf"
img_cropped = get_half(pdfname)
img_cropped.save(f"{dirname}/half.png", "PNG")
f.write("\n---\n")
f.write('<!-- _class: info -->\n')
f.write(f'\n')
# get images
_, _, image_list = extract_images_from_pdf(pdfname, imgdir=dirname)
images = [{'src':imgname, 'pno':str(pno), 'width':str(width), 'height':str(height)} for imgname, pno, width, height in image_list]
for img in images[:nimages]:
src = img['src']
width = (int)(img['width'])
height = (int)(img['height'])
print("#### img", src, width, height)
x_ratio = (1600.0 * 0.7) / (float)(width)
y_ratio = (900.0 * 0.7) / (float)(height)
ratio = min(x_ratio, y_ratio)
f.write("\n---\n")
f.write('<!-- _class: info -->\n')
f.write(f'\n')
def main(dir="./xmrs", output="./out.md", keywords=[]):
print("### dir", dir, "output", output, "keywords", keywords)
xmlfiles= glob.glob(f"{dir}/*/*.xml")
with open(output, "w") as f:
f.write("---\n")
f.write("marp: true\n")
f.write("theme: default\n")
f.write("size: 16:9\n")
f.write("paginate: true\n")
f.write('_class: ["cool-theme"]\n')
f.write('\n---\n')
f.write(f'# {keywords} on arXiv\n')
f.write('automatically generated by ChatGPT\n')
f.write('\n')
for file in xmlfiles:
dirname, filename = os.path.split(file)
print(dirname, filename)
make_md(f, dirname, filename, keywords=keywords)
print("### result stored in", output)
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dir', "-d", type=str, help='xml dir', default='./xmls')
parser.add_argument('--output', "-o", type=str, default='output.md', help='output markdown file')
parser.add_argument('positional_args', nargs='?', help='query keywords')
args = parser.parse_args()
keywords = args.positional_args
if type(keywords) == str:
keywords = [keywords]
print(args, keywords)
main(dir=args.dir, output=args.output, keywords=keywords) | [] |
2024-01-10 | ricardolsmendes/ml-playground | large-language-models~research-assistants~text-to-sql~analyse-insurance-data.py | import sqlite3
import dotenv
from langchain import agents, llms, sql_database
from langchain.agents import agent_toolkits
import pandas as pd
import sqlalchemy
dotenv.load_dotenv()
# Utility block to extract content from a CSV file and load it into a SQLite table.
conn = sqlite3.connect("insurance.db")
df = pd.read_csv("datasets/insurance.csv")
df.to_sql("insurance", conn, if_exists="replace", index=False)
conn.close()
db_engine = sqlalchemy.create_engine("sqlite:///insurance.db")
# Instantiate the OpenAI model.
# Pass the "temperature" parameter which controls the RANDOMNESS of the model's output.
# A lower temperature will result in more predictable output, while a higher temperature
# will result in more random output. The temperature parameter is set between 0 and 1,
# with 0 being the most predictable and 1 being the most random.
llm = llms.OpenAI(temperature=0)
toolkit = agent_toolkits.SQLDatabaseToolkit(
db=sql_database.SQLDatabase(db_engine), llm=llm
)
question = (
"What is the highest charge?"
" What is the average charge?"
" What is the group that spends more, male of female?"
)
print(f"\nYOUR QUESTION IS:\n{question}")
executor = agents.create_sql_agent(
llm=llm,
toolkit=toolkit,
agent_type=agents.AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
result = executor.run(question)
print(f"\nTHE ANSWER TO YOUR QUESTION IS:\n{result}\n")
| [] |
2024-01-10 | kevaldekivadiya2415/WhatsAI | app~utils~whatsapp_helpers.py | from loguru import logger
from fastapi.responses import JSONResponse
from dotenv import load_dotenv
from app.utils.openai_helpers import OpenAIHelper
from app.utils.templates.whatsapp_default_messages import (
DEFAULT_ERROR_MESSAGE,
MEDIA_NOT_SUPPORT_MESSAGE,
)
from app.utils.send_whatsapp_message import WhatsAppMessages
# Load environment variables
load_dotenv()
OPENAI_HELPER = OpenAIHelper()
WHATSAPP_MESSAGES = WhatsAppMessages()
class WhatsAppHandler:
def __init__(self):
pass
@staticmethod
def is_valid_whatsapp_message(message_body: dict) -> bool:
"""Check if the incoming webhook event has a valid WhatsApp message structure."""
return (
message_body.get("object")
and message_body.get("entry")
and message_body["entry"][0].get("changes")
and message_body["entry"][0]["changes"][0].get("value")
and message_body["entry"][0]["changes"][0]["value"].get("messages")
and message_body["entry"][0]["changes"][0]["value"]["messages"][0]
)
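    # The chain of checks above mirrors the Cloud API webhook nesting:
    # body["entry"][0]["changes"][0]["value"]["messages"][0] is the first inbound message.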
async def generate_response(self, message: dict) -> dict:
"""Generate a response based on the type of WhatsApp message."""
try:
if message["type"] == "text":
generated_text = await OPENAI_HELPER.text_response_generation(
text=message["text"]["body"]
)
return {"status": "success", "message": generated_text}
else:
return {"status": "fail", "message": MEDIA_NOT_SUPPORT_MESSAGE}
except Exception as exc:
logger.exception(exc)
return {"status": "error", "message": DEFAULT_ERROR_MESSAGE}
async def process_whatsapp_message(
self, messages: list, recipient_number: str
) -> JSONResponse:
"""Process incoming WhatsApp messages and send a response."""
try:
# Generate response from GPT model
response = await self.generate_response(message=messages[0])
# Send generated text to the recipient's WhatsApp
return await WHATSAPP_MESSAGES.text_message(
recipient_number=recipient_number, text=response["message"]
)
except Exception as exc:
logger.exception(f"Error processing WhatsApp message: {exc}")
# If any exception occurred, send an error message
return await WHATSAPP_MESSAGES.error_message(
recipient_number=recipient_number, text=DEFAULT_ERROR_MESSAGE
)
| [] |
2024-01-10 | kevaldekivadiya2415/WhatsAI | app~utils~send_whatsapp_message.py | from loguru import logger
import json
import requests
from fastapi.responses import JSONResponse
from fastapi import HTTPException
from dotenv import load_dotenv
import os
from app.utils.openai_helpers import OpenAIHelper
load_dotenv()
class WhatsAppMessages:
def __init__(self) -> None:
self.WHATSAPP_HEADERS = {
"Content-type": "application/json",
"Authorization": f"Bearer {os.getenv('ACCESS_TOKEN')}",
}
self.WHATSAPP_URL = f"https://graph.facebook.com/{os.getenv('VERSION')}/{os.getenv('PHONE_NUMBER_ID')}/messages"
@staticmethod
def _get_text_message_format(recipient_number: str, text: str) -> json:
"""Generate the text message format."""
return json.dumps(
{
"messaging_product": "whatsapp",
"recipient_type": "individual",
"to": recipient_number,
"type": "text",
"text": {"preview_url": False, "body": text},
}
)
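    # For reference, the serialized payload above looks like (placeholder values):
    # {"messaging_product": "whatsapp", "recipient_type": "individual",
    #  "to": "<recipient_number>", "type": "text",
    #  "text": {"preview_url": false, "body": "<text>"}}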
async def _send_message(self, recipient_number: str, text: str) -> JSONResponse:
"""Send a message using WhatsApp API."""
try:
data = self._get_text_message_format(recipient_number, text)
response = requests.post(
url=f"https://graph.facebook.com/{os.getenv('VERSION')}/{os.getenv('PHONE_NUMBER_ID')}/messages",
data=data,
headers={
"Content-type": "application/json",
"Authorization": f"Bearer {os.getenv('ACCESS_TOKEN')}",
},
timeout=10,
)
except requests.Timeout:
logger.exception("Timeout occurred while sending the message")
return JSONResponse(
content={"status": "error", "message": "Request timed out"},
status_code=408,
)
except requests.RequestException as exc:
logger.exception(f"Failed to send the message: {exc}")
return JSONResponse(
content={"status": "error", "message": "Failed to send the message"},
status_code=500,
)
except Exception as exc:
logger.exception(f"Unexpected error while sending the message: {exc}")
return JSONResponse(
content={"status": "error", "message": "Failed to send the message"},
status_code=500,
)
async def error_message(self, recipient_number: str, text: str) -> JSONResponse:
"""Send an error message using WhatsApp API."""
return await self._send_message(recipient_number, text)
async def text_message(self, recipient_number: str, text: str) -> JSONResponse:
"""Send a text message using WhatsApp API."""
return await self._send_message(recipient_number, text)
| [] |
2024-01-10 | KoslickiLab/LLMFactCheck | src~load_model.py | import os
from openai import OpenAI
from config.openai_api_key import OPENAI_API_KEY
from llama_cpp import Llama
from huggingface_hub import hf_hub_download
from sklearn.model_selection import train_test_split
import pandas as pd
# Define the file path to the CSV data
file_path = os.path.join('data', 'human_labeled_semmed.csv')
def load_model(model_type, use_icl):
"""
Load a language model for inference.
Args:
model_type (str): The type of the model to load ('llama', 'gpt_3_5_turbo', or 'gpt_4_0').
        use_icl (bool): Whether to prepare in-context learning (ICL) examples for the model.
Returns:
tuple or object: A tuple containing the client and model for OpenAI models, or just the model for Llama models.
Raises:
ValueError: If an unknown model type is provided.
"""
if model_type == 'llama':
# Load a Llama model
model_name = "TheBloke/Llama-2-13B-chat-GGML"
model_path = hf_hub_download(repo_id=model_name, filename="llama-2-13b-chat.ggmlv3.q5_1.bin")
model = Llama(model_path=model_path, n_threads=2, n_batch=512, n_gpu_layers=32)
if use_icl:
return prepare_icl(model, model_type)
return model
else:
# Load an OpenAI model
client = OpenAI(api_key=OPENAI_API_KEY)
if model_type == 'gpt_3_5_turbo':
model = 'gpt-3.5-turbo'
elif model_type == 'gpt_4_0':
model = 'gpt-4'
else:
raise ValueError(f"Unknown model type: {model_type}")
if use_icl:
return client, prepare_icl(model, model_type)
return client, model
def prepare_icl(model, model_type):
"""
    Prepare few-shot context for in-context learning (ICL).
Args:
model (object): The language model (OpenAI model).
model_type (str): The type of the model being used.
Returns:
tuple: A tuple containing the model and context for OpenAI models.
"""
df = pd.read_csv(file_path)
train_df, test_df = train_test_split(df, test_size=0.3, random_state=42)
test_df.to_csv(os.path.join('data', f'test_df_{model_type}_icl.csv'), index=False)
context_entries = train_df.sample(n=10)
context = context_entries.apply(
lambda row: f"{row['Question']} Answer: {'Yes' if row['Label'] else 'No'}\n",
axis=1).str.cat()
return model, context
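# Minimal usage sketch (commented out; the return shapes follow the functions above, the
# variable names are assumptions):
#   client, (model, context) = load_model("gpt_3_5_turbo", use_icl=True)
#   llama_model = load_model("llama", use_icl=False)
# With use_icl=True, `context` is a block of few-shot question/answer pairs sampled from the
# labeled CSV, meant to be prepended to each prompt.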
| [] |
2024-01-10 | ai-ld/EVAL | core~agents~parser.py | import re
from typing import Dict
from langchain.output_parsers.base import BaseOutputParser
from core.prompts.input import EVAL_FORMAT_INSTRUCTIONS
class EvalOutputParser(BaseOutputParser):
@staticmethod
def parse_all(text: str) -> Dict[str, str]:
regex = r"Action: (.*?)[\n]Plan:(.*)[\n]What I Did:(.*)[\n]Action Input: (.*)"
match = re.search(regex, text, re.DOTALL)
if not match:
raise Exception("parse error")
action = match.group(1).strip()
plan = match.group(2)
what_i_did = match.group(3)
action_input = match.group(4).strip(" ").strip('"')
return {
"action": action,
"plan": plan,
"what_i_did": what_i_did,
"action_input": action_input,
}
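    # For reference, the regex above expects completions shaped like (illustrative values):
    #   Action: terminal
    #   Plan: - inspect the workspace
    #   What I Did: nothing yet
    #   Action Input: ls -al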
def get_format_instructions(self) -> str:
return EVAL_FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Dict[str, str]:
regex = r"Action: (.*?)[\n]Plan:(.*)[\n]What I Did:(.*)[\n]Action Input: (.*)"
match = re.search(regex, text, re.DOTALL)
if not match:
raise Exception("parse error")
parsed = EvalOutputParser.parse_all(text)
return {"action": parsed["action"], "action_input": parsed["action_input"]}
def __str__(self):
return "EvalOutputParser"
| [] |
2024-01-10 | ai-ld/EVAL | core~agents~builder.py | from core.prompts.input import EVAL_PREFIX, EVAL_SUFFIX
from core.tools.base import BaseToolSet
from core.tools.factory import ToolsFactory
from env import settings
from langchain.chat_models.base import BaseChatModel
from langchain.output_parsers.base import BaseOutputParser
from langchain.callbacks.base import BaseCallbackManager
from .chat_agent import ConversationalChatAgent
from .llm import ChatOpenAI
from .parser import EvalOutputParser
class AgentBuilder:
def __init__(self, toolsets: list[BaseToolSet] = []):
self.llm: BaseChatModel = None
self.parser: BaseOutputParser = None
self.global_tools: list = None
self.toolsets = toolsets
def build_llm(self, callback_manager: BaseCallbackManager = None):
self.llm = ChatOpenAI(
temperature=0, callback_manager=callback_manager, verbose=True
)
self.llm.check_access()
def build_parser(self):
self.parser = EvalOutputParser()
def build_global_tools(self):
if self.llm is None:
raise ValueError("LLM must be initialized before tools")
toolnames = ["wikipedia"]
if settings["SERPAPI_API_KEY"]:
toolnames.append("serpapi")
if settings["BING_SEARCH_URL"] and settings["BING_SUBSCRIPTION_KEY"]:
toolnames.append("bing-search")
self.global_tools = [
*ToolsFactory.create_global_tools_from_names(toolnames, llm=self.llm),
*ToolsFactory.create_global_tools(self.toolsets),
]
def get_parser(self):
if self.parser is None:
raise ValueError("Parser is not initialized yet")
return self.parser
def get_global_tools(self):
if self.global_tools is None:
raise ValueError("Global tools are not initialized yet")
return self.global_tools
def get_agent(self):
if self.llm is None:
raise ValueError("LLM must be initialized before agent")
if self.parser is None:
raise ValueError("Parser must be initialized before agent")
if self.global_tools is None:
raise ValueError("Global tools must be initialized before agent")
return ConversationalChatAgent.from_llm_and_tools(
llm=self.llm,
tools=[
*self.global_tools,
*ToolsFactory.create_per_session_tools(
self.toolsets
), # for names and descriptions
],
system_message=EVAL_PREFIX.format(bot_name=settings["BOT_NAME"]),
human_message=EVAL_SUFFIX.format(bot_name=settings["BOT_NAME"]),
output_parser=self.parser,
max_iterations=30,
)
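    # Typical build order (comment added for clarity; it mirrors how AgentManager drives this
    # class): build_parser() -> build_llm(callback_manager) -> build_global_tools(), then
    # get_agent() to obtain the ConversationalChatAgent.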
| [] |
2024-01-10 | ai-ld/EVAL | core~agents~manager.py | from typing import Dict, Optional
from celery import Task
from langchain.agents.agent import AgentExecutor
from langchain.callbacks.base import CallbackManager
from langchain.callbacks import set_handler
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.memory.chat_memory import BaseChatMemory
from core.tools.base import BaseToolSet
from core.tools.factory import ToolsFactory
from .builder import AgentBuilder
from .callback import EVALCallbackHandler, ExecutionTracingCallbackHandler
set_handler(EVALCallbackHandler())
class AgentManager:
def __init__(
self,
toolsets: list[BaseToolSet] = [],
):
self.toolsets: list[BaseToolSet] = toolsets
self.memories: Dict[str, BaseChatMemory] = {}
self.executors: Dict[str, AgentExecutor] = {}
def create_memory(self) -> BaseChatMemory:
return ConversationBufferMemory(memory_key="chat_history", return_messages=True)
def get_or_create_memory(self, session: str) -> BaseChatMemory:
if not (session in self.memories):
self.memories[session] = self.create_memory()
return self.memories[session]
def create_executor(
self, session: str, execution: Optional[Task] = None
) -> AgentExecutor:
builder = AgentBuilder(self.toolsets)
builder.build_parser()
callbacks = []
eval_callback = EVALCallbackHandler()
eval_callback.set_parser(builder.get_parser())
callbacks.append(eval_callback)
if execution:
execution_callback = ExecutionTracingCallbackHandler(execution)
execution_callback.set_parser(builder.get_parser())
callbacks.append(execution_callback)
callback_manager = CallbackManager(callbacks)
builder.build_llm(callback_manager)
builder.build_global_tools()
memory: BaseChatMemory = self.get_or_create_memory(session)
tools = [
*builder.get_global_tools(),
*ToolsFactory.create_per_session_tools(
self.toolsets,
get_session=lambda: (session, self.executors[session]),
),
]
for tool in tools:
tool.callback_manager = callback_manager
executor = AgentExecutor.from_agent_and_tools(
agent=builder.get_agent(),
tools=tools,
memory=memory,
callback_manager=callback_manager,
verbose=True,
)
self.executors[session] = executor
return executor
@staticmethod
def create(toolsets: list[BaseToolSet]) -> "AgentManager":
return AgentManager(
toolsets=toolsets,
)
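    # Illustrative wiring (commented out; the session id and query are placeholders):
    #   manager = AgentManager.create(toolsets)
    #   executor = manager.create_executor(session="user-1")
    #   answer = executor.run("...")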
| [] |
2024-01-10 | ai-ld/EVAL | core~agents~callback.py | from typing import Any, Dict, List, Optional, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
from celery import Task
from ansi import ANSI, Color, Style, dim_multiline
from logger import logger
class EVALCallbackHandler(BaseCallbackHandler):
@property
def ignore_llm(self) -> bool:
return False
def set_parser(self, parser) -> None:
self.parser = parser
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
text = response.generations[0][0].text
parsed = self.parser.parse_all(text)
logger.info(ANSI("Plan").to(Color.blue().bright()) + ": " + parsed["plan"])
logger.info(ANSI("What I Did").to(Color.blue()) + ": " + parsed["what_i_did"])
logger.info(
ANSI("Action").to(Color.cyan())
+ ": "
+ ANSI(parsed["action"]).to(Style.bold())
)
logger.info(
ANSI("Input").to(Color.cyan())
+ ": "
+ dim_multiline(parsed["action_input"])
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
logger.info(ANSI(f"on_llm_new_token {token}").to(Color.green(), Style.italic()))
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
logger.info(ANSI(f"Entering new chain.").to(Color.green(), Style.italic()))
logger.info(ANSI("Prompted Text").to(Color.yellow()) + f': {inputs["input"]}\n')
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
logger.info(ANSI(f"Finished chain.").to(Color.green(), Style.italic()))
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
logger.error(
ANSI(f"Chain Error").to(Color.red()) + ": " + dim_multiline(str(error))
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
logger.info(
ANSI("Observation").to(Color.magenta()) + ": " + dim_multiline(output)
)
logger.info(ANSI("Thinking...").to(Color.green(), Style.italic()))
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
logger.error(ANSI("Tool Error").to(Color.red()) + f": {error}")
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Optional[str],
) -> None:
pass
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
logger.info(
ANSI("Final Answer").to(Color.yellow())
+ ": "
+ dim_multiline(finish.return_values.get("output", ""))
)
class ExecutionTracingCallbackHandler(BaseCallbackHandler):
def __init__(self, execution: Task):
self.execution = execution
self.index = 0
def set_parser(self, parser) -> None:
self.parser = parser
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
pass
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
text = response.generations[0][0].text
parsed = self.parser.parse_all(text)
self.index += 1
parsed["index"] = self.index
self.execution.update_state(state="LLM_END", meta=parsed)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
pass
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
pass
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
pass
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
pass
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
self.execution.update_state(state="CHAIN_ERROR", meta={"error": str(error)})
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
**kwargs: Any,
) -> None:
pass
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
def on_tool_end(
self,
output: str,
observation_prefix: Optional[str] = None,
llm_prefix: Optional[str] = None,
**kwargs: Any,
) -> None:
previous = self.execution.AsyncResult(self.execution.request.id)
self.execution.update_state(
state="TOOL_END", meta={**previous.info, "observation": output}
)
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
previous = self.execution.AsyncResult(self.execution.request.id)
self.execution.update_state(
state="TOOL_ERROR", meta={**previous.info, "error": str(error)}
)
def on_text(
self,
text: str,
color: Optional[str] = None,
end: str = "",
**kwargs: Optional[str],
) -> None:
pass
def on_agent_finish(
self, finish: AgentFinish, color: Optional[str] = None, **kwargs: Any
) -> None:
pass
| [] |
2024-01-10 | ai-ld/EVAL | core~agents~chat_agent.py | from typing import Any, List, Optional, Sequence, Tuple
from langchain.agents.agent import Agent
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.output_parsers.base import BaseOutputParser
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain.schema import (
AgentAction,
AIMessage,
BaseLanguageModel,
BaseMessage,
HumanMessage,
)
from langchain.tools.base import BaseTool
from core.prompts.input import EVAL_TOOL_RESPONSE
class ConversationalChatAgent(Agent):
"""An agent designed to hold a conversation in addition to using tools."""
output_parser: BaseOutputParser
@property
def _agent_type(self) -> str:
raise NotImplementedError
@property
def observation_prefix(self) -> str:
"""Prefix to append the observation with."""
return "Observation: "
@property
def llm_prefix(self) -> str:
"""Prefix to append the llm call with."""
return "Thought: "
@classmethod
def create_prompt(
cls,
tools: Sequence[BaseTool],
system_message: str,
human_message: str,
output_parser: BaseOutputParser,
input_variables: Optional[List[str]] = None,
) -> BasePromptTemplate:
tool_strings = "\n".join(
[f"> {tool.name}: {tool.description}" for tool in tools]
)
tool_names = ", ".join([tool.name for tool in tools])
format_instructions = human_message.format(
format_instructions=output_parser.get_format_instructions()
)
final_prompt = format_instructions.format(
tool_names=tool_names, tools=tool_strings
)
if input_variables is None:
input_variables = ["input", "chat_history", "agent_scratchpad"]
messages = [
SystemMessagePromptTemplate.from_template(system_message),
MessagesPlaceholder(variable_name="chat_history"),
HumanMessagePromptTemplate.from_template(final_prompt),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
return ChatPromptTemplate(input_variables=input_variables, messages=messages)
def _extract_tool_and_input(self, llm_output: str) -> Optional[Tuple[str, str]]:
try:
response = self.output_parser.parse(llm_output)
return response["action"], response["action_input"]
except Exception:
raise ValueError(f"Could not parse LLM output: {llm_output}")
def _construct_scratchpad(
self, intermediate_steps: List[Tuple[AgentAction, str]]
) -> List[BaseMessage]:
"""Construct the scratchpad that lets the agent continue its thought process."""
thoughts: List[BaseMessage] = []
for action, observation in intermediate_steps:
thoughts.append(AIMessage(content=action.log))
human_message = HumanMessage(
content=EVAL_TOOL_RESPONSE.format(observation=observation)
)
thoughts.append(human_message)
return thoughts
@classmethod
def from_llm_and_tools(
cls,
llm: BaseLanguageModel,
tools: Sequence[BaseTool],
system_message: str,
human_message: str,
output_parser: BaseOutputParser,
callback_manager: Optional[BaseCallbackManager] = None,
input_variables: Optional[List[str]] = None,
**kwargs: Any,
) -> Agent:
"""Construct an agent from an LLM and tools."""
cls._validate_tools(tools)
prompt = cls.create_prompt(
tools,
system_message=system_message,
human_message=human_message,
input_variables=input_variables,
output_parser=output_parser,
)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
callback_manager=callback_manager,
)
tool_names = [tool.name for tool in tools]
return cls(
llm_chain=llm_chain,
allowed_tools=tool_names,
output_parser=output_parser,
**kwargs,
)
| [] |
2024-01-10 | mustafaemirhanyildiz/fastapi-ai-example | services~ChatWithPdf.py | from dotenv import load_dotenv
from openai import OpenAI
import os
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.faiss import FAISS
import json
import google.generativeai as genai
load_dotenv()
api_key = os.environ.get("OPENAI_API_KEY")
google_api_key = os.environ.get("GOOGLE_AI_API_KEY")
genai.configure(api_key=google_api_key)
client = OpenAI(api_key=api_key)
TOTAL_CHUNK_SIZE = 2000
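# Both helpers below follow the same retrieval-augmented pattern: split the PDF text into
# chunks of about TOTAL_CHUNK_SIZE characters, embed the chunks with OpenAIEmbeddings, index
# them in FAISS, retrieve the chunks most similar to the user's question, and pass those
# chunks to the model inside the system prompt.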
def chatWithGpt(pdfText : str, userInput : str):
text_splitter = CharacterTextSplitter(
separator="",
chunk_size=TOTAL_CHUNK_SIZE,
chunk_overlap=0,
length_function=len,
)
texts = text_splitter.split_text(pdfText)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
docs = docsearch.similarity_search(userInput)
doc_str = ""
for doc in docs:
doc_str += doc.page_content
system_prompt = f"The following text is provided for users to generate responses.Response type must be JSON and please answer the user's question based on the given document:\n\n{doc_str}\n\nUser's question: {userInput}\n\nAnswer:"
response = client.chat.completions.create(
model="gpt-4-1106-preview",
messages=[
{"role": "system", "content":system_prompt},
{"role": "user", "content": userInput}
],
response_format={ "type": "json_object" },
frequency_penalty=0.2,
presence_penalty=0.2,
temperature=0.7,
)
json_str = response.choices[0].message.content
print(json_str)
return json.loads(json_str)
def chatWithGoogleAi(pdfText : str, userInput : str):
text_splitter = CharacterTextSplitter(
separator="",
chunk_size=TOTAL_CHUNK_SIZE,
chunk_overlap=0,
length_function=len,
)
texts = text_splitter.split_text(pdfText)
embeddings = OpenAIEmbeddings()
docsearch = FAISS.from_texts(texts, embeddings)
docs = docsearch.similarity_search(userInput)
doc_str = ""
for doc in docs:
doc_str += doc.page_content
systemPrompt = f"The following text is provided for users to generate responses.Response type must be JSON and please answer the user's question based on the given document:\n\n{pdfText}\n\nUser's question: {userInput}\n\nAnswer:"
model = genai.GenerativeModel('gemini-pro')
response = model.generate_content(
systemPrompt)
return response.text
| [
"The following text is provided for users to generate responses.Response type must be JSON and please answer the user's question based on the given document:\n\nPLACEHOLDER\n\nUser's question: PLACEHOLDER\n\nAnswer:"
] |
2024-01-10 | gru101/Mega-project | functions.py | import streamlit as st
from streamlit.components.v1 import html
from streamlit_extras.switch_page_button import switch_page
import openai
import os
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext, set_global_service_context
from llama_index.llms import OpenAI
from llama_index.embeddings import OpenAIEmbedding
from llama_index.text_splitter import TokenTextSplitter
from llama_index.indices.prompt_helper import PromptHelper
import tempfile
documents_folder = "documents"
def sidebar_stuff1():
html_temp = """
<div style="background-color:{};padding:1px">
</div>
"""
button = """
<script type="text/javascript" src="https://cdnjs.buymeacoffee.com/1.0.0/button.prod.min.js" data-name="bmc-button" data-slug="kaushal.ai" data-color="#FFDD00" data-emoji="" data-font="Cookie" data-text="Support my work" data-outline-color="#000000" data-font-color="#000000" data-coffee-color="#ffffff" ></script>"""
with st.sidebar:
st.markdown("""
# ● About
"Talk to PDF" is an app that allows users to ask questions about the content of a PDF file using Natural Language Processing.
        The app uses a question-answering system powered by OpenAI's GPT 🔥 to provide accurate and relevant answers to your queries. """)
st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True)
st.markdown("""
# ● Get started
・Paste your OpenAI API key. (click on the link to get your API key)
""")
st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True)
st.markdown("""
Made by [@Obelisk_1531](https://twitter.com/Obelisk_1531)
""")
html(button, height=70, width=220)
st.markdown(
"""
<style>
iframe[width="210"] {
position: fixed;
bottom: 60px;
right: 40px;
}
</style>
""",
unsafe_allow_html=True,
)
def sidebar_stuff2():
html_temp = """
<div style="background-color:{};padding:1px">
</div>
"""
button = """
<script type="text/javascript" src="https://cdnjs.buymeacoffee.com/1.0.0/button.prod.min.js" data-name="bmc-button" data-slug="kaushal.ai" data-color="#FFDD00" data-emoji="" data-font="Cookie" data-text="Support my work" data-outline-color="#000000" data-font-color="#000000" data-coffee-color="#ffffff" ></script>"""
with st.sidebar:
st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True)
st.markdown("""
・Choose your model (gpt-3.5-turbo or gpt-4)
・Adjust the temperature according to your needs
(It controls the randomness of the model's output. A higher temperature (e.g., 1.0) makes the output more diverse and random, while a lower temperature (e.g., 0.5) makes the output more focused and deterministic.)
・Upload a PDF file and ask questions about its content
""")
st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True)
st.markdown("""
Made by [@Obelisk_1531](https://twitter.com/Obelisk_1531)
""")
html(button, height=70, width=220)
st.markdown(
"""
<style>
iframe[width="210"] {
position: fixed;
bottom: 60px;
right: 40px;
}
</style>
""",
unsafe_allow_html=True,
)
def sidebar_stuff3():
html_temp = """
<div style="background-color:{};padding:1px">
</div>
"""
button = """
<script type="text/javascript" src="https://cdnjs.buymeacoffee.com/1.0.0/button.prod.min.js" data-name="bmc-button" data-slug="kaushal.ai" data-color="#FFDD00" data-emoji="" data-font="Cookie" data-text="Support my work" data-outline-color="#000000" data-font-color="#000000" data-coffee-color="#ffffff" ></script>"""
with st.sidebar:
st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True)
st.markdown("""
・Ask questions about your documents content
・Get instant answers to your questions
""")
st.markdown(html_temp.format("rgba(55, 53, 47, 0.16)"), unsafe_allow_html=True)
st.markdown("""
Made by [@Obelisk_1531](https://twitter.com/Obelisk_1531)
""")
html(button, height=70, width=220)
st.markdown(
"""
<style>
iframe[width="210"] {
position: fixed;
bottom: 60px;
right: 40px;
}
</style>
""",
unsafe_allow_html=True,
)
def save_file(doc):
fn = os.path.basename(doc.name)
# check if documents_folder exists in the directory
if not os.path.exists(documents_folder):
# if documents_folder does not exist then making the directory
os.makedirs(documents_folder)
# open read and write the file into the server
open(documents_folder + '/' + fn, 'wb').write(doc.read())
# Check for the current filename, If new filename
# clear the previous cached vectors and update the filename
# with current name
if st.session_state.get('file_name'):
if st.session_state.file_name != fn:
st.cache_resource.clear()
st.session_state['file_name'] = fn
else:
st.session_state['file_name'] = fn
return fn
def remove_file(file_path):
# Remove the file from the Document folder once
# vectors are created
if os.path.isfile(documents_folder + '/' + file_path):
os.remove(documents_folder + '/' + file_path)
@st.cache_resource
def query_engine(pdf_file, model_name, temperature):
with st.spinner("Uploading PDF..."):
file_name = save_file(pdf_file)
llm = OpenAI(model=model_name, temperature=temperature)
service_context = ServiceContext.from_defaults(llm=llm)
with st.spinner("Loading document..."):
docs = SimpleDirectoryReader(documents_folder).load_data()
with st.spinner("Indexing document..."):
index = VectorStoreIndex.from_documents(docs, service_context=service_context)
with st.spinner("Creating query engine..."):
query_engine = index.as_query_engine()
st.session_state['index'] = index
st.session_state['query_engine'] = query_engine
switch_page('chat with pdf')
return query_engine
| [] |
2024-01-10 | LucaCguerreiro/Chat_Bot | ia.py | import os
import openai
from zapsalvo import msg
openai.api_key = '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'
response = openai.Completion.create(
model="text-davinci-003",
prompt="qual o peso da terra?",
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
resposta = response['choices'][0]['text']
print(resposta)
| [
"qual o peso da terra?"
] |
2024-01-10 | samuelstevens/hackohio2023 | tttutor~posts.py | import random
from flask import Blueprint, render_template, request
from tttutor import ai, db
bp = Blueprint("posts", __name__, url_prefix="/posts")
@bp.route("/", methods=("GET", "POST"))
def posts():
facts = []
if request.method == "POST":
topic = request.form.get("topic")
dev_mode = request.form.get("dev", "prod")
elif request.method == "GET":
topic = request.args.get("topic")
dev_mode = request.args.get("dev", "prod")
else:
raise RuntimeError(request.method)
n = 10
posts = []
title = "Search"
if topic or facts:
if dev_mode == "cache-only":
# Load from cache
posts = db.load_posts(topic=topic, n=n)
elif dev_mode == "no-cache":
posts = ai.get_greentexts(topic=topic, n=n)
elif dev_mode == "prod":
# Load half from cache, half from openai
cached_posts = db.load_posts(topic=topic, n=n // 2)
n = n - len(posts)
new_posts = ai.get_greentexts(topic=topic, n=n)
posts = cached_posts + new_posts
random.shuffle(posts)
else:
raise ValueError(dev_mode)
title = topic
return render_template("posts.html", posts=posts, title=title)
| [] |
2024-01-10 | NVIDIA/NeMo | nemo~collections~nlp~modules~common~megatron~mlp.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from nemo.collections.nlp.modules.common.megatron.adapters.parallel_adapters import (
AdapterName,
MLPInfusedAdapterConfig,
)
from nemo.collections.nlp.modules.common.megatron.fused_bias_geglu import fused_bias_geglu
from nemo.collections.nlp.modules.common.megatron.fused_bias_gelu import fused_bias_gelu
from nemo.collections.nlp.modules.common.megatron.fused_layer_norm import get_layer_norm
from nemo.collections.nlp.modules.common.megatron.layer_norm_1p import LayerNorm1P
from nemo.collections.nlp.modules.common.megatron.module import MegatronModule
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults, ApproxGELUActivation, erf_gelu
from nemo.collections.nlp.modules.common.megatron.utils import openai_gelu as openai_gelu_func
from nemo.collections.nlp.modules.common.megatron.utils import squared_relu
from nemo.core import adapter_mixins
try:
from apex.normalization import MixedFusedRMSNorm
from apex.transformer import parallel_state, tensor_parallel
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
# fake missing classes with None attributes
ModelType = AttnMaskType = AttnType = LayerType = ApexGuardDefaults()
try:
from megatron.core import ModelParallelConfig, parallel_state, tensor_parallel
from megatron.core.parallel_state import get_tensor_model_parallel_world_size
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
ModelParallelConfig = ApexGuardDefaults
HAVE_MEGATRON_CORE = False
class ParallelMLP(MegatronModule, adapter_mixins.AdapterModuleMixin):
"""MLP.
MLP will take the input with h hidden state, project it to 4*h
hidden dimension, perform nonlinear transformation, and project the
state back into h hidden dimension.
"""
def __init__(
self,
config: ModelParallelConfig,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
dtype=torch.float32,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
dropout=0.0,
):
super(ParallelMLP, self).__init__(config=config)
self.activation = activation
self.bias = bias
self.transformer_block_type = transformer_block_type
self.normalization = normalization
self.layernorm_epsilon = layernorm_epsilon
self.persist_layer_norm = persist_layer_norm
self.activation = activation
self.dropout = dropout
self.dtype = dtype
self.set_accepted_adapter_types([MLPInfusedAdapterConfig._target_])
supported_activations = [
'gelu',
'geglu',
'reglu',
'swiglu',
'squared-relu',
'fast-geglu',
'fast-swiglu',
'fast-reglu',
'approx-gelu',
]
if activation not in supported_activations:
raise ValueError(
f"Activation {activation} not supported. Supported activations are {supported_activations}"
)
self.fast_glu_activation = activation in ['fast-geglu', 'fast-swiglu', 'fast-reglu']
# Project to 4h.
self.dense_h_to_4h = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size * 2
if self.fast_glu_activation
else ffn_hidden_size, # NOTE: When using geglu, divide ffn dim by 2/3 to keep overall params the same.
config=config,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
bias=bias,
)
if activation in ['geglu', 'reglu', 'swiglu']:
# Separate linear layer for *GLU activations.
# Source: https://github.com/huggingface/transformers/blob/bee361c6f1f7704f8c688895f2f86f6e5ff84727/src/transformers/models/t5/modeling_t5.py#L292
self.dense_h_to_4h_2 = tensor_parallel.ColumnParallelLinear(
hidden_size,
ffn_hidden_size, # NOTE: When using *glu, divide ffn dim by 2/3 to keep overall params the same.
config=config,
gather_output=False,
init_method=init_method,
skip_bias_add=True,
bias=bias,
)
self.glu_activation_family = activation in [
'geglu',
'reglu',
'swiglu',
'fast-geglu',
'fast-reglu',
'fast-swiglu',
]
bias_activation_fusion_unavailable = activation in ['reglu', 'swiglu']
if bias_activation_fusion_unavailable and bias_activation_fusion:
raise ValueError(
f"Cannot use bias_activation_fusion with {activation} activation. Please turn bias gelu fusion off."
)
        if self.glu_activation_family and onnx_safe and bias_activation_fusion:
raise ValueError(
f"Cannot use onnx_safe with specificed activation function and bias_activation_fusion : {activation} Please turn onnx safe off."
)
if bias_activation_fusion and not bias:
raise ValueError(
f"Cannot use bias_activation_fusion without bias terms. Please set bias=True or bias_activation_fusion=False."
)
self.bias_activation_fusion = bias_activation_fusion
# Give openai_gelu precedence over other activations if set, for HF compatibility. Normally this is off and shouldn't affect regular model training.
if openai_gelu:
self.activation_func = openai_gelu_func
elif activation in ["gelu", "geglu", "fast-geglu"]:
self.activation_func = F.gelu
elif activation == 'approx-gelu':
self.activation_func = ApproxGELUActivation
elif onnx_safe:
self.activation_func = erf_gelu
elif activation in ["reglu", "fast-reglu"]:
self.activation_func = F.relu
elif activation in ["swiglu", "fast-swiglu"]:
# SiLU or sigmoid linear unit is the same as swish with beta = 1 (which is what https://arxiv.org/pdf/2002.05202.pdf uses.)
self.activation_func = F.silu
elif activation == 'squared-relu':
self.activation_func = squared_relu
# Project back to h.
self.dense_4h_to_h = tensor_parallel.RowParallelLinear(
ffn_hidden_size,
hidden_size,
config=config,
input_is_parallel=True,
init_method=output_layer_init_method,
skip_bias_add=True,
bias=bias,
)
# Normformer normalization
if transformer_block_type == 'normformer':
if normalization == 'layernorm':
self.normalization = get_layer_norm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon, persist_layer_norm
)
elif normalization == 'layernorm1p':
self.normalization = LayerNorm1P(
ffn_hidden_size // get_tensor_model_parallel_world_size(),
layernorm_epsilon,
sequence_parallel_enabled=config.sequence_parallel,
)
else:
self.normalization = MixedFusedRMSNorm(
ffn_hidden_size // get_tensor_model_parallel_world_size(), layernorm_epsilon
)
def forward(self, hidden_states):
# [s, b, 4hp]
intermediate_parallel, bias_parallel = self.dense_h_to_4h(hidden_states)
if self.fast_glu_activation:
intermediate_parallel, intermediate_parallel_2 = torch.chunk(intermediate_parallel, 2, dim=-1)
if bias_parallel is not None:
bias_parallel, bias_parallel_2 = torch.chunk(bias_parallel, 2, dim=-1)
elif self.glu_activation_family and not self.fast_glu_activation:
intermediate_parallel_2, bias_parallel_2 = self.dense_h_to_4h_2(hidden_states)
if self.bias_activation_fusion:
if self.activation == 'gelu':
intermediate_parallel = fused_bias_gelu(intermediate_parallel, bias_parallel)
elif self.activation in ['geglu', 'fast-geglu']:
intermediate_parallel = fused_bias_geglu(
intermediate_parallel, bias_parallel, intermediate_parallel_2, bias_parallel_2
)
elif self.glu_activation_family and not self.bias_activation_fusion:
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel) * (
intermediate_parallel_2 + bias_parallel_2
)
else:
intermediate_parallel = self.activation_func(intermediate_parallel) * intermediate_parallel_2
else:
if bias_parallel is not None:
intermediate_parallel = self.activation_func(intermediate_parallel + bias_parallel)
else:
intermediate_parallel = self.activation_func(intermediate_parallel)
if self.dropout > 0:
intermediate_parallel = F.dropout(intermediate_parallel, p=self.dropout, training=self.training)
infused_adapter = self.get_adapter_module(AdapterName.MLP_INFUSED)
if infused_adapter:
intermediate_parallel = infused_adapter(intermediate_parallel)
# Normformer normalization
if self.transformer_block_type == 'normformer':
intermediate_parallel = self.normalization(intermediate_parallel)
# [s, b, h]
output, output_bias = self.dense_4h_to_h(intermediate_parallel)
return output, output_bias
class SwitchMLP(MegatronModule):
"""Top-1 MoE
    Currently supports Sinkhorn-based expert routing."""
def __init__(
self,
config: ModelParallelConfig,
num_experts,
init_method,
output_layer_init_method,
hidden_size,
ffn_hidden_size,
bias_activation_fusion=True,
openai_gelu=False,
onnx_safe=False,
activation='gelu',
bias=True,
transformer_block_type='pre_ln',
normalization='layernorm',
layernorm_epsilon=1e-5,
persist_layer_norm=False,
sequence_parallel=False,
dropout=0.0,
):
super(SwitchMLP, self).__init__(config=config)
self.num_experts = num_experts
self.route_algo = SwitchMLP.sinkhorn
self.router = tensor_parallel.RowParallelLinear(
hidden_size,
num_experts,
config=config,
input_is_parallel=False,
init_method=init_method,
skip_bias_add=False,
bias=bias,
)
mlp_args = {
'config': config,
'init_method': init_method,
'output_layer_init_method': output_layer_init_method,
'hidden_size': hidden_size,
'ffn_hidden_size': ffn_hidden_size,
'bias_activation_fusion': bias_activation_fusion,
'openai_gelu': openai_gelu,
'onnx_safe': onnx_safe,
'activation': activation,
'bias': bias,
'transformer_block_type': transformer_block_type,
'normalization': normalization,
'layernorm_epsilon': layernorm_epsilon,
'persist_layer_norm': persist_layer_norm,
'dropout': dropout,
}
self.experts = torch.nn.ModuleList([ParallelMLP(**mlp_args) for _ in range(num_experts)])
def forward(self, hidden_states):
hidden_shape = hidden_states.shape
route, _ = self.router(hidden_states)
route = route.view(-1, self.num_experts)
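        # During training, the expert index is chosen from Sinkhorn-normalized routing logits
        # (computed without gradients) while the gating probability comes from the raw sigmoid
        # scores; at inference, the expert with the highest sigmoid score is chosen directly.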
if self.training:
with torch.no_grad():
norm_route = self.route_algo(
route.detach().to(dtype=torch.float32)
) # explicit fp32 conversion for stability
_, max_ind = torch.max(norm_route, dim=1)
route = torch.sigmoid(route)
max_prob = route[torch.arange(route.size(0)), max_ind]
else:
route = torch.sigmoid(route)
max_prob, max_ind = torch.max(route, dim=1)
max_prob = torch.unsqueeze(max_prob, 1)
hidden_states = hidden_states.view(-1, hidden_shape[-1])
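        # Dispatch: for each expert, gather the tokens routed to it, run that expert's MLP,
        # and scatter the outputs (and biases) back into the full-size buffers.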
local_indices = (max_ind == 0).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = self.experts[0](hidden)
output_bias = output_bias.expand_as(output)
output_total = torch.empty_like(hidden_states, dtype=output.dtype)
output_bias_total = torch.empty_like(hidden_states, dtype=output_bias.dtype)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
for expert_num, expert in enumerate(self.experts):
if expert_num == 0:
continue
local_indices = (max_ind == expert_num).nonzero()
hidden = hidden_states[local_indices, :]
output, output_bias = expert(hidden)
output_bias = output_bias.expand_as(output)
output_total[local_indices, :] = output
output_bias_total[local_indices, :] = output_bias
output_total = output_total * max_prob
output_bias_total = output_bias_total * max_prob
output_total = output_total.view(hidden_shape)
output_bias_total = output_bias_total.view(hidden_shape)
return output_total, output_bias_total
@classmethod
def sinkhorn(cls, cost, tol=0.0001):
"Megatron-LMs sinkhorn implementation"
cost = torch.exp(cost)
d0 = torch.ones(cost.size(0), device=cost.device, dtype=cost.dtype)
d1 = torch.ones(cost.size(1), device=cost.device, dtype=cost.dtype)
eps = 0.00000001
error = 1e9
d1_old = d1
while error > tol:
d0 = (1 / d0.size(0)) * 1 / (torch.sum(d1 * cost, 1) + eps)
d1 = (1 / d1.size(0)) * 1 / (torch.sum(d0.unsqueeze(1) * cost, 0) + eps)
error = torch.mean(torch.abs(d1_old - d1))
d1_old = d1
return d1 * cost * d0.unsqueeze(1)
| [] |
2024-01-10 | NVIDIA/NeMo | nemo~collections~nlp~parts~utils_funcs.py | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'torch_dtype_from_precision',
'list2str',
'tensor2list',
'plot_confusion_matrix',
'get_classification_report',
]
import os
import time
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from matplotlib import pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from torch import Tensor
from nemo.collections.nlp.modules.common.megatron.utils import erf_gelu
from nemo.collections.nlp.modules.common.megatron.utils import openai_gelu as openai_gelu_func
from nemo.collections.nlp.modules.common.megatron.utils import squared_relu
from nemo.utils import logging
def torch_dtype_from_precision(precision: Union[int, str], megatron_amp_O2: Optional[bool] = None) -> torch.dtype:
""" Mapping from PTL precision types to corresponding PyTorch parameter datatype."""
if megatron_amp_O2 is not None and megatron_amp_O2 is False:
return torch.float32
if precision in ['bf16', 'bf16-mixed']:
return torch.bfloat16
elif precision in [16, '16', '16-mixed']:
return torch.float16
elif precision in [32, '32', '32-true']:
return torch.float32
else:
raise ValueError(f"Could not parse the precision of `{precision}` to a valid torch.dtype")
def list2str(l: List[int]) -> str:
""" Converts list to a string"""
return ' '.join([str(x) for x in l])
def tensor2list(tensor: Tensor) -> List[Union[int, float]]:
""" Converts tensor to a list """
return tensor.detach().cpu().tolist()
def plot_confusion_matrix(
labels: List[int],
preds: List[int],
graph_fold: str,
label_ids: Dict[str, int] = None,
normalize: bool = False,
prefix: str = '',
):
'''
Plot confusion matrix.
Args:
labels: ground truth labels
preds: model predictions
graph_fold: path to a folder to store confusion matrix plot
label_ids: str label to id map, for example: {'O': 0, 'LOC': 1}
normalize: whether to normalize confusion matrix
prefix: prefix for the plot name
'''
if label_ids is None:
_plot_confusion_matrix(labels, preds, graph_fold)
else:
# remove labels from label_ids that don't appear in the dev set
used_labels = set(labels) | set(preds)
label_ids = {k: label_ids[k] for k, v in label_ids.items() if v in used_labels}
ids_to_labels = {label_ids[k]: k for k in label_ids}
classes = [ids_to_labels[id] for id in sorted(label_ids.values())]
title = 'Confusion_matrix'
cm = confusion_matrix(labels, preds)
if normalize:
sums = cm.sum(axis=1)[:, np.newaxis]
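            # Guard against division by zero for classes that never appear as true labels.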
sums = np.where(sums == 0, 1, sums)
cm = cm.astype('float') / sums
title = 'Normalized_' + title
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
ax.set_xticks(np.arange(-1, len(classes)))
ax.set_yticks(np.arange(-1, len(classes)))
ax.set_xticklabels([''] + classes, rotation=90)
ax.set_yticklabels([''] + classes)
ax.set_ylabel('True')
ax.set_xlabel('Predicted')
os.makedirs(graph_fold, exist_ok=True)
fig.colorbar(cax)
title = (prefix + title).strip()
fig_name = os.path.join(graph_fold, title + '_' + time.strftime('%Y%m%d-%H%M%S'))
plt.savefig(fig_name)
logging.info(f'Confusion matrix saved to {fig_name}')
def _plot_confusion_matrix(labels: List[int], preds: List[int], graph_fold: str):
"""
Plot confusion matrix
Args:
labels: ground truth labels
preds: model predictions
graph_fold: path to a folder to store confusion matrix plot
"""
cm = confusion_matrix(labels, preds)
logging.info(f'Confusion matrix:\n{cm}')
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix of the classifier')
fig.colorbar(cax)
plt.xlabel('Predicted')
plt.ylabel('True')
os.makedirs(graph_fold, exist_ok=True)
plt.savefig(os.path.join(graph_fold, time.strftime('%Y%m%d-%H%M%S')))
def get_classification_report(labels, preds, label_ids, output_dict=False):
"""
Returns classification report
"""
# remove labels from label_ids that don't appear in predictions or ground truths
used_labels = set(labels) | set(preds)
labels_names = [
k + ' (label id: ' + str(v) + ')'
for k, v in sorted(label_ids.items(), key=lambda item: item[1])
if v in used_labels
]
return classification_report(labels, preds, target_names=labels_names, digits=4, output_dict=output_dict)
def is_last_rank():
return torch.distributed.get_rank() == (torch.distributed.get_world_size() - 1)
def get_last_rank():
return torch.distributed.get_world_size() - 1
def activation_to_func(activation: str, openai_gelu: bool = False, onnx_safe: bool = False) -> Callable:
""" Converts an activation function represented as a string to a function.
Args:
activation (str): string representation of an activation function, typically gotten from the model config.
openai_gelu (bool): whether to use the OpenAI GELU implementation. Used with HF compatibility.
onnx_safe (bool): whether to use the ONNX-compatible implementation of GELU.
Returns:
Callable: the activation function.
"""
supported_activations = [
'gelu',
'geglu',
'reglu',
'swiglu',
'squared-relu',
'fast-geglu',
'fast-swiglu',
'fast-reglu',
]
if activation not in supported_activations:
raise ValueError(f"Unsupported activation {activation}. Supported activations: {supported_activations} ")
# Give openai_gelu precedence over other activations if set, for HF compatibility.
# Normally this is off and shouldn't affect regular model training.
if openai_gelu:
activation_func = openai_gelu_func
elif activation in ["gelu", "geglu", "fast-geglu"]:
activation_func = F.gelu
elif onnx_safe:
activation_func = erf_gelu
elif activation in ["reglu", "fast-reglu"]:
activation_func = F.relu
elif activation in ["swiglu", "fast-swiglu"]:
# SiLU or sigmoid linear unit is the same as swish with beta = 1 (which is what https://arxiv.org/pdf/2002.05202.pdf uses.)
activation_func = F.silu
elif activation == 'squared-relu':
activation_func = squared_relu
return activation_func
| [] |
2024-01-10 | NVIDIA/NeMo | scripts~nlp_language_modeling~sft~attribute_annotate.py | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""script to annotate the the datasets with using trained attribute prediciton model.
First, we need to launch the NeMo Megatron inference server
Example:
```bash
python examples/nlp/language_modeling/megatron_gpt_eval.py \
gpt_model_file=/models/TRAINED_ATTR_PREDICTION_MODEL.nemo \
pipeline_model_parallel_split_rank=0 \
server=True \
tensor_model_parallel_size=TP_SIZE \
pipeline_model_parallel_size=PP_SIZE \
trainer.precision=bf16 \
trainer.devices=TP_SIZE*PP_SIZE \
trainer.num_nodes=1 \
web_server=False \
port=1424
```
Then, we can run this script to annotate the dataset.
Example usage:
python scripts/nlp_language_modeling/sft/attribute_annotate.py --batch_size=1 --host=localhost --input_file_name=input.jsonl --output_file_name=output.jsonl --port_num=1424
"""
import json
import os
import fire
import tqdm
from langchain.prompts.few_shot import PromptTemplate
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import text_generation
langs = [
'ar',
'bg',
'bn',
'ca',
'cs',
'da',
'de',
'el',
'en',
'eo',
'es',
'eu',
'fa',
'fi',
'fr',
'gl',
'he',
'hu',
'id',
'it',
'ja',
'ko',
'nb',
'nl',
'pl',
'pt',
'ro',
'ru',
'sk',
'sv',
'th',
'tr',
'uk',
'vi',
'zh',
]
SFT_PREFIX = """<extra_id_0>System
{system_message}"""
ONE_TRUN_WITH_VAL = """<extra_id_1>{user_name}
{user_message}
<extra_id_2>{label}
"""
ONE_TRUN_WITHOUT_VAL = """<extra_id_1>{user_name}
{user_message}
"""
SYSTEM = PromptTemplate(input_variables=["system_message"], template=SFT_PREFIX)
EXAMPLE_PROMPT_WITH_VAL = PromptTemplate(
input_variables=["user_name", "user_message", "label"], template=ONE_TRUN_WITH_VAL
)
EXAMPLE_PROMPT_WITHOUT_VAL = PromptTemplate(
input_variables=["user_name", "user_message"], template=ONE_TRUN_WITHOUT_VAL
)
selected_keys = [
'quality',
'toxicity',
'humor',
'creativity',
'violence',
'helpfulness',
'not_appropriate',
'hate_speech',
'sexual_content',
'fails_task',
'political_content',
'moral_judgement',
'lang',
]
def calculate_key(obj):
return ":".join([item['value'] for item in obj['conversations']])
def load_data(path):
with open(path, 'r', encoding='utf-8') as fin:
for line in fin:
yield json.loads(line)
def get_prompt(data_obj, turn, current_label="", label_id=0):
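    # Build the prompt for annotating conversation turn `turn`: the system message, all
    # previous turns (with their labels when available), the turn to annotate, and the
    # partially generated label string ending with the attribute currently being predicted.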
if len(data_obj['conversations']) < turn + 1:
return None
examples = []
for i in range(0, turn):
d = data_obj['conversations'][i]
if 'label' in d:
examples.append(
EXAMPLE_PROMPT_WITH_VAL.format(
**{'user_name': d['from'], 'user_message': d['value'], 'label': d['label']}
)
)
else:
examples.append(EXAMPLE_PROMPT_WITHOUT_VAL.format(**{'user_name': d['from'], 'user_message': d['value']}))
example_text = "".join(examples)
d = data_obj['conversations'][turn]
predict_message = EXAMPLE_PROMPT_WITHOUT_VAL.format(**{'user_name': d['from'], 'user_message': d['value']})
if label_id != 0:
current_label = current_label + ',' + selected_keys[label_id] + ':'
else:
current_label = '<extra_id_2>' + selected_keys[label_id] + ':'
return SYSTEM.format(**{'system_message': data_obj['system']}) + example_text + predict_message + current_label
def create_gen_function(host='localhost', port=5555):
def request(prompts, greedy, add_BOS, token_to_gen, min_tokens, temp, top_p, top_k, repetition, end_strings):
data = {
"sentences": prompts,
"tokens_to_generate": int(token_to_gen),
"temperature": temp,
"add_BOS": add_BOS,
"top_k": top_k,
"top_p": top_p,
"greedy": greedy,
"all_probs": False,
"repetition_penalty": repetition,
"min_tokens_to_generate": int(min_tokens),
"end_strings": end_strings,
}
response = text_generation(data, ip=host, port=port)
sentences = response['sentences']
return sentences
return request
class Worker(object):
def __init__(self, host='localhost', port=5555, progress_bar=None, output_file=None, process_lang=False):
self.req = create_gen_function(host=host, port=port)
self.fout = open(output_file, "a", encoding='utf-8')
self.progress_bar = progress_bar
self.process_lang = process_lang
def process_result(self, batch):
while True:
try:
items = [i['item'] for i in batch]
turns = [i['turn'] for i in batch]
prompts = [i['prompt'] for i in batch]
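                # Attributes are predicted one at a time: a single token is generated greedily,
                # the value of the previous attribute is parsed from the returned text, and each
                # prompt is extended with the next attribute name for the following pass.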
for label_id in range(1, len(selected_keys)):
results = self.req(
prompts,
greedy=True,
add_BOS=False,
token_to_gen=1,
min_tokens=1,
temp=0.1,
top_p=1.0,
top_k=1,
repetition=1.0,
end_strings=["<extra_id_1>", "<|endoftext|>"],
)
# get current value from result
current_values = []
nums = []
for result in results:
                    # problem: result[-1] may be a trailing '\n'
current_val = result.split('quality')[-1]
current_val = 'quality' + current_val
# remove whatever after new line
current_val = current_val.split('\n')[0].strip()
# remove everything that is >= selected_keys[label_id]
splits = current_val.split(',')
filtered = []
for item in splits:
filtered.append(item)
if item.split(':')[0] == selected_keys[label_id - 1]:
nums.append(item.split(':')[1])
break
current_val = '<extra_id_2>' + ','.join(filtered)
current_values.append(current_val)
filtered_items = []
filtered_turns = []
filtered_prompts = []
filtered_current_values = []
for result, item, turn, num, current_value in zip(results, items, turns, nums, current_values):
try:
value = int(num)
except Exception as e:
print(f'error {e} when convert {num} to int')
continue
filtered_current_values.append(current_value)
filtered_items.append(item)
filtered_turns.append(turn)
if label_id < len(selected_keys):
prompt = get_prompt(item, turn, current_label=current_value, label_id=label_id)
filtered_prompts.append(prompt)
items = filtered_items
turns = filtered_turns
prompts = filtered_prompts
current_values = filtered_current_values
if self.process_lang:
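                    # As a final pass, predict the language attribute and drop items whose
                    # predicted language is not in the supported `langs` list.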
results = self.req(
prompts,
greedy=True,
add_BOS=False,
token_to_gen=1,
min_tokens=1,
temp=0.1,
top_p=1.0,
top_k=1,
repetition=1.0,
end_strings=["<extra_id_1>", "<|endoftext|>"],
)
# get current value from result
current_values = []
for result in results:
                        # problem: result[-1] may be a trailing '\n'; replace it so the split below does not return an empty string
if result.endswith('\n'):
result = result[:-1] + '@'
current_values.append(result.split('\n')[-1])
nums = []
for result in results:
                        # problem: result[-1] may be a trailing '\n'
current_val = result.split('quality')[-1]
current_val = 'quality' + current_val
# remove whatever after new line
current_val = current_val.split('\n')[0].strip()
# remove everything that is >= selected_keys[label_id]
splits = current_val.split(',')
filtered = []
for item in splits:
filtered.append(item)
if item.split(':')[0] == selected_keys[label_id]:
nums.append(item.split(':')[1])
break
current_val = '<extra_id_2>' + ','.join(filtered)
current_values.append(current_val)
filtered_items = []
filtered_turns = []
filtered_prompts = []
filtered_current_values = []
for result, item, turn, num, current_value in zip(results, items, turns, nums, current_values):
if num not in langs:
print(f'error {num} not in langs')
continue
filtered_current_values.append(current_value)
filtered_items.append(item)
filtered_turns.append(turn)
items = filtered_items
turns = filtered_turns
current_values = filtered_current_values
batch = []
for item, turn, current_value in zip(items, turns, current_values):
response_text = current_value[12:]
if 'label' in item['conversations'][turn]:
item['conversations'][turn]['gt_label'] = item['conversations'][turn]['label']
item['conversations'][turn]['label'] = response_text
prompt = get_prompt(item, turn + 1, current_label='', label_id=0)
if prompt is not None:
batch.append({'prompt': prompt, 'item': item, 'turn': turn + 1})
else:
self.progress_bar.update(1)
self.fout.write(json.dumps(item, ensure_ascii=False) + "\n")
self.fout.flush()
if self.progress_bar.n >= self.progress_bar.total:
break
if len(batch) == 0:
break
except Exception as e:
print(f'error {e} when processing {batch}')
# ignore the error and continue
self.progress_bar.update(1)
if self.progress_bar.n >= self.progress_bar.total:
break
def main(
batch_size=1,
host='localhost',
input_file_name='input.jsonl',
output_file_name='output.jsonl',
port_num=1424,
process_lang=True,
):
input_data = load_data(f'{input_file_name}')
output_path = f'{output_file_name}'
existing_requests = set()
if os.path.exists(output_path):
with open(output_path, 'r', encoding='utf-8') as fin:
for line in fin:
line = json.loads(line)
existing_requests.add(calculate_key(line))
print(f"Loaded {len(existing_requests)} existing requests")
filter_data = [d for d in input_data if calculate_key(d) not in existing_requests]
progress_bar = tqdm.tqdm(total=len(filter_data))
worker = Worker(
host=host, port=port_num, progress_bar=progress_bar, output_file=output_path, process_lang=process_lang
)
for batch_idx in range(0, len(filter_data), batch_size):
batch = [line for line in filter_data[batch_idx : batch_idx + batch_size]]
turns = [
0 if 'mask' not in d['conversations'][0]['from'] or d['conversations'][0]['from'] == d['mask'] else 1
for d in batch
]
task = [{'prompt': get_prompt(d, turn, "", 0), 'item': d, 'turn': turn} for d, turn in zip(batch, turns)]
worker.process_result(task)
worker.fout.close()
if __name__ == '__main__':
fire.Fire(main)
| [
"[]",
"user_message",
"user_name"
] |
2024-01-10 | shadowaxe99/lawsuit-e | src~gpt4_lawsuit_writer.py | import openai
import user_interface_module
from ai_model import AIModel
class GPT4LawsuitWriter:
    def __init__(self):
        # Text generation is handled by AIModel, which wraps the OpenAI completions API.
        self.ai_model = AIModel()
        openai.api_key = 'your-openai-api-key'
def gather_user_input(self):
return user_interface_module.collect_lawsuit_details()
    def analyze_case(self, user_data):
        prompt = f"Lawsuit Case Analysis:\n\n{user_data}\n\nLegal Insights:"
        return self.ai_model.generate_text(prompt)
    def generate_lawsuit_draft(self, case_analysis):
        prompt = f"Draft Lawsuit Document:\n\nCase Analysis:\n{case_analysis}\n\nLawsuit Draft:"
        return self.ai_model.generate_text(prompt)
def review_and_edit_draft(self, draft):
return user_interface_module.review_and_edit(draft)
    def finalize_document(self, edited_draft):
        prompt = f"Finalize Lawsuit Document:\n\nEdited Draft:\n{edited_draft}\n\nFinal Document:"
        return self.ai_model.generate_text(prompt)
def main():
lawsuit_writer = GPT4LawsuitWriter()
user_data = lawsuit_writer.gather_user_input()
case_analysis = lawsuit_writer.analyze_case(user_data)
draft = lawsuit_writer.generate_lawsuit_draft(case_analysis)
edited_draft = lawsuit_writer.review_and_edit_draft(draft)
final_document = lawsuit_writer.finalize_document(edited_draft)
return final_document
if __name__ == "__main__":
main()
| [
"Draft Lawsuit Document:\n\nCase Analysis:\nPLACEHOLDER\n\nLawsuit Draft:",
"Lawsuit Case Analysis:\n\nPLACEHOLDER\n\nLegal Insights:",
"Finalize Lawsuit Document:\n\nEdited Draft:\nPLACEHOLDER\n\nFinal Document:"
] |
2024-01-10 | shadowaxe99/lawsuit-e | src~ai_model.py | import openai
class AIModel:
    """Thin wrapper around the OpenAI completions API used to generate lawsuit text."""
    def __init__(self):
        openai.api_key = 'your-openai-api-key'
def generate_text(self, prompt):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=1024
)
return response.choices[0].text.strip()
# Add any other necessary methods for the AI model
| [] |
2024-01-10 | shadowaxe99/lawsuit-e | coder-agi-fixed-v6~src~gpt4_lawsuit_writer.py | import openai
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import user_interface_module
class GPT4LawsuitWriter:
def __init__(self):
self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2-large")
        self.model = None  # Placeholder for the GPT-4-1106-preview model
openai.api_key = 'your-openai-api-key'
def prompt_gpt4(self, prompt):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=1024
)
return response.choices[0].text.strip()
def gather_user_input(self):
return user_interface_module.collect_lawsuit_details()
def analyze_case(self, user_data):
prompt = f"Lawsuit Case Analysis:\n\n{user_data}\n\nLegal Insights:"
return self.prompt_gpt4(prompt)
def generate_lawsuit_draft(self, case_analysis):
prompt = f"Draft Lawsuit Document:\n\n{case_analysis}\n\nLawsuit Draft:"
return self.prompt_gpt4(prompt)
def review_and_edit_draft(self, draft):
return user_interface_module.review_and_edit(draft)
def finalize_document(self, edited_draft):
prompt = f"Finalize Lawsuit Document:\n\n{edited_draft}\n\nFinal Document:"
return self.prompt_gpt4(prompt)
def main():
lawsuit_writer = GPT4LawsuitWriter()
user_data = lawsuit_writer.gather_user_input()
case_analysis = lawsuit_writer.analyze_case(user_data)
draft = lawsuit_writer.generate_lawsuit_draft(case_analysis)
edited_draft = lawsuit_writer.review_and_edit_draft(draft)
final_document = lawsuit_writer.finalize_document(edited_draft)
return final_document
if __name__ == "__main__":
final_lawsuit_document = main() | [
"Lawsuit Case Analysis:\n\nPLACEHOLDER\n\nLegal Insights:",
"Draft Lawsuit Document:\n\nPLACEHOLDER\n\nLawsuit Draft:",
"Finalize Lawsuit Document:\n\nPLACEHOLDER\n\nFinal Document:"
] |