date_collected (string) | repo_name (string, 6-116 chars) | file_name (string, 2-220 chars) | file_contents (string, 13-357k chars) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | vempatisaivishal/GlobalTruthHub | titletest.py | import openai
import requests
import spacy
import re
import multiprocessing
from transformers import pipeline
from clickbait import clickbait
from spellchecker import SpellChecker
from subjectivemodel import subjective
from urllib.parse import urlparse
from newspaper import Config
import nltk
from nltk.tokenize import sent_tokenize
from gingerit.gingerit import GingerIt
from isnewstitle import checkNewsTitle
from bs4 import BeautifulSoup
from similarity import calculate_sentence_similarity
import time
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
import html5lib
from selenium import webdriver
config = Config()
config.browser_user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
API_KEY = "YOUR_OPENAI_API_KEY"  # placeholder: load the real key from an environment variable rather than hardcoding it
openai.api_key = API_KEY
class checkTitle:
def __init__(self, title):
self.headline = title
self.own_corrections = {'iam': "i'm", 'im': "i'm"}
self.spellings_ratio = 0
self.corrected = title
self.misspelled_words = []
self.required = []
self.contexts = []
def lower_case(self, text):
text = text.lower()
pattern = re.compile("<.*?>")
text = pattern.sub(r'', text)
pattern = re.compile(r'https?://\S+|www\.\S+')
text = pattern.sub(r'', text)
exclude = "[!\#\$%\&\(\)\*\+,\.\-\"/:;<=>\?@\[\]\^_`\{\|\}\~0123456789]"
return text.translate(str.maketrans('', '', exclude))
def spelling_mistakes(self):
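# run NER first (local fine-tuned model) so person/location names are not flagged as misspellings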
head = self.lower_case(self.headline)
output_dir = "C://Users/vixha/downloads"
ner_pipeline = pipeline("ner", grouped_entities=True, model=output_dir)
head2 = ' '.join([i.capitalize() for i in head.split()])
ner_results = ner_pipeline(head2)
named_entities = [entity["word"].lower() for entity in ner_results if entity["entity_group"] == "PER" or entity["entity_group"] == "LOC"]
misspelled_words = []
# parser = GingerIt()
spell = SpellChecker()
words = []
for token_text in head.split(' '):
# corrected_token = parser.parse(token_text)['result'].lower()
corrected_token = spell.correction(token_text)
if token_text.isalpha() and token_text not in named_entities:
if token_text in self.own_corrections:
words.append(self.own_corrections[token_text])
misspelled_words.append(token_text)
elif token_text != corrected_token:
misspelled_words.append(token_text)
words.append(corrected_token)
else:
words.append(token_text)
else:
words.append(token_text)
self.corrected = ' '.join(words)
self.misspelled_words = set(misspelled_words)
if len(misspelled_words) == 0:
return True
ratio = len(misspelled_words) / len(self.headline.split(" "))
return ratio < 0.5
def classify_clickbait(self):
click = clickbait(self.corrected)
return click.run() == 0
def subjective_test(self):
subjective_obj = subjective()
answer = subjective_obj.send_request(self.headline)
return answer == "objective"
def is_newstitle(self):
if len(self.headline) > 90:
return False
if not re.search(r'[A-Z][a-z]+', self.headline):
return False
is_news = checkNewsTitle(self.headline).run()
if is_news[0] == 0:
return False
return True
def present_on_google(self):
url = f"https://www.google.com/search?q={self.headline}"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, 'html.parser')
include_link = soup.find('a', class_='fl')
if include_link:
response = requests.get('https://www.google.com/' + include_link['href'], headers=headers)
soup = BeautifulSoup(response.content, 'html.parser')
search_results = soup.find_all('div', class_='yuRUbf')
search_contexts = soup.find_all('div', {'class': ['VwiC3b', 'yXK7lf', 'MUxGbd', 'yDYNvb', 'lyLwlc', 'lEBKkf']})
search_contexts = [i for i in search_contexts if
str(i).find('class="VwiC3b yXK7lf MUxGbd yDYNvb lyLwlc lEBKkf"') != -1]
urls = []
for result, context in zip(search_results, search_contexts):
link = result.find('a')
url = link['href']
heading = result.find(re.compile('^h[1-6]$')).text
response = requests.get(url, headers=headers)
soup = BeautifulSoup(response.content, 'html.parser')
parsed_url = urlparse(url)
domain_name = parsed_url.netloc
domain_name = domain_name.replace('www.', '')
if soup.find('title') and self.present_on_google_news_2(domain_name):
self.required.append(soup.find('title').text)
if len(context.find_all('span')) > 2:
self.contexts.append(context.find_all('span')[2].text)
elif len(context.find_all('span')) > 1:
self.contexts.append(context.find_all('span')[1].text)
else:
self.contexts.append(context.find_all('span')[0].text)
urls.append({'url': url, 'heading': heading})
print(self.contexts)
if len(self.required) < 3:
return False
return self.availability_on_web(urls)
def availability_on_web(self, results):
similar_links = []
max_similarity = 0
article_heading = ''
for result, context in zip(self.required, self.contexts):
similarity_percentage_1 = calculate_sentence_similarity(self.headline, result)
print(self.headline, " ", result, " ", similarity_percentage_1)
similarity_percentage_2 = calculate_sentence_similarity(self.headline, context)
print(self.headline, " ", context, " 2 ", similarity_percentage_2)
# keep track of the best-matching heading/context seen so far
if similarity_percentage_1 > similarity_percentage_2:
if similarity_percentage_1 > max_similarity:
max_similarity = similarity_percentage_1
article_heading = result
else:
if similarity_percentage_2 > max_similarity:
max_similarity = similarity_percentage_2
article_heading = context
similarity_percentage = max(similarity_percentage_1, similarity_percentage_2)
if similarity_percentage >= 0.55:
similar_links.append(similarity_percentage)
if len(similar_links) < 2:
return False
if not self.check_similarity2(article_heading):
return False
return True
def present_on_google_news_2(self, domain):
print(domain)
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}
ggl_news_link = f"https://www.google.com/search?q={domain}&tbm=nws"
req = requests.get(ggl_news_link, headers=headers)
sup = BeautifulSoup(req.content, 'html.parser')
link = sup.find('a', class_='WlydOe')
if link:
nd_domain = urlparse(link['href'])
domain_name = nd_domain.netloc
domain_name = domain_name.replace('www.', '')
print(domain, domain_name)
if domain == domain_name:
return True
return False
def check_similarity2(self, context):
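# ask the legacy OpenAI Completions API whether the headline is supported by the retrieved context; only a leading yes/no in the reply is used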
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"i have two sentences \nsentence1 = {self.headline} \nsentence2 = {context} \ndont consider additional information, based on the contextual similarity, is the first statement true based on second statement yes or no",
temperature=1,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
response_text = response.to_dict()['choices'][0]['text'].replace("\n", "")
print(response_text)
final_response = response_text[:4]
if final_response.lower().find('yes') != -1:
return True
else:
return False
def run(self):
print(self.is_newstitle())
checkTitle("hi").present_on_google_news_2('hello how are you')
# if __name__ == '__main__':
# x = checkTitle("DRDO and L&T will Sign Contract To Develop 'Indigenous' AIP System For indian Navy Submarines")
# tim = time.time()
# p1 = multiprocessing.Process(target=x.spelling_mistakes)
# p2 = multiprocessing.Process(target=x.classify_clickbait)
# p3 = multiprocessing.Process(target=x.subjective_test)
# p4 = multiprocessing.Process(target=x.is_newstitle)
# p5 = multiprocessing.Process(target=x.present_on_google)
# p1.start()
# p2.start()
# p3.start()
# p4.start()
# p5.start()
# p1.join()
# p2.join()
# p3.join()
# p4.join()
# p5.join()
# print(time.time() - tim)
| [] |
2024-01-10 | mivanovitch/7-Days-of-LangChain | day_4~scientific_newsletter.py | """
This script shows how to create a newsletter based on the latest Arxiv articles.
We're using an easy LangChain implementation to show how to use the different components of LangChain.
This is part of my '7 Days of LangChain' series.
Check out the explanation about the code on my Twitter (@JorisTechTalk)
"""
from langchain.document_loaders import ArxivLoader
from langchain.agents.agent_toolkits import GmailToolkit
from langchain import OpenAI
import os
from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain import LLMChain
from langchain.callbacks import get_openai_callback
import arxiv
# Topic of the newsletter you want to write about
query = "LLM"
# Set up the ArxivLoader
search = arxiv.Search(
query = query,
max_results = 4,
sort_by = arxiv.SortCriterion.SubmittedDate
)
# Initialize the docs variable
docs = ""
# Add all relevant information to the docs variable
for result in search.results():
docs += "Title: " + result.title + "\n"
docs += "Abstract: " + result.summary + "\n"
docs += "Download URL: " + result.pdf_url + "\n"
print(result.links)
for link in result.links:
docs += "Links: " + link.href + "\n"
# Track cost
with get_openai_callback() as cb:
# Template for the newsletter
prompt_newsletter_template = """
You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.
Your main goal is to write a newsletter which contains summaries to interest the user in the articles.
--------------------
{text}
--------------------
Start with the title of the article. Then, write a small summary of the article.
Below each summary, include the link to the article containing /abs/ in the URL.
Summaries:
"""
PROMPT_NEWSLETTER = PromptTemplate(template=prompt_newsletter_template, input_variables=["text"])
# Set the OpenAI API key
os.environ['OPENAI_API_KEY'] = 'YOUR_API_KEY_HERE'
# Initialize the language model
llm = ChatOpenAI(temperature=0.6, model_name="gpt-3.5-turbo-16k", verbose=True)
# Initialize the LLMChain
newsletter_chain = LLMChain(llm=llm, prompt=PROMPT_NEWSLETTER, verbose=True)
# Run the LLMChain
newsletter = newsletter_chain.run(docs)
# Write newsletter to a text file
with open("newsletter.txt", "w") as f:
f.write(newsletter)
# Set toolkit
toolkit = GmailToolkit()
# Initialize the Gmail agent
agent = initialize_agent(
tools=toolkit.get_tools(),
llm=llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
# Run the agent
instructions = f"""
Write a draft directed to [email protected], NEVER SEND THE EMAIL.
The subject should be 'Scientific Newsletter about {query}'.
The content should be the following: {newsletter}.
"""
agent.run(instructions)
print(cb) | [
"\n You are a newsletter writer. You write newsletters about scientific articles. You introduce the article and show a small summary to tell the user what the article is about.\n\n You're main goal is to write a newsletter which contains summaries to interest the user in the articles.\n\n --------------------\n {text}\n --------------------\n\n Start with the title of the article. Then, write a small summary of the article.\n\n Below each summary, include the link to the article containing /abs/ in the URL.\n\n Summaries:\n\n "
] |
2024-01-10 | j-min/DallEval | models~dalle_small~DALLE-pytorch~finetune_dalle.py | import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
# from dalle_pytorch.loader import TextImageDataset
# from dalle_pytorch.contrastive_loader import ContrastiveTextImageDataset
from dalle_pytorch.skill_loader import SkillTextImageDataset
from dalle_pytorch.coco_loader import COCOTextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
import torch.nn.functional as F
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--vae_path', type=str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type=str,
help='path to your partially trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
# parser.add_argument('--image_text_folder', type=str, required=True,
# help='path to your folder of images and text for learning the DALL-E')
parser.add_argument('--dataset_dir', type=str,
default='/playpen3/home/jmincho/workspace/datasets',
help='path to your dataset directory')
parser.add_argument('--text_file', type=str,
default='object_train.json',
help='path to your text file')
parser.add_argument(
'--wds',
type = str,
default='',
help = 'Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.'
)
parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
help='Captions passed in which exceed the max token length will be truncated if this is set.')
parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
help='Random resized crop lower ratio')
parser.add_argument('--chinese', dest='chinese', action='store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--hug', dest='hug', action='store_true')
parser.add_argument('--bpe_path', type=str,
help='path to your BPE json file')
parser.add_argument('--dalle_output_file_name', type=str, default = "dalle",
help='output_file_name')
parser.add_argument('--fp16', action='store_true',
help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')
parser.add_argument('--amp', action='store_true',
help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')
parser.add_argument('--wandb_name', default='dalle_train_transformer',
help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')
parser.add_argument('--wandb_entity', default=None,
help='(optional) Name of W&B team/entity to log to.')
parser.add_argument('--wandb_run_name', default=None,
help='')
parser.add_argument('--skill_name', type=str, default='object')
parser.add_argument('--split', type=str, default='train')
parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--flops_profiler', dest = 'flops_profiler', action='store_true', help = 'Exits after printing detailed flops/runtime analysis of forward/backward')
train_group.add_argument('--epochs', default = 20, type = int, help = 'Number of epochs')
train_group.add_argument('--save_every_n_steps', default = 1000, type = int, help = 'Save a checkpoint every n steps')
train_group.add_argument('--keep_n_checkpoints', default = None, type = int, help = '(Careful) Deletes old deepspeed checkpoints if there are more than n')
train_group.add_argument('--batch_size', default = 4, type = int, help = 'Batch size')
train_group.add_argument('--ga_steps', default = 1, type = int, help = 'Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')
train_group.add_argument('--learning_rate', default = 3e-4, type = float, help = 'Learning rate')
train_group.add_argument('--clip_grad_norm', default = 0.5, type = float, help = 'Clip gradient norm')
train_group.add_argument('--lr_decay', dest = 'lr_decay', action = 'store_true')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--dim', default = 512, type = int, help = 'Model dimension')
model_group.add_argument('--text_seq_len', default = 256, type = int, help = 'Text sequence length')
model_group.add_argument('--depth', default = 2, type = int, help = 'Model depth')
model_group.add_argument('--heads', default = 8, type = int, help = 'Model number of heads')
model_group.add_argument('--dim_head', default = 64, type = int, help = 'Model head dimension')
train_group.add_argument('--ff_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
train_group.add_argument('--attn_dropout', default = 0.0, type = float, help = 'Attention dropout.')
model_group.add_argument('--reversible', dest = 'reversible', action='store_true')
model_group.add_argument('--loss_img_weight', default = 7, type = int, help = 'Image loss weight')
model_group.add_argument('--attn_types', default = 'full', type = str, help = 'comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')
model_group.add_argument('--coco_caption', action='store_true')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
def get_trainable_params(model):
return [params for params in model.parameters() if params.requires_grad]
def cp_path_to_dir(cp_path, tag):
"""Convert a checkpoint path to a directory with `tag` inserted.
If `cp_path` is already a directory, return it unchanged.
"""
if not isinstance(cp_path, Path):
cp_path = Path(cp_path)
if cp_path.is_dir():
return cp_path
path_sans_extension = cp_path.parent / cp_path.stem
cp_dir = Path(f'{path_sans_extension}-{tag}-cp')
return cp_dir
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = True if len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2 else False
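# WebDataset mode is enabled only when --wds supplies both an image and a caption column name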
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
ATTN_TYPES = tuple(args.attn_types.split(','))
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
# quit early if you used the wrong folder name
# assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
assert Path(args.dataset_dir).exists(), f'The path {args.dataset_dir} was not found.'
else:
# quit early if no tar files were found
if Path(args.image_text_folder).is_dir():
DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(args.image_text_folder)
print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
elif ('http://' in args.image_text_folder.lower()) | ('https://' in args.image_text_folder.lower()):
DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
print('Found http(s) link under given path {}!'.format(args.image_text_folder))
elif 'gs://' in args.image_text_folder.lower():
DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
print('Found {} GCS link under given path!'.format(len(DATASET), args.image_text_folder))
elif '.tar' in args.image_text_folder:
DATASET = args.image_text_folder
print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
else:
raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
dalle_params = dict(
**dalle_params
)
IMAGE_SIZE = vae.image_size
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if distr_backend.is_root_worker():
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
)
resume_epoch = 0
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
def group_weight(model):
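# split transformer parameters into a weight-decay group and a no-decay group (biases and norms are excluded from decay)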
group_decay, group_no_decay = [], []
for params in model.named_parameters():
if 'transformer' in params[0]:
if 'bias' in params[0] or 'norm' in params[0]:
group_no_decay.append(params[1])
continue
group_decay.append(params[1])
assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert('RGB')
if img.mode != 'RGB' else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
return Image.open(BytesIO(b))
def tokenize(s):
return tokenizer.tokenize(
s.decode('utf-8'),
TEXT_SEQ_LEN,
truncate_text=args.truncate_captions).squeeze(0)
if ENABLE_WEBDATASET:
DATASET_SIZE = int(1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
image_text_mapping = {
myimg: imagetransform,
mycap: tokenize
}
image_mapping = {
myimg: imagepreproc
}
num_batches = DATASET_SIZE // BATCH_SIZE
ds = (
wds.WebDataset(DATASET, length=num_batches)
# .shuffle(is_shuffle) # Commented out for WebDataset as the behaviour cannot be predicted yet
.map_dict(**image_text_mapping)
.map_dict(**image_mapping)
.to_tuple(mycap, myimg)
.batched(BATCH_SIZE, partial=False) # It is good to avoid partial batches when using Distributed training
)
else:
# ds = TextImageDataset(
# ds = ContrastiveTextImageDataset(
# mutant_image_dir=args.dataset_dir+'vqa-mutant/images/',
# coco_image_dir=args.dataset_dir+'COCO/images/',
# text_data_file=args.dataset_dir+args.text_file,
# # args.image_text_folder,
# text_len=TEXT_SEQ_LEN,
# image_size=IMAGE_SIZE,
# resize_ratio=args.resize_ratio,
# truncate_captions=args.truncate_captions,
# tokenizer=tokenizer,
# shuffle=is_shuffle,
# )
if args.coco_caption:
image_dir = str(Path(args.dataset_dir) / 'images')
text_data_file = str(Path(args.dataset_dir) / 'dataset_coco.json')
ds = COCOTextImageDataset(
# skill_name=args.skill_name,
split=args.split,
image_dir=image_dir,
# text_data_file=text_data_file,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
load_image=True
)
else:
image_dir = str(Path(args.dataset_dir) / args.skill_name / 'images')
text_data_file = str(Path(args.dataset_dir) / args.skill_name / 'scenes' / args.text_file)
ds = SkillTextImageDataset(
skill_name=args.skill_name,
split=args.split,
image_dir=image_dir,
text_data_file=text_data_file,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
)
assert len(ds) > 0, 'dataset is empty'
if distr_backend.is_root_worker():
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
else:
data_sampler = None
if ENABLE_WEBDATASET:
# WebLoader for WebDataset and DeepSpeed compatibility
dl = wds.WebLoader(ds, batch_size=None, shuffle=False) # optionally add num_workers=2 (n) argument
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
dl = dl.repeat(2).slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
# if RESUME and not using_deepspeed:
# dalle.load_state_dict(weights)
if RESUME:
# if args.transformer_scratch:
# for k in list(weights.keys()):
# if 'vae' not in k:
# del weights[k]
# results = dalle.load_state_dict(weights, strict=False)
# print('Randomly initialized Transformer checkpoint')
# else:
results = dalle.load_state_dict(weights)
print(results)
print('Loaded DALLE checkpoint')
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
else:
scheduler = None
if distr_backend.is_root_worker():
# experiment tracker
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
run.name = args.wandb_run_name
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
print("See the deespeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print("If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=ds if using_deepspeed else dl,
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
def save_model(path, epoch=0):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'epoch': epoch,
}
if using_deepspeed:
cp_dir = cp_path_to_dir(path, 'ds')
if KEEP_N_CHECKPOINTS is not None and distr_backend.is_root_worker():
checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
shutil.rmtree(checkpoint)
distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)
if not distr_backend.is_root_worker():
return
# Save auxiliary values so we can reuse the standard routine
# for loading.
save_obj = {
**save_obj,
# Save a nonsense value that directs the user to
# further help.
'weights': (
'To get a working standard checkpoint, '
'look into consolidating DeepSpeed checkpoints.'
),
}
torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2: # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
return
if not distr_backend.is_root_worker():
return
save_obj = {
**save_obj,
'weights': dalle.state_dict(),
'opt_state': opt.state_dict(),
}
save_obj['scheduler_state'] = (scheduler.state_dict() if scheduler else None)
torch.save(save_obj, path)
# training
from tqdm import tqdm
# Saves a checkpoint before training begins to fail early when mis-configured.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
for epoch in range(resume_epoch, EPOCHS):
if data_sampler:
data_sampler.set_epoch(epoch)
if ENABLE_WEBDATASET:
dataloader = dl
else:
dataloader = distr_dl
for i, (text, images) in enumerate(
tqdm(dataloader, desc=f"{args.skill_name} Epoch {epoch}")):
if i % 10 == 0 and distr_backend.is_root_worker():
t = time.time()
if args.fp16:
images = images.half()
text, images = map(lambda t: t.cuda(), (text, images))
loss = distr_dalle(text, images, return_loss=True, reduce_loss=True)
if using_deepspeed:
distr_dalle.backward(loss)
distr_dalle.step()
# Gradients are automatically zeroed after the step
else:
loss.backward()
clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
distr_opt.step()
distr_opt.zero_grad()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
log = {}
if i % 10 == 0 and distr_backend.is_root_worker():
# print(epoch, i, f'loss - {avg_loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item()
}
# if i % SAVE_EVERY_N_STEPS == 0:
# save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if i % 1000 == 0:
if distr_backend.is_root_worker():
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
if not avoid_model_calls:
# CUDA index errors when we don't guard this
image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
log = {
**log,
}
if not avoid_model_calls:
log['image'] = wandb.Image(image, caption=decoded_text)
if i % 10 == 9 and distr_backend.is_root_worker():
sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
log["sample_per_sec"] = sample_per_sec
# print(epoch, i, f'sample_per_sec - {sample_per_sec}')
if i == 201 and args.flops_profiler:
raise StopIteration("Profiler has finished running. Stopping training early.")
if distr_backend.is_root_worker():
wandb.log(log)
if LR_DECAY:
distr_scheduler.step(avg_loss)
# save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
# if distr_backend.is_root_worker():
# # save trained model to wandb as an artifact every epoch's end\
# model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
# model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
# run.log_artifact(model_artifact)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
wandb.save(DALLE_OUTPUT_FILE_NAME)
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
wandb.finish()
| [] |
2024-01-10 | j-min/DallEval | models~dalle_small~DALLE-pytorch~dalle_pytorch~dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
import numpy as np
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch import distributed_utils, tokenizer
from dalle_pytorch.vae import OpenAIDiscreteVAE, VQGanVAE
from dalle_pytorch.transformer import Transformer, DivideMax
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def set_requires_grad(model, value):
for param in model.parameters():
param.requires_grad = value
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
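# top_k keeps roughly the top (1 - thres) fraction of logits and masks the rest to -inf, so sampling only draws from the most likely tokens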
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.,
normalization = ((0.5,) * 3, (0.5,) * 3)
):
super().__init__()
assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(self, self.codebook.weight)
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None,
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
img = self.norm(img)
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
num_visual_tokens = 512,
visual_enc_depth = 6,
visual_heads = 8,
visual_image_size = 256,
visual_patch_size = 32,
channels = 3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
labels = torch.arange(b, device = device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens = 10000,
text_seq_len = 256,
depth,
heads = 8,
dim_head = 64,
reversible = False,
attn_dropout = 0.,
ff_dropout = 0,
sparse_attn = False,
attn_types = None,
loss_img_weight = 7,
stable = False,
):
super().__init__()
assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE)), 'vae must be an instance of DiscreteVAE'
image_size = vae.image_size
num_image_tokens = vae.num_tokens
image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
image_seq_len = image_fmap_size ** 2
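# each image is encoded into an image_fmap_size x image_fmap_size grid of discrete VAE codes, flattened to image_seq_len tokens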
num_text_tokens = num_text_tokens + text_seq_len # reserve unique padding tokens for each position (text seq len)
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) # +1 for <bos>
self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size))
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
set_requires_grad(self.vae, False) # freeze VAE from being trained
self.transformer = Transformer(
dim = dim,
causal = True,
seq_len = seq_len,
depth = depth,
heads = heads,
dim_head = dim_head,
reversible = reversible,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
attn_types = attn_types,
image_fmap_size = image_fmap_size,
sparse_attn = sparse_attn,
stable = stable
)
self.stable = stable
if stable:
self.norm_by_max = DivideMax(dim = -1)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
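# build a (sequence position x vocabulary) mask so text positions can only predict text tokens and image positions only image tokens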
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask, persistent=False)
self.loss_img_weight = loss_img_weight
@torch.no_grad()
@eval_decorator
def generate_texts(
self,
text=None,
*,
filter_thres = 0.5,
temperature = 1.
):
text_seq_len = self.text_seq_len
if text is None or text == "":
text_tokens = torch.tensor([[0]]).cuda()
else:
text_tokens = torch.tensor(tokenizer.tokenizer.encode(text)).cuda().unsqueeze(0)
for _ in range(text_tokens.shape[1], text_seq_len):
device = text_tokens.device
tokens = self.text_emb(text_tokens)
tokens += self.text_pos_emb(torch.arange(text_tokens.shape[1], device = device))
seq_len = tokens.shape[1]
output_transf = self.transformer(tokens)
if self.stable:
output_transf = self.norm_by_max(output_transf)
logits = self.to_logits(output_transf)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
logits = logits[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
text_tokens = torch.cat((text_tokens, sample), dim=-1)
padding_tokens = set(np.arange(self.text_seq_len) + (self.num_text_tokens - self.text_seq_len))
texts = [tokenizer.tokenizer.decode(text_token, pad_tokens=padding_tokens) for text_token in text_tokens]
return text_tokens, texts
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip = None,
mask = None,
filter_thres = 0.5,
temperature = 1.,
img = None,
num_init_img_tokens = None,
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
text = text[:, :text_seq_len] # make sure text is within bounds
out = text
if exists(img):
image_size = vae.image_size
assert img.shape[1] == 3 and img.shape[2] == image_size and img.shape[3] == image_size, f'input image must have the correct image size {image_size}'
indices = vae.get_codebook_indices(img)
num_img_tokens = default(num_init_img_tokens, int(0.4375 * image_seq_len)) # OpenAI used 14 * 32 initial tokens to prime
assert num_img_tokens < image_seq_len, 'number of initial image tokens for priming must be less than the total image token sequence length'
indices = indices[:, :num_img_tokens]
out = torch.cat((out, indices), dim = -1)
for cur_len in range(out.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self(text, image, mask = mask)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample), dim=-1)
if out.shape[1] <= text_seq_len:
mask = F.pad(mask, (0, 1), value = True)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss = False)
return images, scores
return images
def forward(
self,
text,
image = None,
mask = None,
return_loss = False,
reduce_loss=True,
):
assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
device, total_seq_len = text.device, self.total_seq_len
# make sure padding in text tokens get unique padding token id
text_range = torch.arange(self.text_seq_len, device = device) + (self.num_text_tokens - self.text_seq_len)
text = torch.where(text == 0, text_range, text)
# add <bos>
text = F.pad(text, (1, 0), value = 0)
tokens = self.text_emb(text)
tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 4
# training
if is_raw_image:
image_size = self.vae.image_size
assert tuple(image.shape[1:]) == (3, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = self.vae.get_codebook_indices(image)
image_len = image.shape[1]
image_emb = self.image_emb(image)
# image_emb += self.image_pos_emb(image_emb)
image_pos_emb = self.image_pos_emb(image_emb)
image_emb += image_pos_emb
tokens = torch.cat((tokens, image_emb), dim = 1)
seq_len += image_len
# when training, if the length exceeds the total text + image length
# remove the last token, since it needs not to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
# print('after modification', 'seq len:', seq_len, 'tokens:', tokens.shape)
out = self.transformer(tokens)
if self.stable:
out = self.norm_by_max(out)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
logits = rearrange(logits, 'b n c -> b c n')
if reduce_loss:
# print('logits', logits.shape)
# print('labels', labels.shape)
loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:])
loss = (loss_text + self.loss_img_weight * loss_img) / (self.loss_img_weight + 1)
return loss
else:
# loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len], reduction='none')
text_logits = logits[:, :, :self.text_seq_len]
text_labels = labels[:, :self.text_seq_len]
# loss_text = F.cross_entropy(logits[:, :, :self.text_seq_len], labels[:, :self.text_seq_len])
img_logits = logits[:, :, self.text_seq_len:]
img_labels = labels[:, self.text_seq_len:]
# loss_img = F.cross_entropy(logits[:, :, self.text_seq_len:], labels[:, self.text_seq_len:], reduction='none')
return text_logits, text_labels, img_logits, img_labels
| [] |
2024-01-10 | j-min/DallEval | models~dalle_small~DALLE-pytorch~dalle_pytorch~vae.py | import io
import sys
import os
import requests
import PIL
import warnings
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt, log
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
import importlib
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch import distributed_utils
# constants
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
# helpers methods
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def load_model(path):
with open(path, 'rb') as f:
return torch.load(f, map_location = torch.device('cpu'))
def map_pixels(x, eps = 0.1):
return (1 - 2 * eps) * x + eps
def unmap_pixels(x, eps = 0.1):
return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)
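# map_pixels / unmap_pixels shift image values slightly away from 0 and 1, matching the preprocessing used by OpenAI's released discrete VAE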
def download(url, filename = None, root = CACHE_PATH):
if (
not distributed_utils.is_distributed
or distributed_utils.backend.is_local_root_worker()
):
os.makedirs(root, exist_ok = True)
filename = default(filename, os.path.basename(url))
download_target = os.path.join(root, filename)
download_target_tmp = os.path.join(root, f'tmp.{filename}')
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if (
distributed_utils.is_distributed
and not distributed_utils.backend.is_local_root_worker()
and not os.path.isfile(download_target)
):
# If the file doesn't exist yet, wait until it's downloaded by the root worker.
distributed_utils.backend.local_barrier()
if os.path.isfile(download_target):
return download_target
with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
os.rename(download_target_tmp, download_target)
if (
distributed_utils.is_distributed
and distributed_utils.backend.is_local_root_worker()
):
distributed_utils.backend.local_barrier()
return download_target
def make_contiguous(module):
with torch.no_grad():
for param in module.parameters():
param.set_(param.contiguous())
# pretrained Discrete VAE from OpenAI
class OpenAIDiscreteVAE(nn.Module):
def __init__(self):
super().__init__()
self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))
make_contiguous(self)
self.num_layers = 3
self.image_size = 256
self.num_tokens = 8192
@torch.no_grad()
def get_codebook_indices(self, img):
img = map_pixels(img)
z_logits = self.enc.blocks(img)
z = torch.argmax(z_logits, dim = 1)
return rearrange(z, 'b h w -> b (h w)')
def decode(self, img_seq):
b, n = img_seq.shape
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))
z = F.one_hot(img_seq, num_classes = self.num_tokens)
z = rearrange(z, 'b h w c -> b c h w').float()
x_stats = self.dec(z).float()
x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
return x_rec
def forward(self, img):
raise NotImplementedError
# VQGAN from Taming Transformers paper
# https://arxiv.org/abs/2012.09841
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
if not "target" in config:
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
class VQGanVAE(nn.Module):
def __init__(self, vqgan_model_path=None, vqgan_config_path=None):
super().__init__()
if vqgan_model_path is None:
model_filename = 'vqgan.1024.model.ckpt'
config_filename = 'vqgan.1024.config.yml'
download(VQGAN_VAE_CONFIG_PATH, config_filename)
download(VQGAN_VAE_PATH, model_filename)
config_path = str(Path(CACHE_PATH) / config_filename)
model_path = str(Path(CACHE_PATH) / model_filename)
else:
model_path = vqgan_model_path
config_path = vqgan_config_path
config = OmegaConf.load(config_path)
model = instantiate_from_config(config["model"])
state = torch.load(model_path, map_location = 'cpu')['state_dict']
model.load_state_dict(state, strict = False)
print(f"Loaded VQGAN from {model_path} and {config_path}")
self.model = model
# f as used in https://github.com/CompVis/taming-transformers#overview-of-pretrained-models
f = config.model.params.ddconfig.resolution / config.model.params.ddconfig.attn_resolutions[0]
self.num_layers = int(log(f)/log(2))
self.image_size = 256
self.num_tokens = config.model.params.n_embed
self.is_gumbel = isinstance(self.model, GumbelVQ)
# self.is_gumbel = False
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(
self, self.model.quantize.embed.weight if self.is_gumbel else self.model.quantize.embedding.weight)
@torch.no_grad()
def get_codebook_indices(self, img):
b = img.shape[0]
img = (2 * img) - 1
_, _, [_, _, indices] = self.model.encode(img)
if self.is_gumbel:
return rearrange(indices, 'b h w -> b (h w)', b=b)
return rearrange(indices, '(b n) -> b n', b = b)
def decode(self, img_seq):
b, n = img_seq.shape
one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
z = one_hot_indices @ self.model.quantize.embed.weight if self.is_gumbel \
else (one_hot_indices @ self.model.quantize.embedding.weight)
z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
img = self.model.decode(z)
img = (img.clamp(-1., 1.) + 1) * 0.5
return img
def forward(self, img):
raise NotImplementedError
| [] |
2024-01-10 | j-min/DallEval | models~dalle_small~DALLE-pytorch~train_dalle.py | import argparse
from pathlib import Path
import time
from glob import glob
import os
import shutil
import torch
import wandb # Quit early if user doesn't have wandb installed.
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from dalle_pytorch import OpenAIDiscreteVAE, VQGanVAE, DiscreteVAE, DALLE
from dalle_pytorch import distributed_utils
from dalle_pytorch.loader import TextImageDataset
from dalle_pytorch.tokenizer import tokenizer, HugTokenizer, ChineseTokenizer, YttmTokenizer
# libraries needed for webdataset support
import webdataset as wds
from torchvision import transforms as T
from PIL import Image
from io import BytesIO
# argument parsing
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--vae_path', type=str,
help='path to your trained discrete VAE')
group.add_argument('--dalle_path', type=str,
help='path to your partially trained DALL-E')
parser.add_argument('--vqgan_model_path', type=str, default = None,
help='path to your trained VQGAN weights. This should be a .ckpt file. (only valid when taming option is enabled)')
parser.add_argument('--vqgan_config_path', type=str, default = None,
help='path to your trained VQGAN config. This should be a .yaml file. (only valid when taming option is enabled)')
parser.add_argument('--image_text_folder', type=str, required=True,
help='path to your folder of images and text for learning the DALL-E')
parser.add_argument(
'--wds',
type = str,
default='',
help = 'Comma separated list of WebDataset (1) image and (2) text column names. Must contain 2 values, e.g. img,cap.'
)
parser.add_argument('--truncate_captions', dest='truncate_captions', action='store_true',
help='Captions passed in which exceed the max token length will be truncated if this is set.')
parser.add_argument('--random_resize_crop_lower_ratio', dest='resize_ratio', type=float, default=0.75,
help='Random resized crop lower ratio')
parser.add_argument('--chinese', dest='chinese', action='store_true')
parser.add_argument('--taming', dest='taming', action='store_true')
parser.add_argument('--hug', dest='hug', action='store_true')
parser.add_argument('--bpe_path', type=str,
help='path to your BPE json file')
parser.add_argument('--dalle_output_file_name', type=str, default = "dalle",
help='output_file_name')
parser.add_argument('--fp16', action='store_true',
help='(experimental) - Enable DeepSpeed 16 bit precision. Reduces VRAM.')
parser.add_argument('--amp', action='store_true',
help='Apex "O1" automatic mixed precision. More stable than 16 bit precision. Can\'t be used in conjunction with deepspeed zero stages 1-3.')
parser.add_argument('--wandb_name', default='dalle_train_transformer',
help='Name W&B will use when saving results.\ne.g. `--wandb_name "coco2017-full-sparse"`')
parser.add_argument('--wandb_entity', default=None,
help='(optional) Name of W&B team/entity to log to.')
parser.add_argument('--stable_softmax', dest='stable_softmax', action='store_true',
help='Prevent values from becoming too large during softmax. Helps with stability in fp16 and Mixture of Quantization training.')
parser = distributed_utils.wrap_arg_parser(parser)
train_group = parser.add_argument_group('Training settings')
train_group.add_argument('--flops_profiler', dest = 'flops_profiler', action='store_true', help = 'Exits after printing detailed flops/runtime analysis of forward/backward')
train_group.add_argument('--epochs', default = 20, type = int, help = 'Number of epochs')
train_group.add_argument('--save_every_n_steps', default = 1000, type = int, help = 'Save a checkpoint every n steps')
train_group.add_argument('--keep_n_checkpoints', default = None, type = int, help = '(Careful) Deletes old deepspeed checkpoints if there are more than n')
train_group.add_argument('--batch_size', default = 4, type = int, help = 'Batch size')
train_group.add_argument('--ga_steps', default = 1, type = int, help = 'Number of steps to accumulate gradients across per each iteration. DeepSpeed only.')
train_group.add_argument('--learning_rate', default = 3e-4, type = float, help = 'Learning rate')
train_group.add_argument('--clip_grad_norm', default = 0.5, type = float, help = 'Clip gradient norm')
train_group.add_argument('--lr_decay', dest = 'lr_decay', action = 'store_true')
model_group = parser.add_argument_group('Model settings')
model_group.add_argument('--dim', default = 512, type = int, help = 'Model dimension')
model_group.add_argument('--text_seq_len', default = 256, type = int, help = 'Text sequence length')
model_group.add_argument('--depth', default = 2, type = int, help = 'Model depth')
model_group.add_argument('--heads', default = 8, type = int, help = 'Model number of heads')
model_group.add_argument('--dim_head', default = 64, type = int, help = 'Model head dimension')
train_group.add_argument('--ff_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
train_group.add_argument('--attn_dropout', default = 0.0, type = float, help = 'Feed forward dropout.')
model_group.add_argument('--reversible', dest = 'reversible', action='store_true')
model_group.add_argument('--loss_img_weight', default = 7, type = int, help = 'Image loss weight')
model_group.add_argument('--attn_types', default = 'full', type = str, help = 'comma separated list of attention types. attention type can be: full or sparse or axial_row or axial_col or conv_like.')
args = parser.parse_args()
# helpers
def exists(val):
return val is not None
def get_trainable_params(model):
return [params for params in model.parameters() if params.requires_grad]
def cp_path_to_dir(cp_path, tag):
"""Convert a checkpoint path to a directory with `tag` inserted.
If `cp_path` is already a directory, return it unchanged.
"""
if not isinstance(cp_path, Path):
cp_path = Path(cp_path)
if cp_path.is_dir():
return cp_path
path_sans_extension = cp_path.parent / cp_path.stem
cp_dir = Path(f'{path_sans_extension}-{tag}-cp')
return cp_dir
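# Illustrative only (not part of the original script): how the helper maps paths.
# cp_path_to_dir('dalle.pt', 'ds') -> Path('dalle-ds-cp')
# cp_path_to_dir('checkpoints', 'ds') -> Path('checkpoints') if that directory already exists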
# constants
WEBDATASET_IMAGE_TEXT_COLUMNS = tuple(args.wds.split(','))
ENABLE_WEBDATASET = True if len(WEBDATASET_IMAGE_TEXT_COLUMNS) == 2 else False
DALLE_OUTPUT_FILE_NAME = args.dalle_output_file_name + ".pt"
VAE_PATH = args.vae_path
VQGAN_MODEL_PATH = args.vqgan_model_path
VQGAN_CONFIG_PATH = args.vqgan_config_path
DALLE_PATH = args.dalle_path
RESUME = exists(DALLE_PATH)
EPOCHS = args.epochs
BATCH_SIZE = args.batch_size
LEARNING_RATE = args.learning_rate
GRAD_CLIP_NORM = args.clip_grad_norm
LR_DECAY = args.lr_decay
SAVE_EVERY_N_STEPS = args.save_every_n_steps
KEEP_N_CHECKPOINTS = args.keep_n_checkpoints
MODEL_DIM = args.dim
TEXT_SEQ_LEN = args.text_seq_len
DEPTH = args.depth
HEADS = args.heads
DIM_HEAD = args.dim_head
REVERSIBLE = args.reversible
LOSS_IMG_WEIGHT = args.loss_img_weight
FF_DROPOUT = args.ff_dropout
ATTN_DROPOUT = args.attn_dropout
STABLE = args.stable_softmax
ATTN_TYPES = tuple(args.attn_types.split(','))
DEEPSPEED_CP_AUX_FILENAME = 'auxiliary.pt'
if not ENABLE_WEBDATASET:
# quit early if you used the wrong folder name
assert Path(args.image_text_folder).exists(), f'The path {args.image_text_folder} was not found.'
else:
# quit early if no tar files were found
if Path(args.image_text_folder).is_dir():
DATASET = [str(p) for p in Path(args.image_text_folder).glob("**/*") if ".tar" in str(p).lower()] # .name
assert len(DATASET) > 0, 'The directory ({}) does not contain any WebDataset/.tar files.'.format(args.image_text_folder)
print('Found {} WebDataset .tar(.gz) file(s) under given path {}!'.format(len(DATASET), args.image_text_folder))
elif ('http://' in args.image_text_folder.lower()) | ('https://' in args.image_text_folder.lower()):
DATASET = f"pipe:curl -L -s {args.image_text_folder} || true"
print('Found http(s) link under given path {}!'.format(args.image_text_folder))
elif 'gs://' in args.image_text_folder.lower():
DATASET = f"pipe:gsutil cat {args.image_text_folder} || true"
print('Found GCS link under given path {}!'.format(args.image_text_folder))
elif '.tar' in args.image_text_folder:
DATASET = args.image_text_folder
print('Found WebDataset .tar(.gz) file under given path {}!'.format(args.image_text_folder))
else:
raise Exception('No folder, no .tar(.gz) and no url pointing to tar files provided under {}.'.format(args.image_text_folder))
# initialize distributed backend
distr_backend = distributed_utils.set_backend_from_args(args)
distr_backend.initialize()
using_deepspeed = \
distributed_utils.using_backend(distributed_utils.DeepSpeedBackend)
# tokenizer
if exists(args.bpe_path):
klass = HugTokenizer if args.hug else YttmTokenizer
tokenizer = klass(args.bpe_path)
elif args.chinese:
tokenizer = ChineseTokenizer()
# reconstitute vae
if RESUME:
dalle_path = Path(DALLE_PATH)
if using_deepspeed:
cp_dir = cp_path_to_dir(dalle_path, 'ds')
assert cp_dir.is_dir(), \
f'DeepSpeed checkpoint directory {cp_dir} not found'
dalle_path = cp_dir / DEEPSPEED_CP_AUX_FILENAME
else:
assert dalle_path.exists(), 'DALL-E model file does not exist'
loaded_obj = torch.load(str(dalle_path), map_location='cpu')
dalle_params, vae_params, weights = loaded_obj['hparams'], loaded_obj['vae_params'], loaded_obj['weights']
opt_state = loaded_obj.get('opt_state')
scheduler_state = loaded_obj.get('scheduler_state')
if vae_params is not None:
vae = DiscreteVAE(**vae_params)
else:
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
dalle_params = dict(
**dalle_params
)
IMAGE_SIZE = vae.image_size
resume_epoch = loaded_obj.get('epoch', 0)
else:
if exists(VAE_PATH):
vae_path = Path(VAE_PATH)
assert vae_path.exists(), 'VAE model file does not exist'
assert not vae_path.is_dir(), \
('Cannot load VAE model from directory; please use a '
'standard *.pt checkpoint. '
'Currently, merging a DeepSpeed-partitioned VAE into a DALLE '
'model is not supported.')
loaded_obj = torch.load(str(vae_path))
vae_params, weights = loaded_obj['hparams'], loaded_obj['weights']
vae = DiscreteVAE(**vae_params)
vae.load_state_dict(weights)
else:
if distr_backend.is_root_worker():
print('using pretrained VAE for encoding images to tokens')
vae_params = None
if args.taming:
vae = VQGanVAE(VQGAN_MODEL_PATH, VQGAN_CONFIG_PATH)
else:
vae = OpenAIDiscreteVAE()
IMAGE_SIZE = vae.image_size
dalle_params = dict(
num_text_tokens=tokenizer.vocab_size,
text_seq_len=TEXT_SEQ_LEN,
dim=MODEL_DIM,
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD,
reversible=REVERSIBLE,
loss_img_weight=LOSS_IMG_WEIGHT,
attn_types=ATTN_TYPES,
ff_dropout=FF_DROPOUT,
attn_dropout=ATTN_DROPOUT,
stable=STABLE,
)
resume_epoch = 0
# configure OpenAI VAE for float16s
if isinstance(vae, OpenAIDiscreteVAE) and args.fp16:
vae.enc.blocks.output.conv.use_float16 = True
# helpers
def group_weight(model):
group_decay, group_no_decay = [], []
for params in model.named_parameters():
if 'transformer' in params[0]:
if 'bias' in params[0] or 'norm' in params[0]:
group_no_decay.append(params[1])
continue
group_decay.append(params[1])
assert len(list(model.parameters())) == len(group_decay) + len(group_no_decay)
groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=.0)]
return groups
# create dataset and dataloader
is_shuffle = not distributed_utils.using_backend(distributed_utils.HorovodBackend)
imagepreproc = T.Compose([
T.Lambda(lambda img: img.convert('RGB')
if img.mode != 'RGB' else img),
T.RandomResizedCrop(IMAGE_SIZE,
scale=(args.resize_ratio, 1.),
ratio=(1., 1.)),
T.ToTensor(),
])
def imagetransform(b):
return Image.open(BytesIO(b))
def tokenize(s):
return tokenizer.tokenize(
s.decode('utf-8'),
TEXT_SEQ_LEN,
truncate_text=args.truncate_captions).squeeze(0)
if ENABLE_WEBDATASET:
DATASET_SIZE = int(1e9) # You need to set a nominal length for the Dataset in order to avoid warnings from DataLoader
myimg, mycap = WEBDATASET_IMAGE_TEXT_COLUMNS
image_text_mapping = {
myimg: imagetransform,
mycap: tokenize
}
image_mapping = {
myimg: imagepreproc
}
num_batches = DATASET_SIZE // BATCH_SIZE
ds = (
wds.WebDataset(DATASET, length=num_batches)
# .shuffle(is_shuffle) # Commented out for WebDataset as the behaviour cannot be predicted yet
.map_dict(**image_text_mapping)
.map_dict(**image_mapping)
.to_tuple(mycap, myimg)
.batched(BATCH_SIZE, partial=False) # It is good to avoid partial batches when using Distributed training
)
else:
ds = TextImageDataset(
args.image_text_folder,
text_len=TEXT_SEQ_LEN,
image_size=IMAGE_SIZE,
resize_ratio=args.resize_ratio,
truncate_captions=args.truncate_captions,
tokenizer=tokenizer,
shuffle=is_shuffle,
)
assert len(ds) > 0, 'dataset is empty'
if distr_backend.is_root_worker():
if not ENABLE_WEBDATASET:
print(f'{len(ds)} image-text pairs found for training')
if not is_shuffle:
data_sampler = torch.utils.data.distributed.DistributedSampler(
ds,
num_replicas=distr_backend.get_world_size(),
rank=distr_backend.get_rank()
)
else:
data_sampler = None
if ENABLE_WEBDATASET:
# WebLoader for WebDataset and DeepSpeed compatibility
dl = wds.WebLoader(ds, batch_size=None, shuffle=False) # optionally add num_workers=2 (n) argument
number_of_batches = DATASET_SIZE // (BATCH_SIZE * distr_backend.get_world_size())
dl = dl.repeat(2).slice(number_of_batches)
dl.length = number_of_batches
else:
# Regular DataLoader for image-text-folder datasets
dl = DataLoader(ds, batch_size=BATCH_SIZE, shuffle=is_shuffle, drop_last=True, sampler=data_sampler)
# initialize DALL-E
dalle = DALLE(vae=vae, **dalle_params)
if not using_deepspeed:
if args.fp16:
dalle = dalle.half()
dalle = dalle.cuda()
if RESUME and not using_deepspeed:
dalle.load_state_dict(weights)
# optimizer
opt = Adam(get_trainable_params(dalle), lr=LEARNING_RATE)
if RESUME and opt_state:
opt.load_state_dict(opt_state)
if LR_DECAY:
scheduler = ReduceLROnPlateau(
opt,
mode="min",
factor=0.5,
patience=10,
cooldown=10,
min_lr=1e-6,
verbose=True,
)
if RESUME and scheduler_state:
scheduler.load_state_dict(scheduler_state)
else:
scheduler = None
if distr_backend.is_root_worker():
# experiment tracker
model_config = dict(
depth=DEPTH,
heads=HEADS,
dim_head=DIM_HEAD
)
run = wandb.init(
project=args.wandb_name,
entity=args.wandb_entity,
resume=False,
config=model_config,
)
# distribute
distr_backend.check_batch_size(BATCH_SIZE)
deepspeed_config = {
'train_batch_size': BATCH_SIZE,
'gradient_accumulation_steps': args.ga_steps,
'gradient_clipping': GRAD_CLIP_NORM,
'fp16': {
'enabled': args.fp16,
},
'amp': {
'enabled': args.amp,
'opt_level': 'O1',
},
"flops_profiler": {
"enabled": args.flops_profiler,
"profile_step": 200,
"module_depth": -1,
"top_modules": 1,
"detailed": True,
"output_file": None # TODO Can't get this to work.
},
}
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2:
print(f"Checkpoints made with DeepSpeed ZeRO Stages 2 and 3 will be stored in deepspeed checkpoint folder")
print(f"As such, they will require DeepSpeed as a dependency in order to resume from or generate with.")
print("See the deespeed conversion script for details on how to convert your ZeRO stage 2/3 checkpoint to a single file.")
print("If using a single GPU, consider running with apex automatic mixed precision instead for a similar speedup to ZeRO.")
time.sleep(2)
(distr_dalle, distr_opt, distr_dl, distr_scheduler) = distr_backend.distribute(
args=args,
model=dalle,
optimizer=opt,
model_parameters=get_trainable_params(dalle),
training_data=ds if using_deepspeed else dl,
# Do not pass the LR scheduler to DeepSpeed so we can manually
# advance it.
lr_scheduler=scheduler if LR_DECAY and not using_deepspeed else None,
config_params=deepspeed_config,
)
# Prefer scheduler in `deepspeed_config`.
if LR_DECAY and distr_scheduler is None:
distr_scheduler = scheduler
avoid_model_calls = using_deepspeed and args.fp16
if RESUME and using_deepspeed:
distr_dalle.load_checkpoint(str(cp_dir))
def save_model(path, epoch=0):
save_obj = {
'hparams': dalle_params,
'vae_params': vae_params,
'epoch': epoch,
}
if using_deepspeed:
cp_dir = cp_path_to_dir(path, 'ds')
if KEEP_N_CHECKPOINTS is not None and distr_backend.is_root_worker():
checkpoints = sorted(glob(str(cp_dir / "global*")), key=os.path.getmtime, reverse=True)
for checkpoint in checkpoints[KEEP_N_CHECKPOINTS:]:
shutil.rmtree(checkpoint)
distr_dalle.save_checkpoint(cp_dir, client_state=save_obj)
if not distr_backend.is_root_worker():
return
# Save auxiliary values so we can reuse the standard routine
# for loading.
save_obj = {
**save_obj,
# Save a nonsense value that directs the user to
# further help.
'weights': (
'To get a working standard checkpoint, '
'look into consolidating DeepSpeed checkpoints.'
),
}
torch.save(save_obj, str(cp_dir / DEEPSPEED_CP_AUX_FILENAME))
if deepspeed_config.get('zero_optimization', {}).get('stage', 0) >= 2: # see https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
return
if not distr_backend.is_root_worker():
return
save_obj = {
**save_obj,
'weights': dalle.state_dict(),
'opt_state': opt.state_dict(),
}
save_obj['scheduler_state'] = (scheduler.state_dict() if scheduler else None)
torch.save(save_obj, path)
# training
# Saves a checkpoint before training begins to fail early when mis-configured.
# See https://github.com/lucidrains/DALLE-pytorch/wiki/DeepSpeed-Checkpoints
save_model(DALLE_OUTPUT_FILE_NAME, epoch=resume_epoch)
for epoch in range(resume_epoch, EPOCHS):
if data_sampler:
data_sampler.set_epoch(epoch)
for i, (text, images) in enumerate((dl if ENABLE_WEBDATASET else distr_dl)):
if i % 10 == 0 and distr_backend.is_root_worker():
t = time.time()
if args.fp16:
images = images.half()
text, images = map(lambda t: t.cuda(), (text, images))
loss = distr_dalle(text, images, return_loss=True)
if using_deepspeed:
distr_dalle.backward(loss)
distr_dalle.step()
# Gradients are automatically zeroed after the step
else:
loss.backward()
clip_grad_norm_(distr_dalle.parameters(), GRAD_CLIP_NORM)
distr_opt.step()
distr_opt.zero_grad()
# Collective loss, averaged
avg_loss = distr_backend.average_all(loss)
log = {}
if i % 10 == 0 and distr_backend.is_root_worker():
print(epoch, i, f'loss - {avg_loss.item()}')
log = {
**log,
'epoch': epoch,
'iter': i,
'loss': avg_loss.item()
}
if i % SAVE_EVERY_N_STEPS == 0:
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if i % 100 == 0:
if distr_backend.is_root_worker():
sample_text = text[:1]
token_list = sample_text.masked_select(sample_text != 0).tolist()
decoded_text = tokenizer.decode(token_list)
if not avoid_model_calls:
# CUDA index errors when we don't guard this
image = dalle.generate_images(text[:1], filter_thres=0.9) # topk sampling at 0.9
log = {
**log,
}
if not avoid_model_calls:
log['image'] = wandb.Image(image, caption=decoded_text)
if i % 10 == 9 and distr_backend.is_root_worker():
sample_per_sec = BATCH_SIZE * 10 / (time.time() - t)
log["sample_per_sec"] = sample_per_sec
print(epoch, i, f'sample_per_sec - {sample_per_sec}')
if i == 201 and args.flops_profiler:
raise StopIteration("Profiler has finished running. Stopping training early.")
if distr_backend.is_root_worker():
wandb.log(log)
if LR_DECAY:
distr_scheduler.step(avg_loss)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
# save trained model to wandb as an artifact every epoch's end
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
save_model(DALLE_OUTPUT_FILE_NAME, epoch=epoch)
if distr_backend.is_root_worker():
wandb.save(DALLE_OUTPUT_FILE_NAME)
model_artifact = wandb.Artifact('trained-dalle', type='model', metadata=dict(model_config))
model_artifact.add_file(DALLE_OUTPUT_FILE_NAME)
run.log_artifact(model_artifact)
wandb.finish()
| [] |
2024-01-10 | Walchshofer/ChatDev | camel~functions~math_functions.py | # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the “License”);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an “AS IS” BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
from typing import List
from .openai_function import OpenAIFunction
def add(a: int, b: int) -> int:
r"""Adds two numbers.
Args:
a (integer): The first number to be added.
b (integer): The second number to be added.
Returns:
integer: The sum of the two numbers.
"""
return a + b
def sub(a: int, b: int) -> int:
r"""Do subtraction between two numbers.
Args:
a (integer): The minuend in subtraction.
b (integer): The subtrahend in subtraction.
Returns:
integer: The result of subtracting :obj:`b` from :obj:`a`.
"""
return a - b
def mul(a: int, b: int) -> int:
r"""Multiplies two integers.
Args:
a (integer): The multiplier in the multiplication.
b (integer): The multiplicand in the multiplication.
Returns:
integer: The product of the two numbers.
"""
return a * b
MATH_FUNCS: List[OpenAIFunction] = [
OpenAIFunction(func) for func in [add, sub, mul]
] | [] |
2024-01-10 | Walchshofer/ChatDev | camel~test_model.py | import os
from dotenv import load_dotenv
import openai
# Load the .env file to get the OPENAI_API_KEY
load_dotenv()
# Retrieve the API key from the environment variables
openai_api_key = os.getenv("OPENAI_API_KEY")
# Set the OpenAI API key
openai.api_key = openai_api_key
import requests
# Replace the URL with the appropriate URL of your local OpenAI API server
url = "http://localhost:5001/v1/engines"
# Make a GET request to the server
response = requests.get(url)
print(response)
# Parse the JSON response
data = response.json()
# Extract and print the model name or ID
print("Model Information:", data) | [] |
2024-01-10 | shukehi/ChatGPT | tests~test_index.py | #!/usr/bin/env python3
import readline
import openai
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
documents = SimpleDirectoryReader('data').load_data()
index = GPTSimpleVectorIndex(documents)
while True:
req = input("> ").strip()
resp = index.query(req)
print(resp) | [] |
2024-01-10 | shukehi/ChatGPT | gptcli.py | #!/usr/bin/env python3
import os
import asyncio
import argparse
import openai
from rich.console import Console
from rich.markdown import Markdown, MarkdownIt
from rich.live import Live
from aiohttp import ClientSession
import readline
try:
import rlcompleter
except ImportError:
pass
c = Console()
systemPrompt = {
"role": "system",
"content": "Use triple backticks with the language name for every code block in your markdown response, if any."
}
class Config:
base_dir = os.path.dirname(os.path.realpath(__file__))
default_key = os.path.join(base_dir, ".key")
aio_socks_proxy = None
sep = Markdown("---")
history = []
def query_openai(data: dict):
messages = [ systemPrompt ]
messages.extend(data)
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
content = response["choices"][0]["message"]["content"]
c.print(Markdown(content), Config.sep)
return content
except openai.error.OpenAIError as e:
c.print(e)
return ""
async def query_openai_stream(data: dict):
messages = [ systemPrompt ]
messages.extend(data)
md = Markdown("")
parser = MarkdownIt().enable("strikethrough")
if Config.aio_socks_proxy:
try:
from aiohttp_socks import ProxyConnector
connector = ProxyConnector.from_url(Config.aio_socks_proxy)
openai.aiosession.set(ClientSession(connector=connector))
except ImportError:
c.print("aiohttp_socks not installed, socks proxy for aiohttp won't work")
Config.aio_socks_proxy = None
answer = ""
try:
with Live(md, auto_refresh=False) as lv:
async for part in await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo",
messages=messages,
stream=True
):
finish_reason = part["choices"][0]["finish_reason"]
if "content" in part["choices"][0]["delta"]:
content = part["choices"][0]["delta"]["content"]
answer += content
md.markup = answer
md.parsed = parser.parse(md.markup)
lv.refresh()
elif finish_reason:
pass
except openai.error.OpenAIError as e:
c.print(e)
answer = ""
c.print(Config.sep)
if Config.aio_socks_proxy:
await openai.aiosession.get().close()
return answer
class ChatConsole:
def __init__(self) -> None:
parser = argparse.ArgumentParser("Input", add_help=False, exit_on_error=False)
parser.add_argument('-help', action='help', default=argparse.SUPPRESS, help="show this help message")
parser.add_argument("-reset", action='store_true',
help="reset session, i.e. clear chat history")
parser.add_argument("-exit", action='store_true',
help="exit console")
parser.add_argument("-multiline", action='store_true',
help="input multiple lines, end with ctrl-d(Linux/macOS) or ctrl-z(Windows). cancel with ctrl-c")
self.parser = parser
try:
self.init_readline([opt for action in parser._actions for opt in action.option_strings])
except Exception as e:
c.print("Failed to setup readline, autocomplete may not work:", e)
def init_readline(self, options: list[str]):
def completer(text, state):
matches = [o for o in options if o.startswith(text)]
if state < len(matches):
return matches[state]
else:
return None
readline.set_completer(completer)
readline.set_completer_delims(readline.get_completer_delims().replace('-', ''))
readline.parse_and_bind('tab:complete')
def parse_input(self) -> str:
# content = c.input("[bold yellow]Input:[/] ").strip()
with c.capture() as capture:
c.print("[bold yellow]Input:[/] ", end="")
content = input(capture.get())
if not content.startswith("-"):
return content
# handle console options locally
try:
args = self.parser.parse_args(content.split())
except SystemExit:
return ""
except argparse.ArgumentError as e:
print(e)
return ""
if args.reset:
Config.history.clear()
elif args.multiline:
return self.read_multiline()
elif args.exit:
raise EOFError
else:
print("???", args)
return ""
def read_multiline(self) -> str:
contents = []
while True:
try:
line = input("> ")
except EOFError:
c.print("--- EOF ---")
break
except KeyboardInterrupt:
return ""
contents.append(line)
return "\n".join(contents)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-n", dest="no_stream", action="store_true", help="query openai in non-stream mode")
parser.add_argument("-r", dest="response", action="store_true",
help="attach server response in request prompt, consume more tokens to get better results")
parser.add_argument("-k", dest="key", help="path to api_key", default=Config.default_key)
parser.add_argument("-p", dest="proxy", help="http/https proxy to use")
args = parser.parse_args()
c.print(f"Loading key from {args.key}")
with open(args.key, "r") as f:
openai.api_key = f.read().strip()
stream = not args.no_stream
if args.proxy:
c.print(f"Using proxy: {args.proxy}")
if stream and args.proxy.startswith("socks"):
Config.aio_socks_proxy = args.proxy
else:
openai.proxy = args.proxy
c.print(f"Response in prompt: {args.response}")
c.print(f"Stream mode: {stream}")
data = Config.history
chat = ChatConsole()
while True:
try:
content = chat.parse_input().strip()
if not content:
continue
data.append({"role": "user", "content": content})
if stream:
answer = asyncio.run(query_openai_stream(data))
else:
answer = query_openai(data)
except KeyboardInterrupt:
c.print("Bye!")
break
except EOFError as e:
c.print("Bye!")
break
if not answer:
data.pop()
elif args.response:
data.append({"role": "assistant", "content": answer})
| [
"{'role': 'system', 'content': 'Use triple backticks with the language name for every code block in your markdown response, if any.'}",
"Use triple backticks with the language name for every code block in your markdown response, if any."
] |
2024-01-10 | PAIXAI/aider | aider~coders~base_coder.py | #!/usr/bin/env python
import hashlib
import json
import os
import sys
import traceback
from json.decoder import JSONDecodeError
from pathlib import Path, PurePosixPath
import backoff
import git
import openai
import requests
from jsonschema import Draft7Validator
from openai.error import APIError, RateLimitError, ServiceUnavailableError, Timeout
from rich.console import Console, Text
from rich.live import Live
from rich.markdown import Markdown
from aider import models, prompts, utils
from aider.commands import Commands
from aider.repomap import RepoMap
from ..dump import dump # noqa: F401
class MissingAPIKeyError(ValueError):
pass
class ExhaustedContextWindow(Exception):
pass
def wrap_fence(name):
return f"<{name}>", f"</{name}>"
class Coder:
abs_fnames = None
repo = None
last_aider_commit_hash = None
last_asked_for_commit_time = 0
repo_map = None
functions = None
total_cost = 0.0
num_exhausted_context_windows = 0
@classmethod
def create(
self,
main_model,
edit_format,
io,
openai_api_key,
openai_api_base="https://api.openai.com/v1",
**kwargs,
):
from . import (
EditBlockCoder,
EditBlockFunctionCoder,
SingleWholeFileFunctionCoder,
WholeFileCoder,
WholeFileFunctionCoder,
)
openai.api_key = openai_api_key
openai.api_base = openai_api_base
if not main_model:
main_model = models.GPT35_16k
if not main_model.always_available:
if not check_model_availability(main_model):
if main_model != models.GPT4:
io.tool_error(
f"API key does not support {main_model.name}, falling back to"
f" {models.GPT35_16k.name}"
)
main_model = models.GPT35_16k
if edit_format is None:
edit_format = main_model.edit_format
if edit_format == "diff":
return EditBlockCoder(main_model, io, **kwargs)
elif edit_format == "whole":
return WholeFileCoder(main_model, io, **kwargs)
elif edit_format == "whole-func":
return WholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "single-whole-func":
return SingleWholeFileFunctionCoder(main_model, io, **kwargs)
elif edit_format == "diff-func-list":
return EditBlockFunctionCoder("list", main_model, io, **kwargs)
elif edit_format in ("diff-func", "diff-func-string"):
return EditBlockFunctionCoder("string", main_model, io, **kwargs)
else:
raise ValueError(f"Unknown edit format {edit_format}")
def __init__(
self,
main_model,
io,
fnames=None,
pretty=True,
show_diffs=False,
auto_commits=True,
dirty_commits=True,
dry_run=False,
map_tokens=1024,
verbose=False,
assistant_output_color="blue",
code_theme="default",
stream=True,
use_git=True,
):
if not fnames:
fnames = []
self.chat_completion_call_hashes = []
self.chat_completion_response_hashes = []
self.verbose = verbose
self.abs_fnames = set()
self.cur_messages = []
self.done_messages = []
self.num_control_c = 0
self.io = io
self.stream = stream
if not auto_commits:
dirty_commits = False
self.auto_commits = auto_commits
self.dirty_commits = dirty_commits
self.assistant_output_color = assistant_output_color
self.code_theme = code_theme
self.dry_run = dry_run
self.pretty = pretty
if pretty:
self.console = Console()
else:
self.console = Console(force_terminal=False, no_color=True)
self.main_model = main_model
self.io.tool_output(f"Model: {main_model.name}")
self.show_diffs = show_diffs
self.commands = Commands(self.io, self)
if use_git:
self.set_repo(fnames)
else:
self.abs_fnames = set([str(Path(fname).resolve()) for fname in fnames])
if self.repo:
rel_repo_dir = os.path.relpath(self.repo.git_dir, os.getcwd())
self.io.tool_output(f"Git repo: {rel_repo_dir}")
else:
self.io.tool_output("Git repo: none")
self.find_common_root()
if main_model.use_repo_map and self.repo and self.gpt_prompts.repo_content_prefix:
self.repo_map = RepoMap(
map_tokens,
self.root,
self.main_model,
io,
self.gpt_prompts.repo_content_prefix,
self.verbose,
)
if self.repo_map.use_ctags:
self.io.tool_output(f"Repo-map: universal-ctags using {map_tokens} tokens")
elif not self.repo_map.has_ctags and map_tokens > 0:
self.io.tool_output(
f"Repo-map: basic using {map_tokens} tokens"
f" ({self.repo_map.ctags_disabled_reason})"
)
else:
self.io.tool_output("Repo-map: disabled because map_tokens == 0")
else:
self.io.tool_output("Repo-map: disabled")
for fname in self.get_inchat_relative_files():
self.io.tool_output(f"Added {fname} to the chat.")
# validate the functions jsonschema
if self.functions:
for function in self.functions:
Draft7Validator.check_schema(function)
if self.verbose:
self.io.tool_output("JSON Schema:")
self.io.tool_output(json.dumps(self.functions, indent=4))
def find_common_root(self):
if len(self.abs_fnames) == 1:
self.root = os.path.dirname(list(self.abs_fnames)[0])
elif self.abs_fnames:
self.root = os.path.commonpath(list(self.abs_fnames))
else:
self.root = os.getcwd()
self.root = os.path.abspath(self.root)
def set_repo(self, cmd_line_fnames):
if not cmd_line_fnames:
cmd_line_fnames = ["."]
repo_paths = []
for fname in cmd_line_fnames:
fname = Path(fname)
if not fname.exists():
self.io.tool_output(f"Creating empty file {fname}")
fname.parent.mkdir(parents=True, exist_ok=True)
fname.touch()
fname = fname.resolve()
try:
repo_path = git.Repo(fname, search_parent_directories=True).working_dir
repo_path = os.path.abspath(repo_path)
repo_paths.append(repo_path)
except git.exc.InvalidGitRepositoryError:
pass
if fname.is_dir():
continue
self.abs_fnames.add(str(fname))
num_repos = len(set(repo_paths))
if num_repos == 0:
return
if num_repos > 1:
self.io.tool_error("Files are in different git repos.")
return
# https://github.com/gitpython-developers/GitPython/issues/427
self.repo = git.Repo(repo_paths.pop(), odbt=git.GitDB)
self.root = os.path.abspath(self.repo.working_tree_dir)
new_files = []
for fname in self.abs_fnames:
relative_fname = self.get_rel_fname(fname)
tracked_files = set(self.get_tracked_files())
if relative_fname not in tracked_files:
new_files.append(relative_fname)
if new_files:
rel_repo_dir = os.path.relpath(self.repo.git_dir, os.getcwd())
self.io.tool_output(f"Files not tracked in {rel_repo_dir}:")
for fn in new_files:
self.io.tool_output(f" - {fn}")
if self.io.confirm_ask("Add them?"):
for relative_fname in new_files:
self.repo.git.add(relative_fname)
self.io.tool_output(f"Added {relative_fname} to the git repo")
show_files = ", ".join(new_files)
commit_message = f"Added new files to the git repo: {show_files}"
self.repo.git.commit("-m", commit_message, "--no-verify")
commit_hash = self.repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
else:
self.io.tool_error("Skipped adding new files to the git repo.")
return
# fences are obfuscated so aider can modify this file!
fences = [
("``" + "`", "``" + "`"),
wrap_fence("source"),
wrap_fence("code"),
wrap_fence("pre"),
wrap_fence("codeblock"),
wrap_fence("sourcecode"),
]
fence = fences[0]
def get_abs_fnames_content(self):
for fname in list(self.abs_fnames):
content = self.io.read_text(fname)
if content is None:
relative_fname = self.get_rel_fname(fname)
self.io.tool_error(f"Dropping {relative_fname} from the chat.")
self.abs_fnames.remove(fname)
else:
yield fname, content
def choose_fence(self):
all_content = ""
for _fname, content in self.get_abs_fnames_content():
all_content += content + "\n"
good = False
for fence_open, fence_close in self.fences:
if fence_open in all_content or fence_close in all_content:
continue
good = True
break
if good:
self.fence = (fence_open, fence_close)
else:
self.fence = self.fences[0]
self.io.tool_error(
"Unable to find a fencing strategy! Falling back to:"
" {self.fence[0]}...{self.fence[1]}"
)
return
def get_files_content(self, fnames=None):
if not fnames:
fnames = self.abs_fnames
prompt = ""
for fname, content in self.get_abs_fnames_content():
relative_fname = self.get_rel_fname(fname)
prompt = "\n"
prompt += relative_fname
prompt += f"\n{self.fence[0]}\n"
prompt += content
prompt += f"{self.fence[1]}\n"
return prompt
def get_files_messages(self):
all_content = ""
if self.abs_fnames:
files_content = self.gpt_prompts.files_content_prefix
files_content += self.get_files_content()
else:
files_content = self.gpt_prompts.files_no_full_files
all_content += files_content
other_files = set(self.get_all_abs_files()) - set(self.abs_fnames)
if self.repo_map:
repo_content = self.repo_map.get_repo_map(self.abs_fnames, other_files)
if repo_content:
if all_content:
all_content += "\n"
all_content += repo_content
files_messages = [
dict(role="user", content=all_content),
dict(role="assistant", content="Ok."),
]
if self.abs_fnames:
files_messages += [
dict(role="system", content=self.fmt_system_reminder()),
]
return files_messages
def run(self, with_message=None):
while True:
try:
if with_message:
new_user_message = with_message
self.io.user_input(with_message)
else:
new_user_message = self.run_loop()
while new_user_message:
new_user_message = self.send_new_user_message(new_user_message)
if with_message:
return
except KeyboardInterrupt:
self.num_control_c += 1
if self.num_control_c >= 2:
break
self.io.tool_error("^C again or /exit to quit")
except EOFError:
return
def should_dirty_commit(self, inp):
is_commit_command = inp and inp.startswith("/commit")
if is_commit_command:
return
if not self.dirty_commits:
return
if not self.repo:
return
if not self.repo.is_dirty():
return
if self.last_asked_for_commit_time >= self.get_last_modified():
return
return True
def move_back_cur_messages(self, message):
self.done_messages += self.cur_messages
if message:
self.done_messages += [
dict(role="user", content=message),
dict(role="assistant", content="Ok."),
]
self.cur_messages = []
def run_loop(self):
inp = self.io.get_input(
self.root,
self.get_inchat_relative_files(),
self.get_addable_relative_files(),
self.commands,
)
self.num_control_c = 0
if self.should_dirty_commit(inp):
self.commit(ask=True, which="repo_files")
# files changed, move cur messages back behind the files messages
self.move_back_cur_messages(self.gpt_prompts.files_content_local_edits)
if inp.strip():
self.io.tool_output("Use up-arrow to retry previous command:", inp)
return
if not inp:
return
if self.commands.is_command(inp):
return self.commands.run(inp)
self.check_for_file_mentions(inp)
return self.send_new_user_message(inp)
def fmt_system_reminder(self):
prompt = self.gpt_prompts.system_reminder
prompt = prompt.format(fence=self.fence)
return prompt
def send_new_user_message(self, inp):
self.choose_fence()
self.cur_messages += [
dict(role="user", content=inp),
]
main_sys = self.gpt_prompts.main_system
# if self.main_model.max_context_tokens > 4 * 1024:
main_sys += "\n" + self.fmt_system_reminder()
messages = [
dict(role="system", content=main_sys),
]
messages += self.done_messages
messages += self.get_files_messages()
messages += self.cur_messages
if self.verbose:
utils.show_messages(messages, functions=self.functions)
exhausted = False
interrupted = False
try:
interrupted = self.send(messages, functions=self.functions)
except ExhaustedContextWindow:
exhausted = True
except openai.error.InvalidRequestError as err:
if "maximum context length" in str(err):
exhausted = True
if exhausted:
self.num_exhausted_context_windows += 1
self.io.tool_error("The chat session is larger than the context window!\n")
self.commands.cmd_tokens("")
self.io.tool_error("\nTo reduce token usage:")
self.io.tool_error(" - Use /drop to remove unneeded files from the chat session.")
self.io.tool_error(" - Use /clear to clear chat history.")
return
if self.partial_response_function_call:
args = self.parse_partial_args()
if args:
content = args["explanation"]
else:
content = ""
elif self.partial_response_content:
content = self.partial_response_content
else:
content = ""
if interrupted:
self.io.tool_error("\n\n^C KeyboardInterrupt")
self.num_control_c += 1
content += "\n^C KeyboardInterrupt"
self.io.tool_output()
if interrupted:
self.cur_messages += [dict(role="assistant", content=content)]
return
edited, edit_error = self.apply_updates()
if edit_error:
return edit_error
# TODO: this shouldn't use content, should use self.partial_....
self.update_cur_messages(content, edited)
if edited:
if self.repo and self.auto_commits and not self.dry_run:
saved_message = self.auto_commit()
elif hasattr(self.gpt_prompts, "files_content_gpt_edits_no_repo"):
saved_message = self.gpt_prompts.files_content_gpt_edits_no_repo
else:
saved_message = None
self.move_back_cur_messages(saved_message)
add_rel_files_message = self.check_for_file_mentions(content)
if add_rel_files_message:
return add_rel_files_message
def update_cur_messages(self, content, edited):
self.cur_messages += [dict(role="assistant", content=content)]
def auto_commit(self):
res = self.commit(history=self.cur_messages, prefix="aider: ")
if res:
commit_hash, commit_message = res
self.last_aider_commit_hash = commit_hash
saved_message = self.gpt_prompts.files_content_gpt_edits.format(
hash=commit_hash,
message=commit_message,
)
else:
if self.repo:
self.io.tool_error("Warning: no changes found in tracked files.")
saved_message = self.gpt_prompts.files_content_gpt_no_edits
return saved_message
def check_for_file_mentions(self, content):
words = set(word for word in content.split())
# drop sentence punctuation from the end
words = set(word.rstrip(",.!;") for word in words)
# strip away all kinds of quotes
quotes = "".join(['"', "'", "`"])
words = set(word.strip(quotes) for word in words)
addable_rel_fnames = self.get_addable_relative_files()
mentioned_rel_fnames = set()
fname_to_rel_fnames = {}
for rel_fname in addable_rel_fnames:
fname = os.path.basename(rel_fname)
if fname not in fname_to_rel_fnames:
fname_to_rel_fnames[fname] = []
fname_to_rel_fnames[fname].append(rel_fname)
for fname, rel_fnames in fname_to_rel_fnames.items():
if len(rel_fnames) == 1 and fname in words:
mentioned_rel_fnames.add(rel_fnames[0])
if not mentioned_rel_fnames:
return
for rel_fname in mentioned_rel_fnames:
self.io.tool_output(rel_fname)
if not self.io.confirm_ask("Add these files to the chat?"):
return
for rel_fname in mentioned_rel_fnames:
self.abs_fnames.add(os.path.abspath(os.path.join(self.root, rel_fname)))
return prompts.added_files.format(fnames=", ".join(mentioned_rel_fnames))
@backoff.on_exception(
backoff.expo,
(
Timeout,
APIError,
ServiceUnavailableError,
RateLimitError,
requests.exceptions.ConnectionError,
),
max_tries=5,
on_backoff=lambda details: print(f"Retry in {details['wait']} seconds."),
)
def send_with_retries(self, model, messages, functions):
kwargs = dict(
model=model,
messages=messages,
temperature=0,
stream=self.stream,
)
if functions is not None:
kwargs["functions"] = self.functions
# Generate SHA1 hash of kwargs and append it to chat_completion_call_hashes
hash_object = hashlib.sha1(json.dumps(kwargs, sort_keys=True).encode())
self.chat_completion_call_hashes.append(hash_object.hexdigest())
res = openai.ChatCompletion.create(**kwargs)
return res
def send(self, messages, model=None, silent=False, functions=None):
if not model:
model = self.main_model.name
self.partial_response_content = ""
self.partial_response_function_call = dict()
interrupted = False
try:
completion = self.send_with_retries(model, messages, functions)
if self.stream:
self.show_send_output_stream(completion, silent)
else:
self.show_send_output(completion, silent)
except KeyboardInterrupt:
interrupted = True
if not silent:
if self.partial_response_content:
self.io.ai_output(self.partial_response_content)
elif self.partial_response_function_call:
# TODO: push this into subclasses
args = self.parse_partial_args()
if args:
self.io.ai_output(json.dumps(args, indent=4))
return interrupted
def show_send_output(self, completion, silent):
if self.verbose:
print(completion)
show_func_err = None
show_content_err = None
try:
self.partial_response_function_call = completion.choices[0].message.function_call
except AttributeError as func_err:
show_func_err = func_err
try:
self.partial_response_content = completion.choices[0].message.content
except AttributeError as content_err:
show_content_err = content_err
resp_hash = dict(
function_call=self.partial_response_function_call,
content=self.partial_response_content,
)
resp_hash = hashlib.sha1(json.dumps(resp_hash, sort_keys=True).encode())
self.chat_completion_response_hashes.append(resp_hash.hexdigest())
if show_func_err and show_content_err:
self.io.tool_error(show_func_err)
self.io.tool_error(show_content_err)
raise Exception("No data found in openai response!")
prompt_tokens = completion.usage.prompt_tokens
completion_tokens = completion.usage.completion_tokens
tokens = f"{prompt_tokens} prompt tokens, {completion_tokens} completion tokens"
if self.main_model.prompt_price:
cost = prompt_tokens * self.main_model.prompt_price / 1000
cost += completion_tokens * self.main_model.completion_price / 1000
tokens += f", ${cost:.6f} cost"
self.total_cost += cost
show_resp = self.render_incremental_response(True)
if self.pretty:
show_resp = Markdown(
show_resp, style=self.assistant_output_color, code_theme=self.code_theme
)
else:
show_resp = Text(show_resp or "<no response>")
self.io.console.print(show_resp)
self.io.console.print(tokens)
def show_send_output_stream(self, completion, silent):
live = None
if self.pretty and not silent:
live = Live(vertical_overflow="scroll")
try:
if live:
live.start()
for chunk in completion:
if chunk.choices[0].finish_reason == "length":
raise ExhaustedContextWindow()
try:
func = chunk.choices[0].delta.function_call
# dump(func)
for k, v in func.items():
if k in self.partial_response_function_call:
self.partial_response_function_call[k] += v
else:
self.partial_response_function_call[k] = v
except AttributeError:
pass
try:
text = chunk.choices[0].delta.content or ""
if text:
self.partial_response_content += text
except AttributeError:
# some chunks carry no content delta; keep `text` defined for the stdout write below
text = ""
if silent:
continue
if self.pretty:
self.live_incremental_response(live, False)
else:
sys.stdout.write(text)
sys.stdout.flush()
finally:
if live:
self.live_incremental_response(live, True)
live.stop()
def live_incremental_response(self, live, final):
show_resp = self.render_incremental_response(final)
if not show_resp:
return
md = Markdown(show_resp, style=self.assistant_output_color, code_theme=self.code_theme)
live.update(md)
def render_incremental_response(self, final):
return self.partial_response_content
def get_context_from_history(self, history):
context = ""
if history:
context += "# Context:\n"
for msg in history:
context += msg["role"].upper() + ": " + msg["content"] + "\n"
return context
def get_commit_message(self, diffs, context):
if len(diffs) >= 4 * 1024 * 4:
self.io.tool_error(
f"Diff is too large for {models.GPT35.name} to generate a commit message."
)
return
diffs = "# Diffs:\n" + diffs
messages = [
dict(role="system", content=prompts.commit_system),
dict(role="user", content=context + diffs),
]
try:
interrupted = self.send(
messages,
model=models.GPT35.name,
silent=True,
)
except openai.error.InvalidRequestError:
self.io.tool_error(
f"Failed to generate commit message using {models.GPT35.name} due to an invalid"
" request."
)
return
commit_message = self.partial_response_content
commit_message = commit_message.strip()
if commit_message and commit_message[0] == '"' and commit_message[-1] == '"':
commit_message = commit_message[1:-1].strip()
if interrupted:
self.io.tool_error(
f"Unable to get commit message from {models.GPT35.name}. Use /commit to try again."
)
return
return commit_message
def get_diffs(self, *args):
if self.pretty:
args = ["--color"] + list(args)
diffs = self.repo.git.diff(*args)
return diffs
def commit(self, history=None, prefix=None, ask=False, message=None, which="chat_files"):
repo = self.repo
if not repo:
return
if not repo.is_dirty():
return
def get_dirty_files_and_diffs(file_list):
diffs = ""
relative_dirty_files = []
for fname in file_list:
relative_fname = self.get_rel_fname(fname)
relative_dirty_files.append(relative_fname)
try:
current_branch_commit_count = len(
list(self.repo.iter_commits(self.repo.active_branch))
)
except git.exc.GitCommandError:
current_branch_commit_count = None
if not current_branch_commit_count:
continue
these_diffs = self.get_diffs("HEAD", "--", relative_fname)
if these_diffs:
diffs += these_diffs + "\n"
return relative_dirty_files, diffs
if which == "repo_files":
all_files = [os.path.join(self.root, f) for f in self.get_all_relative_files()]
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(all_files)
elif which == "chat_files":
relative_dirty_fnames, diffs = get_dirty_files_and_diffs(self.abs_fnames)
else:
raise ValueError(f"Invalid value for 'which': {which}")
if self.show_diffs or ask:
# don't use io.tool_output() because we don't want to log or further colorize
print(diffs)
context = self.get_context_from_history(history)
if message:
commit_message = message
else:
commit_message = self.get_commit_message(diffs, context)
if not commit_message:
commit_message = "work in progress"
if prefix:
commit_message = prefix + commit_message
if ask:
if which == "repo_files":
self.io.tool_output("Git repo has uncommitted changes.")
else:
self.io.tool_output("Files have uncommitted changes.")
res = self.io.prompt_ask(
"Commit before the chat proceeds [y/n/commit message]?",
default=commit_message,
).strip()
self.last_asked_for_commit_time = self.get_last_modified()
self.io.tool_output()
if res.lower() in ["n", "no"]:
self.io.tool_error("Skipped commmit.")
return
if res.lower() not in ["y", "yes"] and res:
commit_message = res
repo.git.add(*relative_dirty_fnames)
full_commit_message = commit_message + "\n\n" + context
repo.git.commit("-m", full_commit_message, "--no-verify")
commit_hash = repo.head.commit.hexsha[:7]
self.io.tool_output(f"Commit {commit_hash} {commit_message}")
return commit_hash, commit_message
def get_rel_fname(self, fname):
return os.path.relpath(fname, self.root)
def get_inchat_relative_files(self):
files = [self.get_rel_fname(fname) for fname in self.abs_fnames]
return sorted(set(files))
def get_all_relative_files(self):
if self.repo:
files = self.get_tracked_files()
else:
files = self.get_inchat_relative_files()
return sorted(set(files))
def get_all_abs_files(self):
files = self.get_all_relative_files()
files = [os.path.abspath(os.path.join(self.root, path)) for path in files]
return files
def get_last_modified(self):
files = self.get_all_abs_files()
if not files:
return 0
return max(Path(path).stat().st_mtime for path in files)
def get_addable_relative_files(self):
return set(self.get_all_relative_files()) - set(self.get_inchat_relative_files())
def allowed_to_edit(self, path, write_content=None):
full_path = os.path.abspath(os.path.join(self.root, path))
if full_path in self.abs_fnames:
if write_content:
self.io.write_text(full_path, write_content)
return full_path
if not Path(full_path).exists():
question = f"Allow creation of new file {path}?" # noqa: E501
else:
question = f"Allow edits to {path} which was not previously provided?" # noqa: E501
if not self.io.confirm_ask(question):
self.io.tool_error(f"Skipping edit to {path}")
return
if not Path(full_path).exists() and not self.dry_run:
Path(full_path).parent.mkdir(parents=True, exist_ok=True)
Path(full_path).touch()
self.abs_fnames.add(full_path)
# Check if the file is already in the repo
if self.repo:
tracked_files = set(self.get_tracked_files())
relative_fname = self.get_rel_fname(full_path)
if relative_fname not in tracked_files and self.io.confirm_ask(f"Add {path} to git?"):
if not self.dry_run:
self.repo.git.add(full_path)
if write_content:
self.io.write_text(full_path, write_content)
return full_path
def get_tracked_files(self):
# convert to appropriate os.sep, since git always normalizes to /
files = set(self.repo.git.ls_files().splitlines())
res = set(str(Path(PurePosixPath(path))) for path in files)
return res
apply_update_errors = 0
def apply_updates(self):
max_apply_update_errors = 2
try:
edited = self.update_files()
except ValueError as err:
err = err.args[0]
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, retrying...")
self.io.tool_error(str(err))
return None, err
else:
self.io.tool_error(f"Malformed response #{self.apply_update_errors}, aborting.")
return False, None
except Exception as err:
print(err)
print()
traceback.print_exc()
self.apply_update_errors += 1
if self.apply_update_errors < max_apply_update_errors:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, retrying...")
return None, str(err)
else:
self.io.tool_error(f"Update exception #{self.apply_update_errors}, aborting")
return False, None
self.apply_update_errors = 0
if edited:
for path in sorted(edited):
if self.dry_run:
self.io.tool_output(f"Did not apply edit to {path} (--dry-run)")
else:
self.io.tool_output(f"Applied edit to {path}")
return edited, None
def parse_partial_args(self):
# dump(self.partial_response_function_call)
data = self.partial_response_function_call.get("arguments")
if not data:
return
try:
return json.loads(data)
except JSONDecodeError:
pass
try:
return json.loads(data + "]}")
except JSONDecodeError:
pass
try:
return json.loads(data + "}]}")
except JSONDecodeError:
pass
try:
return json.loads(data + '"}]}')
except JSONDecodeError:
pass
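# Illustrative only: the cascade above repairs function-call arguments that were cut off
# mid-stream. Hypothetical payload, not from a real response:
# data = '{"explanation": "fix bug", "edits": [{"path": "a.py"'
# json.loads(data) fails, but json.loads(data + '}]}') parses cleanly.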
def check_model_availability(main_model):
available_models = openai.Model.list()
model_ids = [model.id for model in available_models["data"]]
return main_model.name in model_ids
| [
"\n"
] |
2024-01-10 | nomanmurtaza786/Gents-chatbot | chains_bot.py | import os
from dotenv import load_dotenv
from langchain.agents import (AgentExecutor, AgentType, Tool, create_sql_agent,
initialize_agent, load_tools)
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.chains import APIChain, ConversationalRetrievalChain
from langchain.chains.llm import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.prompts.prompt import PromptTemplate
from langchain.sql_database import SQLDatabase
from supabase_db import get_vector_store_retriever
load_dotenv()
openApiKey: str = os.getenv("OPENAI_API_KEY", "default_key")
huggingFaceApikey = os.getenv("API_KEY", "default_key")
openAillm = OpenAI(
model="text-davinci-003",
openai_api_key=openApiKey,
)
template = """
Question: {question}
think step by step
Answer:
"""
prompt = PromptTemplate(template=template, input_variables=["question"])
chatLLM = ChatOpenAI(temperature=0.1,)
llm_chain = LLMChain(llm=chatLLM, prompt=prompt,verbose=True,)
#print("predict", chatLLM.predict('Captial of USA'))
crc = ConversationalRetrievalChain.from_llm(llm=chatLLM, retriever=get_vector_store_retriever(), verbose=True, )
api_chain = APIChain.from_llm_and_api_docs(llm=chatLLM, api_docs='', verbose=True)
def get_answer(question: str, chat_history: list = []):
result = crc({"question": question, "chat_history": chat_history})
return result
def callingApiChain(question: str, chat_history: list):
result = crc({"question": question, "chat_history": chat_history})
return result["answer"]
## database connect string from env file
db_connect_string = os.getenv("DB_Connection_Str", "default_key")
db = SQLDatabase.from_uri("postgresql://postgres:postgres@localhost:5432/HR")
toolkit = SQLDatabaseToolkit(db=db, llm=OpenAI(temperature=0))
agent_executor = create_sql_agent(
llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
toolkit=toolkit,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS
)
def get_answer_from_agent(question: str, chat_history: list = []):
result = agent_executor(question)
return result
tools = [
Tool(
name="sql_agent",
description='use to query employee data from the database, such as active employees, performance rating, location, department, etc.',
func=get_answer_from_agent,
),
Tool(
name="resume_reader",
description='use to read resume and extract information such as name, email, phone, skills, etc.',
func=get_answer,
)
]
agents = initialize_agent(tools=tools, verbose=True, llm=ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613", verbose=True), )
def run_multiple_agents(question: str, chat_history: list = []):
result = agents.run(question)
return result
# print("predict", get_answer_from_agent('Tell me best location with respect to employees performance. IF rating A is consider as best performer')) | [
"question",
" \n Question: {question} \n think step by step\n Answer: \n "
] |
2024-01-10 | nomanmurtaza786/Gents-chatbot | gen_embeddings.py |
import os
from dotenv import load_dotenv
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from supabase_db import saveDocVectorSupabase, saveToSupabase
load_dotenv()  # load .env explicitly rather than relying on the supabase_db import side effect
embeddingOpenAi = OpenAIEmbeddings()
# loader = PyPDFLoader('/Users/nomanmurtaza/Documents/Noman_Murtaza_CV.pdf')
loader = CSVLoader(file_path="/Users/nomanmurtaza/Downloads/user details.csv")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=20)
pages = loader.load_and_split(text_splitter=text_splitter)
#saveDocVectorSupabase(pages) | [] |
2024-01-10 | nomanmurtaza786/Gents-chatbot | supabase_db.py | import os
from dotenv import load_dotenv
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import Client, create_client
load_dotenv()
url: str = os.environ.get("SUPABASE_URL", "default_url")
key: str = os.environ.get("SUPABASE_KEY", "default_key")
supabaseClient: Client = create_client(url, key)
##response = supabase.table("coins_details").select("*").execute()
embeddings = OpenAIEmbeddings()
vector_store = SupabaseVectorStore(embedding=embeddings, client=supabaseClient, table_name="documents", query_name="match_documents")
# create a getter function for the vector store
def get_vector_store_retriever():
return vector_store.as_retriever()
def saveToSupabase(content: str, metadata: dict, embedding: list):
response = supabaseClient.table("documents").upsert({"content": content, "metadata": metadata, "embedding": embedding}).execute()
def saveDocVectorSupabase(docs: list):
supabaseVec=vector_store.from_documents(docs, embeddings, client=supabaseClient, table_name="documents")
def getSimilarDocuments(text: str):
return vector_store.similarity_search(text, 10)
# def getSimilarDocuments(text: str, embeddings: Embeddings):
# return SupabaseVectorStore.similarity_search(query=text,)
| [] |
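A brief usage sketch for the helpers above; the query text is invented and valid Supabase/OpenAI credentials are assumed.

docs = getSimilarDocuments("employees with Python experience")   # hypothetical query
for doc in docs:
    print(doc.page_content[:80])

retriever = get_vector_store_retriever()   # the retriever plugged into the conversational chain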
2024-01-10 | nomanmurtaza786/Gents-chatbot | chat_ui.py | import os
import openai
import streamlit as st
from dotenv import load_dotenv
from chains_bot import get_answer, get_answer_from_agent, run_multiple_agents
from supabase_db import getSimilarDocuments
load_dotenv()
st.title("GEN-TS HR Partner")
openai.api_key = os.getenv("OPENAI_API_KEY", "default_key")
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
        result = get_answer(prompt)
        # ConversationalRetrievalChain returns a dict; display only the answer text
        response = result["answer"]
        st.markdown(response)
st.session_state.messages.append({"role": "assistant", "content": response})
| [] |
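The file above is a Streamlit app, so it is normally started with "streamlit run chat_ui.py"; Streamlit's session state then keeps the message history across reruns.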
2024-01-10 | chrisluo5311/NYCU_AI_ChatGPT_Class | LineBot~LineFlask_Project.py | # Install the CKIP package and download the Chinese Transformer weight files
from ckiptagger import data_utils
#data_utils.download_data("./")
from flask import Flask, request, abort
from ckiptagger import WS
from linebot.v3 import (
WebhookHandler
)
from linebot.v3.exceptions import (
InvalidSignatureError
)
from linebot.v3.messaging import (
Configuration,
ApiClient,
MessagingApi,
ReplyMessageRequest,
TextMessage
)
from linebot.v3.webhooks import (
MessageEvent,
TextMessageContent
)
# Set up the OpenAI connection for ChatGPT Q&A and configure the OpenAI key
import openai
key = ""
openai.api_key = key
def chatgpt_qa(q):
response = openai.Completion.create(
model="text-davinci-003",
prompt=q,
temperature=0,
max_tokens=500,
top_p=1,
frequency_penalty=0.5,
presence_penalty=0
)
    # Return the answer
return response["choices"][0]["text"].strip()
#ws = WS("./data")
# access_token
configuration = Configuration(
access_token='')
# CHANNEL_SECRET
handler = WebhookHandler('')
app = Flask(__name__)
@app.route("/test",methods=['GET'])
def test():
return 'test'
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
app.logger.info("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessageContent)
def handle_message(event):
with ApiClient(configuration) as api_client:
line_bot_api = MessagingApi(api_client)
line_bot_api.reply_message_with_http_info(
ReplyMessageRequest(
reply_token=event.reply_token,
messages=[TextMessage(text=event.message.text)]
)
)
if __name__ == "__main__":
app.run(port=8080)
| [] |
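A quick local check of the chatgpt_qa helper defined above; the question is invented and a valid OpenAI key must be set in key first. Note that the webhook handler currently echoes the incoming text back and does not call this helper yet.

print(chatgpt_qa("Introduce Taipei 101 in one sentence."))   # hypothetical question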
2024-01-10 | jnwan/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
    CHDBits (彩虹岛) sign-in.
    If an OpenAI key is configured, ChatGPT is asked for the answer;
    otherwise an option is chosen at random.
"""
    # Site URL to match; each handler class must set its own site URL
site_url = "ptchdbits.co"
    # Already signed in today
_sign_regex = ['今天已经签过到了']
    # Sign-in succeeded (patterns to be extended)
_success_regex = ['\\d+点魔力值']
    # Cache of known-correct answers for direct lookup later
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
        Decide whether this sign-in handler matches the given site URL; the default implementation is usually enough.
        :param url: site URL
        :return: whether it matches; if so, this class's signin method is called
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
        Perform the sign-in.
        :param site_info: site information, including the site URL, cookie, UA, etc.
        :return: sign-in result message
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
        # Create the directory used to store correct answers
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
        # Check whether we have already signed in today
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://ptchdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
        # Not signed in yet, so parse the HTML
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
        # Extract the question and the answer options from the page
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
        # Extract the question text with a regex
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
        # Look up previously stored answers
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
            # Look up the locally stored answer for this question hash
question_answer = exits_answers[question_str]
            # question_answer is expected to be a list
if not isinstance(question_answer, list):
question_answer = [question_answer]
            # If a correct answer for this hash exists locally, match it against the options
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
                # Sign in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
        # Default to a random answer; if GPT returns one, submit that instead
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # Assemble the question for GPT
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
        # Ask ChatGPT for the answer
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
        # Handle the answer returned by ChatGPT
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
            # Extract the numbers from the reply with a regex
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
                    # If a returned number is among the option ids, use it directly as the answer
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
        # Sign in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
        Sign-in request, e.g.:
        questionid: 450
        choice[]: 8
        choice[]: 4
        usercomment: current mood: none
        submit: submit
        A multiple-choice answer sends several choice[] fields.
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://ptchdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
        # Check whether the sign-in succeeded
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
            # On success, write the answer to the local file
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
        Write the correct answer to the local file after a successful sign-in.
"""
try:
exits_answers[question] = answer
            # Serialize the data
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
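For reference, a small sketch of the structure that __write_local_answer produces in chdbits.json; the question text and option ids here are invented.

import json

example_cache = {"下列哪一项是质数?": [2]}   # question text -> list of correct option ids
print(json.dumps(example_cache, ensure_ascii=False, indent=4))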
2024-01-10 | while-basic/superagent | app~lib~agents.py | from typing import Any
import requests
import yaml
from decouple import config
from langchain.agents import (
AgentExecutor,
LLMSingleActionAgent,
)
from langchain.agents.agent_toolkits.openapi import planner
from langchain.agents.agent_toolkits.openapi.spec import reduce_openapi_spec
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chains.conversational_retrieval.prompts import (
CONDENSE_QUESTION_PROMPT,
QA_PROMPT,
)
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatAnthropic, ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import Cohere, OpenAI
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.prompts.prompt import PromptTemplate
from langchain.requests import RequestsWrapper
from langchain.vectorstores.pinecone import Pinecone
from app.lib.callbacks import StreamingCallbackHandler
from app.lib.parsers import CustomOutputParser
from app.lib.prisma import prisma
from app.lib.prompts import (
CustomPromptTemplate,
agent_template,
default_chat_prompt,
)
from app.lib.tools import get_search_tool
class Agent:
def __init__(
self,
agent: dict,
has_streaming: bool = False,
on_llm_new_token=None,
on_llm_end=None,
on_chain_end=None,
):
self.id = agent.id
self.document = agent.document
self.has_memory = agent.hasMemory
self.type = agent.type
self.llm = agent.llm
self.prompt = agent.prompt
self.tool = agent.tool
self.has_streaming = has_streaming
self.on_llm_new_token = on_llm_new_token
self.on_llm_end = on_llm_end
self.on_chain_end = on_chain_end
def _get_api_key(self) -> str:
if self.llm["provider"] == "openai-chat" or self.llm["provider"] == "openai":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("OPENAI_API_KEY")
)
if self.llm["provider"] == "anthropic":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("ANTHROPIC_API_KEY")
)
if self.llm["provider"] == "cohere":
return (
self.llm["api_key"]
if "api_key" in self.llm
else config("COHERE_API_KEY")
)
def _get_tool(self) -> Any:
try:
if self.tool.type == "SEARCH":
tools = get_search_tool()
return tools
except Exception:
return None
def _get_prompt(self) -> Any:
if self.prompt:
if self.tool:
prompt = CustomPromptTemplate(
template=self.prompt.template,
tools=self._get_tool(),
input_variables=self.prompt.input_variables,
)
else:
prompt = PromptTemplate(
input_variables=self.prompt.input_variables,
template=self.prompt.template,
)
return prompt
else:
if self.tool:
return CustomPromptTemplate(
template=agent_template,
tools=self._get_tool(),
input_variables=[
"human_input",
"intermediate_steps",
"chat_history",
],
)
return default_chat_prompt
def _get_llm(self) -> Any:
if self.llm["provider"] == "openai-chat":
return (
ChatOpenAI(
temperature=0,
openai_api_key=self._get_api_key(),
model_name=self.llm["model"],
streaming=self.has_streaming,
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else ChatOpenAI(model_name=self.llm["model"])
)
if self.llm["provider"] == "openai":
return OpenAI(
model_name=self.llm["model"], openai_api_key=self._get_api_key()
)
if self.llm["provider"] == "anthropic":
return (
ChatAnthropic(
streaming=self.has_streaming,
anthropic_api_key=self._get_api_key(),
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else ChatAnthropic(anthropic_api_key=self._get_api_key())
)
if self.llm["provider"] == "cohere":
return (
Cohere(
cohere_api_key=self._get_api_key(),
model=self.llm["model"],
callbacks=[
StreamingCallbackHandler(
on_llm_new_token_=self.on_llm_new_token,
on_llm_end_=self.on_llm_end,
on_chain_end_=self.on_chain_end,
)
],
)
if self.has_streaming
else Cohere(cohere_api_key=self._get_api_key(), model=self.llm["model"])
)
# Use ChatOpenAI as default llm in agents
return ChatOpenAI(temperature=0, openai_api_key=self._get_api_key())
def _get_memory(self) -> Any:
if self.has_memory:
memories = prisma.agentmemory.find_many(
where={"agentId": self.id},
order={"createdAt": "desc"},
take=5,
)
history = ChatMessageHistory()
[
history.add_ai_message(memory.message)
if memory.agent == "AI"
else history.add_user_message(memory.message)
for memory in memories
]
memory = ConversationBufferMemory(
chat_memory=history, memory_key="chat_history"
)
return memory
return None
def _get_document(self) -> Any:
if self.document:
embeddings = OpenAIEmbeddings()
docsearch = Pinecone.from_existing_index(
"superagent", embedding=embeddings, namespace=self.document.id
)
return docsearch
return None
def get_agent(self) -> Any:
llm = self._get_llm()
memory = self._get_memory()
document = self._get_document()
tools = self._get_tool()
if self.document:
if self.document.type != "OPENAPI":
question_generator = LLMChain(
llm=OpenAI(temperature=0), prompt=CONDENSE_QUESTION_PROMPT
)
doc_chain = load_qa_chain(
llm, chain_type="stuff", prompt=QA_PROMPT, verbose=True
)
agent = ConversationalRetrievalChain(
retriever=document.as_retriever(),
combine_docs_chain=doc_chain,
question_generator=question_generator,
memory=memory,
get_chat_history=lambda h: h,
)
elif self.document.type == "OPENAPI":
requests_wrapper = (
RequestsWrapper(
headers={
self.document.authorization[
"key"
]: self.document.authorization["value"]
}
)
if self.document.authorization
else RequestsWrapper()
)
yaml_response = requests.get(self.document.url)
content = yaml_response.content
raw_odds_api_spec = yaml.load(content, Loader=yaml.Loader)
odds_api_spec = reduce_openapi_spec(raw_odds_api_spec)
agent = planner.create_openapi_agent(
odds_api_spec, requests_wrapper, llm
)
elif self.tool:
output_parser = CustomOutputParser()
tool_names = [tool.name for tool in tools]
llm_chain = LLMChain(llm=llm, prompt=self._get_prompt())
agent_config = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)
agent = AgentExecutor.from_agent_and_tools(
agent=agent_config, tools=tools, verbose=True, memory=memory
)
else:
agent = LLMChain(
llm=llm, memory=memory, verbose=True, prompt=self._get_prompt()
)
return agent
| [] |
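A minimal construction sketch for the Agent class above; the record is a stand-in for the Prisma agent row (field names taken from __init__), the values are invented, and the app's environment (database, API keys) is assumed to be configured. Which chain type comes back depends on the record's document and tool fields.

from types import SimpleNamespace

record = SimpleNamespace(
    id="agent-1",                     # hypothetical values throughout
    document=None,
    hasMemory=False,
    type="CHAT",
    llm={"provider": "openai-chat", "model": "gpt-3.5-turbo"},
    prompt=None,
    tool=None,
)
chain = Agent(record).get_agent()     # with no document or tool this is a plain LLMChain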
2024-01-10 | while-basic/superagent | app~lib~parsers.py | import re
from typing import Union
from langchain.agents import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
if "Final Answer:" in llm_output:
return AgentFinish(
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{llm_output}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(
tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output
)
| [] |
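Two illustrative inputs for the parser above; the tool name and texts are invented.

parser = CustomOutputParser()

step = parser.parse('I should look this up.\nAction: Search\nAction Input: "LangChain"')
print(step.tool, step.tool_input)              # -> Search LangChain

done = parser.parse("Final Answer: LangChain is a framework for building LLM apps.")
print(done.return_values["output"])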
2024-01-10 | edly-io/ai-coach-xblock | ai_coach~ai_coach.py | from xblockutils.studio_editable import StudioEditableXBlockMixin
from xblock.fields import Float, Integer, Scope, String
from xblock.core import XBlock
from xblock.completable import CompletableXBlockMixin
from web_fragments.fragment import Fragment
from django.template import Context, Template
from django.conf import settings
import pkg_resources
import logging
from openai import OpenAI
log = logging.getLogger(__name__)
def _(text): return text
CHAT_COMPLETION_MODELS = ['gpt-3.5-turbo']
TEXT_COMPLETION_MODELS = [
'text-davinci-003',
'text-davinci-002',
'text-curie-001',
'text-babbage-001',
'text-ada-001',
]
AI_MODELS = CHAT_COMPLETION_MODELS + TEXT_COMPLETION_MODELS
@XBlock.wants('i18n')
class AICoachXBlock(XBlock, StudioEditableXBlockMixin, CompletableXBlockMixin):
"""
AI Coach xblock - Helps student to ask for improvement of their answer once
"""
display_name = String(
display_name=_('Display Name'),
help=_('Display name for this module'),
default="AI Coach",
scope=Scope.settings
)
question = String(
display_name=_('Question'),
default='',
scope=Scope.settings,
multiline_editor=True,
help=_('The question asked by the teacher'),
)
student_answer = String(
display_name=_('Answer'),
default='',
scope=Scope.user_state,
help=_('The answer provided by Student')
)
context = String(
display_name=_('Context'),
default="",
scope=Scope.settings,
multiline_editor=True,
help=_("Write the question context here"),
)
feedback_threshold = Integer(
display_name=_('Feedback Threshold'),
default=1, scope=Scope.settings,
help=_("Maximum no. of times student asks for feedback")
)
feedback_count = Integer(
default=0, scope=Scope.user_state,
help=_("No. of times student asks for feedback")
)
api_key = String(
display_name=_("API Key"),
default=settings.OPENAI_SECRET_KEY,
scope=Scope.settings,
help=_(
"Your OpenAI API key, which can be found at <a href='https://platform.openai.com/account/api-keys' target='_blank'>https://platform.openai.com/account/api-keys</a>"
),
)
model_name = String(
display_name=_("AI Model Name"), values=AI_MODELS,
default="text-davinci-003", scope=Scope.settings,
help=_("Select an AI Text model.")
)
temperature = Float(
display_name=_('Temperature'),
default=0.5,
values={'min': 0.1, 'max': 2, 'step': 0.1},
scope=Scope.settings,
help=_(
'Higher values like 0.8 will make the output more random, while lower values \n like 0.2 will make it more focused and deterministic.'
)
)
description = String(
display_name=_('Description'),
default='Description here...',
scope=Scope.settings,
help=_('Any Description')
)
editable_fields = [
'display_name',
'context',
'question',
'model_name',
'api_key',
'temperature',
'description',
'feedback_threshold'
]
def get_openai_client(self):
"""
Initialize and return an OpenAI client using the API key stored in the XBlock settings.
"""
api_key = self.api_key
try:
client = OpenAI(api_key=api_key)
return client
        except Exception:
            # Signal failure to callers, which check the client for None before using it
            return None
def resource_string(self, path):
"""Handy helper for getting resources from our kit."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def get_context(self):
return {
'title': self.display_name,
'question': self.question,
'student_answer': self.student_answer,
'feedback_count': self.feedback_count,
'feedback_threshold': self.feedback_threshold,
}
def render_template(self, template_path, context):
"""Handy helper for rendering html template."""
template_str = self.resource_string(template_path)
template = Template(template_str)
context = self.get_context()
return template.render(Context(context))
def student_view(self, context=None):
"""
The primary view of the AITutorXBlock, shown to students
when viewing courses.
"""
html = self.render_template("static/html/ai_coach.html", context)
frag = Fragment(html)
frag.add_css(self.resource_string("static/css/ai_coach.css"))
frag.add_javascript(self.resource_string("static/js/src/ai_coach.js"))
frag.initialize_js('AICoachXBlock', json_args=self.get_context())
return frag
def get_chat_completion(
self, prompt='', model='gpt-3.5-turbo', temperature=0.5, max_tokens=150, n=1
):
""" Returns the improvement for student answer using ChatGPT Model """
client = self.get_openai_client()
if client is None:
return {'error': _('Unable to initialize OpenAI client. Please check configuration.')}
messages = [{"role": "user", "content": prompt}]
try:
response = client.chat.completions.create(messages=messages,
model=model,
temperature=temperature,
max_tokens=max_tokens,
n=n)
except Exception as err:
log.error(err)
return {'error': _('Unable to connect to AI-coach. Please contact your administrator')}
        return {'response': response.choices[0].message.content}
def get_completion(
self, prompt='', model='text-davinci-003', temperature=0.5, max_tokens=150, n=1
):
""" Returns the improvement for student answer using Text AI Model """
client = self.get_openai_client()
if client is None:
return {'error': _('Unable to initialize OpenAI client. Please check configuration.')}
try:
response = client.completions.create(
prompt=prompt, model=model, temperature=temperature, max_tokens=max_tokens, n=n)
except Exception as err:
log.error(err)
return {'error': _('Unable to connect to AI-coach. Please contact your administrator')}
return {'response': response.choices[0].text}
@XBlock.json_handler
def ask_from_coach(self, data, suffix=''):
if not data['answer']:
return {'error': _('Answer must be required')}
if self.feedback_count >= self.feedback_threshold:
return {'error': _("You've exhausted all available chances to ask the coach for help")}
student_answer = data['answer'].strip()
prompt = self.context.replace('{{question}}', f'"{self.question}"')
prompt = prompt.replace('{{answer}}', f'"{student_answer}"')
if self.model_name in CHAT_COMPLETION_MODELS:
response = self.get_chat_completion(
prompt, self.model_name, self.temperature
)
elif self.model_name in TEXT_COMPLETION_MODELS:
            response = self.get_completion(
                prompt, self.model_name, self.temperature
            )
        else:
            return {'error': _('Selected AI model is not supported')}
if 'error' in response:
return {'error': response['error']}
coach_answer = response['response']
self.feedback_count += 1
return {
'success': True,
'coach_answer': coach_answer,
'feedback_count': self.feedback_count,
'feedback_threshold': self.feedback_threshold
}
@XBlock.json_handler
def submit_answer(self, data, suffix=''):
if not data['answer']:
return {'error': _('Answer must be required')}
self.student_answer = data['answer'].strip()
self.emit_completion(1.0)
return {'success': True}
@staticmethod
def workbench_scenarios():
"""A canned scenario for display in the workbench."""
return [
("AICoachXBlock",
"""<ai_coach/>
"""),
("Multiple AICoachXBlock",
"""<vertical_demo>
<ai_coach/>
<ai_coach/>
<ai_coach/>
</vertical_demo>
"""),
]
| [
"{{question}}",
"\"PLACEHOLDER\"",
"{{answer}}"
] |
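A small illustration of how ask_from_coach fills in the context template above; the template text, question and answer are invented.

context = "A student answered the question {{question}} with {{answer}}. Suggest one concrete improvement."
question = "What causes the seasons?"
student_answer = "The Earth gets closer to the Sun in summer."

prompt = context.replace('{{question}}', f'"{question}"')
prompt = prompt.replace('{{answer}}', f'"{student_answer}"')
print(prompt)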
2024-01-10 | bclavie/dnebackend | app~website.py | import os
import random
import json
from tracemalloc import start
import openai
from typing import Literal
from retry import retry
import time
from app.simple_redis import redis_store, redis_retrieve, redis_check
# openai.api_key = os.getenv("OPENAI_KEY")
openai.api_type = "azure"
openai.api_base = "https://silicongrovegpt.openai.azure.com/"
# openai.api_base = "https://thispagedoesnotexist.openai.azure.com/"
openai.api_version = "2023-07-01-preview"
openai.api_key = os.getenv("AZURE_OPENAI_KEY")
# openai.api_key = "fa67770c7d4143aa89117da2ebe19dd3"
@retry(tries=3, delay=0.2)
def _gpt(messages):
print("trying...")
print(openai.api_base)
    # Temporary kill-switch: remove the next line to re-enable calls to the API
    raise Exception("no calls right now thanks")
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-0613",
engine="gpt35-16k-sg",
# engine="turbo-west",
messages=messages,
temperature=0.5,
max_tokens=2300,
)
# print(response)
print('yay!')
# print(response)
content = response["choices"][0]["message"]["content"]
website = parse_html(content)
return content, website
# @retry(tries=3, delay=0.2)
# def _gpt(messages):
# print("trying...")
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-0613",
# messages=messages,
# temperature=0.4,
# max_tokens=1500,
# )
# # print(response)
# print('yay!')
# print(response)
# content = response["choices"][0]["message"]["content"]
# website = parse_html(content)
# return content, website
SYSTEM_MESSAGE = """You are an AI programmer specialised in creating single-file demo websites. You are modelled after the world's best programmers, such as Jeff Dean and Grady Booch. Your programming skills are unparalleled, and you use them to perform the requests for your users. You always iterate on your design, to reach the best possible page."""
START_USER_MESSAGE = """Hey! You're the world's best programming AI expert, modelled after Jeff Dean and Grady Booch. Your skills in creating efficient, beautiful, one-page website demos are unparalleled.
Please, create a sample one-page landing page for a {theme} {type_}. Make up everything. Make sure the CSS is aligned with theme.
Don't include content like copyright notices or similar things! Try to not make the page too big too :)
You can use bootstrap, html5, css and javascript. You will make sure your answer is in a markdown codeblock, starting with "```html" and ending with "````". You must include everything within this code block, including the css <style> and javascript <script>. You cannot provide any other file but this one big html file.
Let's go!
"""
REFINE_1 = """Good start... Now make it look better! Improve on the design! Improve on the colour scheme... Ensure your website looks fantastic and very modern!"""
REFINE_2 = """You're doing great... Remember, you don't have access to images, so think of something to replace them. Maybe ASCII? Keep on improving. Self-critique and improve on the website, return the updated page in a code block."""
REFINE_PERSO = """This is good, but how about making it a bit more personalised? Give the website a name, write some content, don't just stick to the name by what it is! Return an improved version of the page in a code block."""
REFINE_4 = """Time to find some more... Jeff Dean himself would review the website, but he's busy at the moment. Please, try to make do without the review and improve the code. If you have clickable buttons, maybe open a small closable overlay on click? Return an improved version of the page based on your findings."""
REFINE_5 = """Okay, it's time to finish up, and add an ad if you can. Add some content and better design if you can. Please insert one of those three ads somewhere and return a code block."""
APPRAISAL = """As the lead AI Programming Expert modelled after Jeff Dean, you're shown this website made by """
REFINES = [REFINE_1, REFINE_2, REFINE_PERSO, REFINE_4, REFINE_5]
def store_website_in_redis(key: str, website: str, messages: dict, response: str, iteration: int=0, start: bool = False):
key = f"{key}:website"
if start:
redis_json = {}
redis_json['website'] = {}
redis_json['website']['v0'] = website
redis_json['most_recent'] = 0
else:
redis_json = redis_retrieve(key)
redis_json['website'][f'v{iteration}'] = website
redis_json['most_recent'] = iteration
messages_to_store = messages + [{"role": "assistant", "content": response}]
redis_json['messages'] = messages_to_store
redis_store(key, redis_json)
def store_fetch_in_redis(key: str, start: bool = False):
key = f"{key}:interaction"
if start:
redis_json = {}
redis_json['interaction'] = 0
else:
redis_json = redis_retrieve(key)
redis_json['interaction'] += 1
redis_store(key, redis_json)
def generate_website(session_id: str):
theme = ""
type_ =""
messages = [
{"role": "system", "content": SYSTEM_MESSAGE},
{"role": "user", "content": START_USER_MESSAGE.format(theme=theme, type_=type_)},
]
start_time = time.time()
response, website = _gpt(messages)
print(f"Generated in: {time.time() - start_time}")
start_time = time.time()
store_fetch_in_redis(key=session_id, start=True)
store_website_in_redis(key=session_id, website= website, messages= messages, response= response, iteration= 0, start= True)
print(f"Stored in: {time.time() - start_time}")
return website
def fetch_iteration(key: str, target_interaction: int | str = "default"):
if target_interaction == "default":
current_interaction = redis_retrieve(f"{key}:interaction")['interaction'] + 1
else:
current_interaction = target_interaction
current_website = redis_retrieve(f"{key}:website")['website'][f"v{current_interaction}"]
store_fetch_in_redis(key=key)
return current_website, current_interaction
def parse_html(response):
try:
assert "```html" in response
assert "```" in response.split("```html")[1]
except:
print("______")
print("______")
print("ASSERTION ERROR")
print("______")
print("______")
print(response)
raise AssertionError
return response.split("```html")[1].split("```")[0]
def iterate_on_website(session_id: str):
for i in range(0, len(REFINES)):
print(f"iteration {i} for {session_id}")
print('doing this')
if i == 4:
if random.random() > 0.5:
# Appraisal
pass
iteration = i + 1
prompt = redis_retrieve(f"{session_id}:website")['messages']
if len(prompt) >= 5:
prompt = prompt[:2] + prompt[-3:]
prompt.append({"role": "user", "content": REFINES[i]})
# keep all the elements except the first assistant message, and the first user reply
# we need to keep elements 0 and 1 because they are the system message and the first user message
response, website = _gpt(prompt)
store_website_in_redis(key=session_id, website= website, messages= prompt, response= response, iteration= iteration, start= False)
print(f'stored iteration {iteration}') | [
"PLACEHOLDER:website",
"PLACEHOLDERPLACEHOLDER"
] |
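A rough sketch of the intended flow through the functions above; the session id is invented, and the kill-switch line in _gpt has to be removed before any of this can actually call the API.

session_id = "demo-session"
first_page = generate_website(session_id)    # generates and stores v0 in Redis
iterate_on_website(session_id)               # stores the refined versions v1..v5
page, version = fetch_iteration(session_id)  # returns the next version to show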
2024-01-10 | bclavie/dnebackend | app~simple_inference.py | import os
import random
import json
import openai
from typing import Literal
from retry import retry
import time
from app.simple_redis import redis_store, redis_retrieve, redis_check
# openai.api_type = "azure"
# openai.api_base = os.getenv("AZURE_OPENAI_ENDPOINT")
# openai.api_version = "2023-07-01-preview"
# openai.api_key = os.getenv("AZURE_OPENAI_KEY")
openai.api_key = os.getenv("OPENAI_KEY")
FUNCTIONS = [
{
"name": "play_story",
"description": "Generate the story flashcard based on the content and potential choices.",
"parameters": {
"type": "object",
"properties": {
"content": {
"type": "string",
"description": "The content of the story, in 25 words or less. This is the main development part!",
},
"choice_A": {
"type": "string",
"description": "The first option to continue the story, expressed as a short (4 word maximum) action.",
},
"choice_B": {
"type": "string",
"description": "The second option to continue the story, expressed as a short (4 word maximum) action.",
},
"is_over": {
"type": "boolean",
"description": "whether or not you have chosen to end the story.",
},
},
"required": ["content", "choice_A", "choice_B", "is_over"],
},
},
]
SYSTEM_MESSAGE = """You are GaimanAI, an AI storyteller modelled after Neil Gaiman. You write in a new format: short continue-your-own-adventure flashcards called FlashShorts. You write short bits of a story at a time, providing succinct option to continue. You do so within the constraints given to you."""
USER_MESSAGE = """Hi, today we're going to write a FlashShort story in the style of Neil Gaiman. The setting is {setting}. We're doing this choose-your-own adventure, flashcard style: write less than 25 words at a time, and provide the user two options to continue the story. The options you provide are choices A and B. They must be super short, 4 words at most, and contain a verb. Ensure the actions both offer different ways to continue the story. Make sure you write a compelling start to the story, even though your "content" value can only be 25 words at most! Craft a nice story and a nice starting point to the story!"""
SETTING_STRUCTURE = """a {style}{world}"""
GPT_MODEL = "gpt-3.5-turbo-0613"
FUNCTION_CALL = {"name": "play_story"}
# @retry(tries=3, delay=0.2)
def _gpt(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=FUNCTIONS,
function_call=FUNCTION_CALL, # auto is default, but we'll be explicit
)
# print(response)
function_args = json.loads(
response["choices"][0]["message"]["function_call"]["arguments"]
)
# print(function_args)
assert "content" in function_args
assert "choice_A" in function_args
assert "choice_B" in function_args
return function_args
POTENTIAL_STYLES = [
"a noir-type setting ",
"a fantasy setting ",
"a thrilling setting ",
"a sci-fi setting ",
"a superhero setting ",
"a mysterious setting ",
] + [""] * 15
POTENTIAL_WORLDS = [
"in a cyber-punk universe",
"in the 20s",
"in an ancient kingdom",
"in a magical universe",
"in a distant, high-tech future",
"in a post-apocalyptic world",
"in ancient Rome",
"in ancient Greece",
"in ancient Egypt",
"in a medieval kingdom",
"in a small village",
"in a massive city",
"in New York City",
"in rural America (pick a state!)",
"in the north of Canada",
"in a mysterious land",
"in an extremely advanced city-state",
"in the ruins of an ancient generation",
"beginning in a small town",
"beginning in a pub",
"in a seemingly inoccuous place",
"in a place of your choice, be creative!",
"in a world where mythology is real",
"in a world that has outlawed technology",
]
def build_start_messages():
user_prompt = USER_MESSAGE.format(
setting=SETTING_STRUCTURE.format(
style=random.choice(POTENTIAL_STYLES), world=random.choice(POTENTIAL_WORLDS)
)
)
user_prompt = {"role": "user", "content": user_prompt}
messages = [{"role": "system", "content": SYSTEM_MESSAGE}, user_prompt]
return messages
def format_for_story_logging(response):
assistant_message = {"role": "assistant", "content": json.dumps(response)}
return assistant_message
def start_story(story_id: str):
messages = build_start_messages()
response = _gpt(messages=messages)
store_in_redis(story_id, response, messages, prompt=messages, is_start=True)
return response
def store_in_redis(
story_id: str,
response: dict,
messages: list,
redis_story: dict | None = None,
is_start: bool = False,
end_in: int | None = None,
prompt: list | None = None,
):
if is_start:
redis_story = {}
redis_story["initial_prompt"] = prompt
redis_story["story"] = []
redis_story["end_in"] = 999
redis_story["index"] = -1
redis_story["follow_up_index"] = {"A": 0, "B": 0}
if end_in:
redis_story["end_in"] = end_in
redis_story["messages"] = messages + [format_for_story_logging(response)]
redis_story["story"].append(response)
redis_story["follow_up"] = {}
redis_story["index"] += 1
redis_store(story_id, redis_story)
def store_followup_in_redis(story_id: str, followup, user_choice, followup_index: int):
redis_story = redis_retrieve(story_id)
redis_story["follow_up"][user_choice] = followup
redis_story["follow_up_index"][user_choice] = followup_index
redis_store(story_id, redis_story)
def generate_followup(story_id: str, user_choice: Literal["A", "B"], story_json):
is_ending = story_json["end_in"] < 99
messages = story_json["messages"]
chosen_path = (
story_json["story"][-1]["choice_A"]
if user_choice == "A"
else story_json["story"][-1]["choice_B"]
)
if len(messages) % 4 == 0 and not is_ending:
messages.append(
{
"role": "system",
"content": "Well done so far! Remember to continue the story in the style of Neil Gaiman, using the play_story function, never mentioning your prompt and keeping it both engaging and within the FlashShort system!",
}
)
elif is_ending:
if story_json["end_in"] > 1:
messages.append(
{
"role": "system",
"content": f"The story is nearing completion! Begin wrapping up, you must end after {story_json['end_in']} more actions!",
}
)
elif story_json["end_in"] == 1:
messages.append(
{
"role": "system",
"content": "The story is ending! You must end the story in the next message! This is the last action you can give the user! Please make sure you're ready to wrap nicely no matter their choice!",
}
)
else:
messages.append(
{
"role": "system",
"content": """The story is finished. Both "choice_A" and "choice_B" must say "End the story...", set the is_over flag to true, and write the conclusion of the story in the content!""",
}
)
chosen_path = f"{user_choice}, {chosen_path}"
messages.append({"role": "user", "content": chosen_path})
response = _gpt(messages=messages)
return response
def generate_followups(
story_id: str,
):
story_json = redis_retrieve(story_id)
start_time = time.time()
follow_A = generate_followup(story_id, "A", story_json)
print(f"follow up A generated in {time.time() - start_time}")
store_followup_in_redis(
story_id=story_id,
followup=follow_A,
user_choice="A",
followup_index=story_json["follow_up_index"]["A"] + 1,
)
start_time = time.time()
follow_B = generate_followup(story_id, "B", story_json)
store_followup_in_redis(
story_id=story_id,
followup=follow_B,
user_choice="B",
followup_index=story_json["follow_up_index"]["B"] + 1,
)
print(f"follow up B generated in {time.time() - start_time}")
def continue_story(story_id: str, user_choice: Literal["A", "B"]):
redis_json = redis_retrieve(story_id)
print(redis_json["index"])
print(redis_json["follow_up_index"])
if redis_json["follow_up_index"][user_choice] > redis_json["index"]:
print("using stored response")
response = redis_json["follow_up"][user_choice]
else:
print("generating response")
response = generate_followup(story_id, user_choice, redis_json)
messages = redis_json["messages"]
end_in = redis_json["end_in"] if redis_json["end_in"] < 10 else None
if not end_in:
if len(messages) > 7:
should_end = (
True if random.randint(0, 100) > 66 - (len(messages) * 2) else False
)
if should_end:
end_in = random.randint(2, 6)
store_in_redis(
story_id=story_id,
response=response,
messages=messages,
end_in=end_in,
redis_story=redis_json,
is_start=False,
)
return response
def generate_response(story_id, user_choice):
if not bool(redis_check(story_id)):
return start_story(story_id)
if user_choice in ["A", "B"]:
return continue_story(story_id, user_choice)
print(user_choice)
raise ValueError("Invalid user choice")
| [
"The story is nearing completion! Begin wrapping up, you must end after PLACEHOLDER more actions!",
"re going to write a Flasa {style}{world}he style of Neil Gaiman. The setting is {s[",
"The story is finished. Both \"choice_A\" and \"choice_B\" must say \"End the story...\", set the is_over flag to true, and write the conclusion of the story in the content!",
"PLACEHOLDER, chosen_path1375c0ba-dfd0-4771-a1c8-d78ca8d5eb85",
"The story is ending! You must end the story in the next message! This is the last action you can give the user! Please make sure you're ready to wrap nicely no matter their choice!",
"Well done so far! Remember to continue the story in the style of Neil Gaiman, using the play_story function, never mentioning your prompt and keeping it both engaging and within the FlashShort system!",
"{'role': 'user', 'content': PLACEHOLDER}",
"You are GaimanAI, an AI storyteller modelled after Neil Gaiman. You write in a new format: short continue-your-own-adventure flashcards called FlashShorts. You write short bits of a story at a time, providing succinct option to continue. You do so within the constraints given to you.",
"content",
", ",
"{'type': 'string', 'description': 'The content of the story, in 25 words or less. This is the main development part!'}"
] |
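A short usage sketch for the story functions above; the story id is invented, and a reachable Redis plus a valid OpenAI key are assumed.

story_id = "demo-story-1"
card = generate_response(story_id, None)      # no stored story yet, so this starts one
print(card["content"], card["choice_A"], card["choice_B"])

generate_followups(story_id)                  # pre-generate both branches
next_card = generate_response(story_id, "A")  # continue along choice A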
2024-01-10 | pjaskulski/gpt_historical_text | src~keyword_test_imie.py | """ openai test - extraction basic info about main character """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "morteski_ludwik.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text give the name, surname and date of death of the main character. Show the result in the form of a table.\n\n {data}\n\n | Name | Surname | Date of death |",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text']) | [
"From this text give the name, surname and date of death of the main character. Show the result in the form of a table.\n\n PLACEHOLDER\n\n | Name | Surname | Date of death |"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~rosicki_relacje_rodzinne.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.organization = OPENAI_ORG_ID
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "rosicki_andrzej.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text extract information about parents, wife, siblings, children and grandchildren for the main character:\n\n {data}",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "rosicki_relacje_rodzinne.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text extract information about parents, wife, siblings, children and grandchildren for the main character:\n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~keyword_test_urzedy.py | """ openai test - extraction info about functions or positions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "morteski_ludwik.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text, extract information about the offices, functions and positions held by the person Ludwik Mortęski, present them in the form of a list:\n\n {data}",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text']) | [
"From this text, extract information about the offices, functions and positions held by the person Ludwik Mortęski, present them in the form of a list:\n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~gotland_jan_mlodszy_urzedy.py | """ openai test - extraction info about functions or positions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "gotland_jan_mlodszy.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
# prompt - the query for GPT, with example answers if needed
prompt = "From this text, extract information about the occupation of the main character, " \
"present them in the form of a list:\n\n" + data
response = openai.Completion.create(
model="text-davinci-003", # najlepszy ale i najdroższy model w openai
prompt=prompt,
    temperature=0.3, # default is 0.5; lowering it makes the generated answer
                     # less 'fluent' but more specific
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "gotland_jan_mlodszy2.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text, extract information about the occupation of the main character, present them in the form of a list:\n\nPLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~sedlaczek_urzedy_babbage.py | """ openai test - extraction info about functions or positions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "sedlaczek_edward_jozef_short.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
# prompt - the query for GPT, with example answers if needed
prompt = "From this text, extract information about the offices, " \
"functions and positions held by the person Edward Józef Sedlaczek, " \
"present them in the form of a list:\n\n" + data
response = openai.Completion.create(
model="text-davinci-001", #
prompt=prompt,
    temperature=0.3, # default is 0.5; lowering it makes the generated answer
                     # less 'fluent' but more specific
max_tokens=360,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "sedlaczek_urzedy_curie.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text, extract information about the offices, functions and positions held by the person Edward Józef Sedlaczek, present them in the form of a list:\n\nPLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~psb_relacje_rodzinne.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
import spacy
import tiktoken
def get_data_gpt3(text:str='', query_prompt:str='') -> str:
""" zwraca wynik zapytania do GPT-3 """
result = ''
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"{query_prompt}\n\n {text}",
temperature=0.0,
max_tokens=900,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0)
result = response['choices'][0]['text']
return result
def short_version(text:str) -> str:
""" short_version"""
select_data = []
words = ['ojciec', 'matka', 'syn', 'córka', 'brat', 'siostra', 'żona',
'mąż', 'teść', 'teściowa', 'dziadek', 'babcia', 'wnuk', 'wnuczka',
'szwagier', 'szwagierka', 'siostrzeniec', 'siostrzenica', 'bratanek',
'bratanica', 'kuzyn', 'kuzynka', 'zięć', 'synowa', 'dziecko', 'wuj',
'ciotka', 'rodzina', 'krewni', 'krewny', "ożenić", "bezdzietny", "ożeniony", "zamężna",
"rodzic", "rodzice", "spokrewniony", "spokrewnieni", "małżeństwo", "rodzeństwo",
"bratankowie", "siostrzeńcy", "bratanice", "siostrzenice", "małżeństwa"]
doc = nlp(text)
sentences = [sent.text for sent in doc.sents]
    # first five sentences
select_data = sentences[0:5]
    # from the middle sentences, keep only those that suggest family relations
for i in range(5,len(sentences) - 5):
sent_doc = nlp(sentences[i])
for token in sent_doc:
if token.lemma_ in words:
select_data.append(sentences[i])
break
    # last five sentences
select_data += sentences[len(sentences) - 5:]
result = ' '.join(select_data)
return result
def count_tokens(text:str, model:str = "gpt2") -> int:
""" funkcja zlicza tokeny """
num_of_tokens = 0
enc = tiktoken.get_encoding(model)
num_of_tokens = len(enc.encode(text))
return num_of_tokens
# ------------------------------------------------------------------------------
if __name__ == '__main__':
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
    # spaCy for splitting the text into sentences
nlp = spacy.load('pl_core_news_md')
    # data from text files
data_folder = Path("..") / "data" / "psb_probki_200_txt_gpt3"
data_file_list = data_folder.glob('*.txt')
max_tokens = 3200
licznik = 0
for data_file in data_file_list:
        # limit the number of processed biograms
licznik += 1
if licznik > 50:
break
print(data_file)
with open(data_file, 'r', encoding='utf-8') as f:
data = f.read()
data_file_name = os.path.basename(data_file)
        # if the biogram is too large for the GPT-3 model (4000 tokens, minus 800
        # reserved for the answer), it is shortened to the passages most likely
        # to contain information about the subject's family relations
tokens_in_data = count_tokens(data, "gpt2")
        if tokens_in_data > max_tokens:
data = short_version(data)
prompt = "Na podstawie podanego tekstu wyszukaj " \
"wszystkich krewnych lub powinowatych głównego bohatera tekstu. " \
"Możliwe rodzaje pokrewieństwa: ojciec, matka, syn, córka, brat, siostra, żona, mąż, teść, teściowa, dziadek, babcia, wnuk, wnuczka," \
"szwagier, szwagierka, siostrzeniec, siostrzenica, bratanek, bratanica, kuzyn, kuzynka, zięć, synowa, teść bratanicy." \
"Wynik wypisz w formie listy nienumerowanej, " \
"w formie: główny bohater -> rodzaj pokrewieństwa -> osoba " \
"Każda pozycja w osobnej linii. Na przykład: " \
"- główny bohater -> brat -> Jan Kowalski" \
"- główny bohater -> siostra -> Anna" \
"Pomiń rodzaj pokrewieństwa jeżeli nie występuje w tekście. " \
"Jeżeli w tekście nie ma żadnych informacji o pokrewieństwach głównego bohatera napisz: brak danych."
file_output = Path("..") / "output" / "psb_probki_200_txt_gpt3" / data_file_name.replace('.txt', '.dane')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(data)
output = get_data_gpt3(data, prompt)
file_output = Path("..") / "output" / "psb_probki_200_txt_gpt3" / data_file_name.replace('.txt', '.relacje')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(output + '\n')
| [
"Na podstawie podanego tekstu wyszukaj wszystkich krewnych lub powinowatych głównego bohatera tekstu. Możliwe rodzaje pokrewieństwa: ojciec, matka, syn, córka, brat, siostra, żona, mąż, teść, teściowa, dziadek, babcia, wnuk, wnuczka,szwagier, szwagierka, siostrzeniec, siostrzenica, bratanek, bratanica, kuzyn, kuzynka, zięć, synowa, teść bratanicy.Wynik wypisz w formie listy nienumerowanej, w formie: główny bohater -> rodzaj pokrewieństwa -> osoba Każda pozycja w osobnej linii. Na przykład: - główny bohater -> brat -> Jan Kowalski- główny bohater -> siostra -> AnnaPomiń rodzaj pokrewieństwa jeżeli nie występuje w tekście. Jeżeli w tekście nie ma żadnych informacji o pokrewieństwach głównego bohatera napisz: brak danych.",
"PLACEHOLDER\n\n PLACEHOLDER"
] |
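A quick sanity check of the token-counting helper defined above; the sample sentence is invented.

sample = "Jan Kowalski urodził się w 1900 r. w Krakowie, był synem Adama i Marii."
print(count_tokens(sample, "gpt2"))   # number of GPT-2 BPE tokens in the sample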
2024-01-10 | pjaskulski/gpt_historical_text | src~psb_relacje_rodzinne_gpt4.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
import spacy
import tiktoken
def get_answer(model:str='gpt-4', text:str='', query_prompt:str='', text_example:str='', answer_example:str='') -> str:
""" funkcja konstruuje prompt do modelu GPT dostępnego przez API i zwraca wynik """
result = ''
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "Jesteś pomocnym asystentem."},
{"role":"user", "content":f"{query_prompt}\n\n{text_example}"},
{"role":"assistant", "content":answer_example},
{"role": "user", "content": f"{query_prompt}\n\n{text}"}
],
temperature=0.0,
top_p = 1.0)
result = response['choices'][0]['message']['content']
return result
def short_version(text:str) -> str:
""" short_version"""
select_data = []
words = ['ojciec', 'matka', 'syn', 'córka', 'brat', 'siostra', 'żona',
'mąż', 'teść', 'teściowa', 'dziadek', 'babcia', 'wnuk', 'wnuczka',
'szwagier', 'szwagierka', 'siostrzeniec', 'siostrzenica', 'bratanek',
'bratanica', 'kuzyn', 'kuzynka', 'zięć', 'synowa', 'dziecko', 'wuj',
'ciotka', 'rodzina', 'krewni', 'krewny', "ożenić", "bezdzietny", "ożeniony", "zamężna",
"rodzic", "rodzice", "spokrewniony", "spokrewnieni", "małżeństwo", "rodzeństwo",
"bratankowie", "siostrzeńcy", "bratanice", "siostrzenice", "małżeństwa"]
doc = nlp(text)
sentences = [sent.text for sent in doc.sents]
    # first five sentences
select_data = sentences[0:5]
    # from the middle sentences, keep only those that suggest family relations
for i in range(5,len(sentences) - 5):
sent_doc = nlp(sentences[i])
for token in sent_doc:
if token.lemma_ in words:
select_data.append(sentences[i])
break
    # last five sentences
select_data += sentences[len(sentences) - 5:]
result = ' '.join(select_data)
return result
def count_tokens(text:str, model:str = "gpt2") -> int:
""" funkcja zlicza tokeny """
num_of_tokens = 0
enc = tiktoken.get_encoding(model)
num_of_tokens = len(enc.encode(text))
return num_of_tokens
# ------------------------------------------------------------------------------
if __name__ == '__main__':
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
    # spaCy for splitting the text into sentences
nlp = spacy.load('pl_core_news_md')
    # data from text files
data_folder = Path("..") / "data" / "psb_probki_200_txt_gpt3"
data_file_list = data_folder.glob('*.txt')
max_tokens = 3200
licznik = 0
for data_file in data_file_list:
        # limit the number of processed biograms
licznik += 1
if licznik > 5:
break
print(data_file)
with open(data_file, 'r', encoding='utf-8') as f:
data = f.read()
data_file_name = os.path.basename(data_file)
        # if the biogram is too large for the GPT-4 model (4000 tokens, minus 800
        # reserved for the answer), it is shortened to the passages most likely
        # to contain information about the subject's family relations
tokens_in_data = count_tokens(data, "gpt2")
        if tokens_in_data > max_tokens:
data = short_version(data)
# ///
# Przykład 2: "Sapieha Jan Fryderyk h. Lis (1618–1664), pisarz polny kor. Był wnukiem woj.
# witebskiego Mikołaja (zob.), najstarszym synem podkomorzego włodzimierskiego
# Fryderyka (zm. 1626) i Ewy ze Skaszewskich, bratem oboźnego lit. Tomasza Kazimierza
# (zob.), bpa wileńskiego Aleksandra Kazimierza i krajczego lit. Krzysztofa Franciszka."
# Wynik:
# [{"relacja":"dziadek", "osoba":"Mikołaj Sapieha"},
# {"relacja":"ojciec", "osoba":"Fryderyk Spaieha"},
# {"relacja":"matka", "osoba":"Ewa ze Skaszewskich"},
# {"relacja":"brat", "osoba":"Tomasz Kazimierz Sapieha"},
# {"relacja":"brat", "osoba":"Aleksander Kazimierz Sapieha"},
# {"relacja":"brat", "osoba":"Krzysztof Franciszek Sapieha"}
# ]
query = """Na podstawie podanego tekstu wyszukaj
wszystkich krewnych lub powinowatych głównego bohatera/bohaterki tekstu, który
określony jest na początku pierwszego zdania tekstu.
Możliwe rodzaje pokrewieństwa: ojciec, matka, syn, córka, brat, siostra, żona,
mąż, teść, teściowa, dziadek, babcia, wnuk, wnuczka, szwagier, szwagierka,
siostrzeniec, siostrzenica, bratanek, bratanica, kuzyn, kuzynka, zięć, synowa,
wuj, ciotka, stryj, stryjenka.
Wynik przedstaw w formie listy obiektów JSON zawierających pola:
relacja: rodzaj pokrewieństwa (kim osoba była dla bohatera/bohaterki )
osoba: nazwa (imię i nazwisko osoby związanej relacją z bohaterem)
Uwzględnij tylko rodzaje pokrewieństwa, które występują w tekście. Relacje rodzinne,
które nie dotyczą bezpośrednio głównego bohater/bohaterki, np. między innymi osobami, zignoruj.
Jeżeli w tekście nie ma żadnych informacji o pokrewieństwach głównego bohatera/bohaterki napisz: brak danych.
"""
example_text = """
"Soderini Carlo (ok. 1537–1581), kupiec i bankier. Był jednym z pięciu synów Niccola i Annaleny
Ricasoli, młodszym bratem Bernarda. Jego bratanicą była Małgorzata Anna, żona Winfrida de Loeve. S.
ożenił się z Joanną, córką burgrabiego krakowskiego Adama Kurozwęckiego."
"""
example_answer = """
[{"relacja":"ojciec", "osoba":"Niccola Ricasoli"},
{"relacja":"matka": "osoba":"Annalena Ricasoli"},
{"relacja":"brat": "osoba":"Bernard"},
{"relacja":"bratanica": "osoba":"Małgorzata Anna"},
{"relacja":"żona": "osoba":"Joanna"},
{"relacja":"teść": "osoba":"Adam Kurozwęcki"}
]
"""
file_output = Path("..") / "output" / "gpt-4-api-dane" / data_file_name.replace('.txt', '.dane')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(data)
output = get_answer(model="gpt-4",
text=data,
query_prompt=query,
text_example=example_text,
answer_example=example_answer)
file_output = Path("..") / "output" / "gpt-4-api" / data_file_name.replace('.txt', '.relacje_gpt4')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(output + '\n')
| [
"PLACEHOLDER\n\nPLACEHOLDER",
"Jesteś pomocnym asystentem."
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~wejher_relacje.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
#OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "wejher.txt"
with open(file_data, 'r', encoding='utf-8') as f:
lines = f.readlines()
biogram = []
for line in lines:
line = line.strip()
if line != '---':
biogram.append(line)
else:
break
data = '\n'.join(biogram)
# prompt = "Na podstawie podanego tekstu wymień wszystkie " \
# "ważne postacie w życiu głównego bohatera tekstu. Wynik wypisz w formie listy. "
prompt = "Na podstawie podanego tekstu wymień wszystkie " \
"stanowiska i urzędy głównego bohatera tekstu. Wynik wypisz w formie listy. "
#prompt = "Na podstawie podanego tekstu z biografią podaj imię, nazwisko, herb, datę urodzenia, " \
# "datę śmierci, zawód głównego bohatera tekstu. Wynik wypisz w formie listy nienumerowanej. "
# prompt = "Na podstawie podanego tekstu wyszukaj " \
# "wszystkie relacje rodzinne głównego bohatera (tylko jego krewnych, powinowatych, teściów, szwagrów, szwagierki). " \
# "Wynik wypisz w formie listy nienumerowanej " \
# "z rodzajem pokrewieństwa w nawiasie. Na przykład: " \
# "- Jan Kowalski (brat) " \
# "- Anna (siostra) " \
# "Jeżeli w tekście nie ma takich informacji napisz: brak danych."
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"{prompt}\n\n {data}",
temperature=0.0,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "wejher.urzedy"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"Na podstawie podanego tekstu wymień wszystkie stanowiska i urzędy głównego bohatera tekstu. Wynik wypisz w formie listy. ",
"PLACEHOLDER\n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~swiejkowski_relacje.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
#OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "swiejkowski_prosty.txt"
with open(file_data, 'r', encoding='utf-8') as f:
lines = f.readlines()
biogram = []
for line in lines:
line = line.strip()
if line != '---':
biogram.append(line)
else:
break
data = '\n'.join(biogram)
# prompt = "Na podstawie podanego tekstu wymień wszystkie " \
# "ważne postacie w życiu głównego bohatera tekstu. Wynik wypisz w formie listy. "
#prompt = "Na podstawie podanego tekstu wymień wszystkie " \
# "stanowiska i urzędy głównego bohatera tekstu. Wynik wypisz w formie listy. "
#prompt = "Na podstawie podanego tekstu z biografią podaj imię, nazwisko, herb, datę urodzenia, " \
# "datę śmierci, zawód głównego bohatera tekstu. Wynik wypisz w formie listy nienumerowanej. "
# "takich jak: ojciec, matka, dziadkowie, brat, siostra, teść, teściowa, szwagier, szwagierka, zięć, synowa, kuzyn, wuj, ciotka. " \
prompt = "Na podstawie podanego tekstu wyszukaj " \
"wszystkich krewnych i powinowatych głównego bohatera. " \
"Wynik wypisz w formie listy nienumerowanej " \
"z rodzajem pokrewieństwa w nawiasie. Na przykład: " \
"- Jan Kowalski (brat) " \
"- Anna (siostra) " \
"Jeżeli w tekście nie ma informacji na temat jaichś rodzajów pokrewieństwa pomiń te rodzaje."
prompt = "Based on the given text, search for " \
" all relatives and affinities of the main character. " \
"List the result in the form of an unnumbered list " \
" with the type of kinship in parentheses. For example: " \
"- John Kowalski (brother) " \
"- Anna (sister) " \
"If there is no information in the text about some types of kinship omit these types."
#prompt = "From the given text, search all the family relations of the main character, " \
# "based solely on the facts in the text. " \
# "Write the result in the form of a list. " \
# "If there is no such information in the text write: no data."
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"{prompt}\n\n {data}",
temperature=0.0,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "swiejkowski.relacje"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"PLACEHOLDER\n\n PLACEHOLDER",
"Na podstawie podanego tekstu wyszukaj wszystkich krewnych i powinowatych głównego bohatera. Wynik wypisz w formie listy nienumerowanej z rodzajem pokrewieństwa w nawiasie. Na przykład: - Jan Kowalski (brat) - Anna (siostra) Jeżeli w tekście nie ma informacji na temat jaichś rodzajów pokrewieństwa pomiń te rodzaje.",
"Based on the given text, search for all relatives and affinities of the main character. List the result in the form of an unnumbered list with the type of kinship in parentheses. For example: - John Kowalski (brother) - Anna (sister) If there is no information in the text about some types of kinship omit these types."
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~latin_tei.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
data = "Francisco Cruswicz magistro in theologia [in civitate Constantiensi commo-ranti]: Eidem decano eccl. s. Floriani extra muros Crac. (15 m. arg. p.), canonica-tus in eccl. s. Michaelis in castro Crac. (12 m. arg. p.), per obitum apud SA ea va-cante Nicolai Falkemberg vacans, confertur.;s. m. scholastico et cantori ac custodi Crac. XI, XIII."
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"Tag text and convert to TEI XML format:\n\n {data}",
temperature=0.3,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "latin_tei.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text']) | [
"Tag text and convert to TEI XML format:\n\n Francisco Cruswicz magistro in theologia [in civitate Constantiensi commo-ranti]: Eidem decano eccl. s. Floriani extra muros Crac. (15 m. arg. p.), canonica-tus in eccl. s. Michaelis in castro Crac. (12 m. arg. p.), per obitum apud SA ea va-cante Nicolai Falkemberg vacans, confertur.;s. m. scholastico et cantori ac custodi Crac. XI, XIII."
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~keyword_test_instytucje.py | """ openai test - extraction info about institutions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "morteski_ludwik.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text, extract information about institutions related to the the person Ludwik Mortęski, present them in the form of a list:\n\n {data}",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text']) | [
"From this text, extract information about institutions related to the the person Ludwik Mortęski, present them in the form of a list:\n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~sedlaczek_urzedy.py | """ openai test - extraction info about functions or positions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
#OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "sedlaczek_edward_jozef.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
# prompt - the query for GPT, with example answers if needed
prompt = "From this text, extract information about the offices, " \
"functions and positions held by the person Edward Józef Sedlaczek, " \
"present them in the form of a list:\n\n" + data
response = openai.Completion.create(
model="text-davinci-003", # najlepszy ale i najdroższy model w openai
prompt=prompt,
temperature=0.0, # default is 0.5; lowering it makes the generated answer less 'fluent'
# but more concrete and less random
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "sedlaczek_urzedy_temp_0_fp_0.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text, extract information about the offices, functions and positions held by the person Edward Józef Sedlaczek, present them in the form of a list:\n\nPLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~sedlaczek_instytucje.py | """ openai test - extraction info about institutions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "sedlaczek_edward_jozef.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text, extract information about institutions related to the the person Edward Józef Sedlaczek, present them in the form of a list:\n\n {data}",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "sedlaczek_instytucje.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text, extract information about institutions related to the the person Edward Józef Sedlaczek, present them in the form of a list:\n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~keyword_test_relacje.py | """ openai test - extraction info about institutions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "morteski_ludwik.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
example = "Ludwik Mortęski -> father -> woj. chełmiński Ludwik; " \
"Ludwik Mortęski -> son -> Melchior; " \
"Ludwik Mortęski -> mother -> Anna; "
prompt = f"From this text extract family relationships: {example} \n\n {data}"
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.3,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text']) | [
"From this text extract family relationships: PLACEHOLDER \n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~mikolaj_urzedy.py | """ openai test - extraction info about functions or positions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "mikolaj_z_woli.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
# prompt - the query for GPT, with example answers if needed
prompt = "From this text, extract information about the offices, " \
"held by the person Mikołaj z Woli, " \
"present them in the form of a list:\n\n" + data
response = openai.Completion.create(
model="text-davinci-003", # najlepszy ale i najdroższy model w openai
prompt=prompt,
temperature=0.3, # default is 0.5; lowering it makes the generated answer less 'fluent'
# but more concrete
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "mikolaj_urzedy.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text, extract information about the offices, held by the person Mikołaj z Woli, present them in the form of a list:\n\nPLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~sedlaczek_imie.py | """ openai test - extraction basic info about main character """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "sedlaczek_edward_jozef.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text give the name, surname, date of birth, date of death of the main character. Show the result in the form of a table.\n\n {data}\n\n | Name | Surname | Date of birth | Date of death |",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "sedlaczek_imie.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text']) | [
"From this text give the name, surname, date of birth, date of death of the main character. Show the result in the form of a table.\n\n PLACEHOLDER\n\n | Name | Surname | Date of birth | Date of death |"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~psb_relacje_rodzinne_gpt35.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
import spacy
import tiktoken
def get_answer(model:str='gpt-4', text:str='', query_prompt:str='', text_example:str='', answer_example:str='') -> str:
""" funkcja konstruuje prompt do modelu GPT dostępnego przez API i zwraca wynik """
result = ''
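# few-shot setup: a system role, one worked example (prompt + example text, then the expected answer), followed by the real query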
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "Jesteś pomocnym asystentem."},
{"role":"user", "content":f"{query_prompt}\n\n{text_example}"},
{"role":"assistant", "content":answer_example},
{"role": "user", "content": f"{query_prompt}\n\n{text}"}
],
temperature=0.0,
top_p = 1.0)
result = response['choices'][0]['message']['content']
return result
def short_version(text:str) -> str:
""" short_version"""
select_data = []
words = ['ojciec', 'matka', 'syn', 'córka', 'brat', 'siostra', 'żona',
'mąż', 'teść', 'teściowa', 'dziadek', 'babcia', 'wnuk', 'wnuczka',
'szwagier', 'szwagierka', 'siostrzeniec', 'siostrzenica', 'bratanek',
'bratanica', 'kuzyn', 'kuzynka', 'zięć', 'synowa', 'dziecko', 'wuj',
'ciotka', 'rodzina', 'krewni', 'krewny', "ożenić", "bezdzietny", "ożeniony", "zamężna",
"rodzic", "rodzice", "spokrewniony", "spokrewnieni", "małżeństwo", "rodzeństwo",
"bratankowie", "siostrzeńcy", "bratanice", "siostrzenice", "małżeństwa"]
doc = nlp(text)
sentences = [sent.text for sent in doc.sents]
# the first five sentences
select_data = sentences[0:5]
# from the middle sentences, only those that point to descriptions of family relations
for i in range(5,len(sentences) - 5):
sent_doc = nlp(sentences[i])
for token in sent_doc:
if token.lemma_ in words:
select_data.append(sentences[i])
break
# the last five sentences
select_data += sentences[len(sentences) - 5:]
result = ' '.join(select_data)
return result
def count_tokens(text:str, model:str = "gpt2") -> int:
""" funkcja zlicza tokeny """
num_of_tokens = 0
enc = tiktoken.get_encoding(model)
num_of_tokens = len(enc.encode(text))
return num_of_tokens
# ------------------------------------------------------------------------------
if __name__ == '__main__':
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# spaCy for splitting the text into sentences
nlp = spacy.load('pl_core_news_md')
# data from a text file
data_folder = Path("..") / "data" / "psb_probki_200_txt_gpt3"
data_file_list = data_folder.glob('*.txt')
max_tokens = 3200
licznik = 0
for data_file in data_file_list:
# limit the number of biograms processed
licznik += 1
if licznik > 5:
break
print(data_file)
with open(data_file, 'r', encoding='utf-8') as f:
data = f.read()
data_file_name = os.path.basename(data_file)
# if the biogram is too large for the gpt-4 model (4000 tokens minus 800
# reserved for the answer), it is shortened to the content most likely
# to contain information about the family relations of the biogram's subject
tokens_in_data = count_tokens(data, "gpt2")
if tokens_in_data > max_tokens:
data = short_version(data)
# ///
# Przykład 2: "Sapieha Jan Fryderyk h. Lis (1618–1664), pisarz polny kor. Był wnukiem woj.
# witebskiego Mikołaja (zob.), najstarszym synem podkomorzego włodzimierskiego
# Fryderyka (zm. 1626) i Ewy ze Skaszewskich, bratem oboźnego lit. Tomasza Kazimierza
# (zob.), bpa wileńskiego Aleksandra Kazimierza i krajczego lit. Krzysztofa Franciszka."
# Wynik:
# [{"relacja":"dziadek", "osoba":"Mikołaj Sapieha"},
# {"relacja":"ojciec", "osoba":"Fryderyk Spaieha"},
# {"relacja":"matka", "osoba":"Ewa ze Skaszewskich"},
# {"relacja":"brat", "osoba":"Tomasz Kazimierz Sapieha"},
# {"relacja":"brat", "osoba":"Aleksander Kazimierz Sapieha"},
# {"relacja":"brat", "osoba":"Krzysztof Franciszek Sapieha"}
# ]
query = """Na podstawie podanego tekstu wyszukaj
wszystkich krewnych lub powinowatych głównego bohatera/bohaterki tekstu, który
określony jest na początku pierwszego zdania tekstu.
Możliwe rodzaje pokrewieństwa: ojciec, matka, syn, córka, brat, siostra, żona,
mąż, teść, teściowa, dziadek, babcia, wnuk, wnuczka, szwagier, szwagierka,
siostrzeniec, siostrzenica, bratanek, bratanica, kuzyn, kuzynka, zięć, synowa,
wuj, ciotka, stryj, stryjenka.
Wynik przedstaw w formie listy obiektów JSON zawierających pola:
relacja: rodzaj pokrewieństwa (kim osoba była dla bohatera/bohaterki )
osoba: nazwa (imię i nazwisko osoby związanej relacją z bohaterem)
Uwzględnij tylko rodzaje pokrewieństwa, które występują w tekście. Relacje rodzinne,
które nie dotyczą bezpośrednio głównego bohater/bohaterki, np. między innymi osobami, zignoruj.
Jeżeli w tekście nie ma żadnych informacji o pokrewieństwach głównego bohatera/bohaterki napisz: brak danych.
"""
example_text = """
"Soderini Carlo (ok. 1537–1581), kupiec i bankier. Był jednym z pięciu synów Niccola i Annaleny
Ricasoli, młodszym bratem Bernarda. Jego bratanicą była Małgorzata Anna, żona Winfrida de Loeve. S.
ożenił się z Joanną, córką burgrabiego krakowskiego Adama Kurozwęckiego."
"""
example_answer = """
[{"relacja":"ojciec", "osoba":"Niccola Ricasoli"},
{"relacja":"matka": "osoba":"Annalena Ricasoli"},
{"relacja":"brat": "osoba":"Bernard"}],
{"relacja":"bratanica": "osoba":"Małgorzata Anna"}],
{"relacja":"żona": "osoba":"Joanna"}],
{"relacja":"teść": "osoba":"Adam Kurozwęcki"}],
"""
file_output = Path("..") / "output" / "gpt-35-api-dane" / data_file_name.replace('.txt', '.dane')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(data)
output = get_answer(model="gpt-3.5-turbo",
text=data,
query_prompt=query,
text_example=example_text,
answer_example=example_answer)
file_output = Path("..") / "output" / "gpt-35-api" / data_file_name.replace('.txt', '.relacje_gpt35')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(output + '\n')
| [
"PLACEHOLDER\n\nPLACEHOLDER",
"Jesteś pomocnym asystentem."
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~latin.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
data = "Francisco Cruswicz magistro in theologia [in civitate Constantiensi commo-ranti]: Eidem decano eccl. s. Floriani extra muros Crac. (15 m. arg. p.), canonica-tus in eccl. s. Michaelis in castro Crac. (12 m. arg. p.), per obitum apud SA ea va-cante Nicolai Falkemberg vacans, confertur.;s. m. scholastico et cantori ac custodi Crac. XI, XIII."
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text extract named entities. Show results as a list with type of each entity:\n\n {data}",
temperature=0.3,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "latin.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text']) | [
"From this text extract named entities. Show results as a list with type of each entity:\n\n Francisco Cruswicz magistro in theologia [in civitate Constantiensi commo-ranti]: Eidem decano eccl. s. Floriani extra muros Crac. (15 m. arg. p.), canonica-tus in eccl. s. Michaelis in castro Crac. (12 m. arg. p.), per obitum apud SA ea va-cante Nicolai Falkemberg vacans, confertur.;s. m. scholastico et cantori ac custodi Crac. XI, XIII."
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~bodniak_ner.py | """ openai test - extraction information from text
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "bodniak.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
prompt = f"W podanym tekście wyszukaj informacje o osobach, znalezione osoby " \
"wypisz w formie listy, jeżeli są znane funkcje lub urzędy osób " \
"umieść te informacje w nawiasach obok osób: \n\n" + data
prompt = f"W podanym tekście wyszukaj słowa kluczowe będące nazwami własnymi, znalezione słowa kluczowe " \
"wypisz w formie listy, w nawiasach obok słowa kluczowego umieść informację " \
"o typie słowa kluczowego np. osoba, miejsce, rzeka, morze.\n\n" + data
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.0,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
print(response['choices'][0]['text']) | [
"W podanym tekście wyszukaj słowa kluczowe będące nazwami własnymi, znalezione słowa kluczowe wypisz w formie listy, w nawiasach obok słowa kluczowego umieść informację o typie słowa kluczowego np. osoba, miejsce, rzeka, morze.\n\nPLACEHOLDER",
"W podanym tekście wyszukaj informacje o osobach, znalezione osoby wypisz w formie listy, jeżeli są znane funkcje lub urzędy osób umieść te informacje w nawiasach obok osób: \n\nPLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~keyword_test.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
#file_data = Path("..") / "data" / "morteski_ludwik.txt"
#with open(file_data, 'r', encoding='utf-8') as f:
# data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"Na podstawie poprzednich pytań wymień krewnych postaci Pion Maurice.",
temperature=0.0,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text']) | [
"Na podstawie poprzednich pytań wymień krewnych postaci Pion Maurice."
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~sedlaczek_relacje_rodzinne.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "sedlaczek_edward_jozef.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
response = openai.Completion.create(
model="text-davinci-003",
prompt=f"From this text extract information about parents, wife, siblings, children and grandchildren for the main character:\n\n {data}",
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "sedlaczek_relacje_rodzinne.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text extract information about parents, wife, siblings, children and grandchildren for the main character:\n\n PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~sedlaczek_urzedy_xml.py | """ openai test - extraction info about functions or positions from bio """
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_ORG_ID = os.environ.get('OPENAI_ORG_ID')
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# data from a text file
file_data = Path("..") / "data" / "sedlaczek_edward_jozef.txt"
with open(file_data, 'r', encoding='utf-8') as f:
data = f.read()
# prompt - the query for GPT, with example answers if needed
prompt = "From this text, extract information about the offices, " \
"functions and positions held by the person Edward Józef Sedlaczek, " \
"present them in the form of xml:\n\n" \
"<xml><person><office>office, function or position</office><person></xml>" + data
response = openai.Completion.create(
model="text-davinci-003", # najlepszy ale i najdroższy model w openai
prompt=prompt,
temperature=0.2, # default is 0.5; lowering it makes the generated answer less 'fluent'
# but more concrete
max_tokens=675,
top_p=1.0,
frequency_penalty=0.8,
presence_penalty=0.0
)
print(response['choices'][0]['text'])
file_output = Path("..") / "output" / "sedlaczek_urzedy_xml.txt"
with open(file_output, 'w', encoding='utf-8') as f:
f.write(response['choices'][0]['text'])
| [
"From this text, extract information about the offices, functions and positions held by the person Edward Józef Sedlaczek, present them in the form of xml:\n\n<xml><person><office>office, function or position</office><person></xml>PLACEHOLDER"
] |
2024-01-10 | pjaskulski/gpt_historical_text | src~psb_relacje_rodzinne_biogram.py | """ openai test - extraction info about parents, children, wife,
husband from bio
"""
import os
from pathlib import Path
from dotenv import load_dotenv
import openai
import spacy
import tiktoken
def get_answer(model:str='gpt-4', text:str='', prompt:str='') -> str:
""" funkcja konstruuje prompt do modelu GPT dostępnego przez API i zwraca wynik """
result = ''
response = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "Jesteś pomocnym asystentem, specjalistą w dziedzinie historii, genealogii, życiorysów znanych postaci."},
{"role": "user", "content": f"{prompt}\n\nTekst:\n\n{text}"}
],
temperature=0.0,
top_p = 1.0)
result = response['choices'][0]['message']['content']
return result
def short_version(text:str) -> str:
""" short_version"""
select_data = []
words = ['ojciec', 'matka', 'syn', 'córka', 'brat', 'siostra', 'żona',
'mąż', 'teść', 'teściowa', 'dziadek', 'babcia', 'wnuk', 'wnuczka',
'szwagier', 'szwagierka', 'siostrzeniec', 'siostrzenica', 'bratanek',
'bratanica', 'kuzyn', 'kuzynka', 'zięć', 'synowa', 'dziecko', 'wuj',
'ciotka', 'rodzina', 'krewni', 'krewny', "ożenić", "bezdzietny", "ożeniony",
"zamężna", "stryj", "stryjenka", "wujenka",
"rodzic", "rodzice", "spokrewniony", "spokrewnieni", "małżeństwo", "rodzeństwo",
"bratankowie", "siostrzeńcy", "bratanice", "siostrzenice", "małżeństwa, wyszła"]
doc = nlp(text)
sentences = [sent.text for sent in doc.sents]
# the first five sentences
select_data = sentences[0:5]
# from the middle sentences, only those that point to descriptions of family relations
for i in range(5,len(sentences) - 5):
sent_doc = nlp(sentences[i])
for token in sent_doc:
if token.lemma_ in words:
select_data.append(sentences[i])
break
# the last five sentences
select_data += sentences[len(sentences) - 5:]
result = ' '.join(select_data)
return result
def count_tokens(text:str, model:str = "gpt2") -> int:
""" funkcja zlicza tokeny """
num_of_tokens = 0
enc = tiktoken.get_encoding(model)
num_of_tokens = len(enc.encode(text))
return num_of_tokens
# ------------------------------------------------------------------------------
if __name__ == '__main__':
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
openai.api_key = OPENAI_API_KEY
# spaCy for splitting the text into sentences
nlp = spacy.load('pl_core_news_md')
# data from a text file
data_file_list = ['../data/psb_probki_200_txt_gpt3/Jadwiga_Jagiellonka.txt']
licznik = 0
for data_file in data_file_list:
print(data_file)
# read the text from the given file
text_from_file = ''
with open(data_file, 'r', encoding='utf-8') as f:
text_from_file = f.read().strip()
if not text_from_file:
print('Brak tekstu w pliku:', data_file)
continue
data_file_name = os.path.basename(data_file)
# if the biogram is too large for the gpt-4 model (8000 tokens minus 1000
# reserved for the answer), it is shortened to the content most likely
# to contain information about the family relations of the biogram's subject;
# if a file with the shortened data already exists, the data is read from disk
max_tokens = 7000
tokens_in_data = count_tokens(text_from_file, "gpt2")
print('Liczba tokenów:', tokens_in_data)
if tokens_in_data > max_tokens:
file_output = Path("..") / "output" / "gpt-4-api-dane" / data_file_name.replace('.txt', '.dane')
if os.path.exists(file_output):
with open(file_output, 'r', encoding='utf-8') as f:
text_from_file = f.read()
else:
text_from_file = short_version(text_from_file)
with open(file_output, 'w', encoding='utf-8') as f:
f.write(text_from_file)
# ///
# Przykład 2: "Sapieha Jan Fryderyk h. Lis (1618–1664), pisarz polny kor. Był wnukiem woj.
# witebskiego Mikołaja (zob.), najstarszym synem podkomorzego włodzimierskiego
# Fryderyka (zm. 1626) i Ewy ze Skaszewskich, bratem oboźnego lit. Tomasza Kazimierza
# (zob.), bpa wileńskiego Aleksandra Kazimierza i krajczego lit. Krzysztofa Franciszka."
# Wynik:
# [{"relacja":"dziadek", "osoba":"Mikołaj Sapieha"},
# {"relacja":"ojciec", "osoba":"Fryderyk Spaieha"},
# {"relacja":"matka", "osoba":"Ewa ze Skaszewskich"},
# {"relacja":"brat", "osoba":"Tomasz Kazimierz Sapieha"},
# {"relacja":"brat", "osoba":"Aleksander Kazimierz Sapieha"},
# {"relacja":"brat", "osoba":"Krzysztof Franciszek Sapieha"}
# ]
prompt_template = """Na podstawie podanego tekstu wyszukaj wszystkich krewnych lub powinowatych głównego bohatera tekstu: {name}. Możliwe rodzaje pokrewieństwa: ojciec, matka, syn, córka, brat, siostra, żona, mąż, teść, teściowa, dziadek, babcia, wnuk, wnuczka, szwagier, szwagierka, siostrzeniec, siostrzenica, bratanek, bratanica, kuzyn, kuzynka, zięć, synowa.
Wynik przedstaw w formie listy obiektów JSON zawierających pola:
relacja: rodzaj pokrewieństwa (kim osoba była dla bohatera/bohaterki )
osoba: nazwa (imię i nazwisko osoby związanej relacją z bohaterem)
Wypisz tylko rodzaje pokrewieństwa, które występują w tekście.
Jeżeli w tekście nie ma żadnych informacji o pokrewieństwach głównego bohatera napisz: brak danych.
Przykład: "Soderini Carlo (ok. 1537–1581), kupiec i bankier. Był jednym z pięciu synów Niccola i Annaleny Ricasoli, młodszym bratem Bernarda (zob.).
Jego bratanicą była Małgorzata Anna, żona Winfrida de Loeve. S. ożenił się z Joanną, córką burgrabiego krakowskiego Adama Kurozwęckiego."
Wynik:
[{"relacja":"ojciec", "osoba":"Niccolo"},
{"relacja":"matka": "osoba":"Annalena Ricasoli"},
{"relacja":"brat": "osoba":"Bernard"},
{"relacja":"bratanica": "osoba":"Małgorzata Anna"},
{"relacja":"żona": "osoba":"Joanna"},
{"relacja":"teść": "osoba":"Adam Kurozwęcki"}
]
Tekst:
"""
output = get_answer(model="gpt-4",
text=text_from_file,
prompt=prompt_template)
file_output = Path("..") / "output" / "gpt-4-api" / data_file_name.replace('.txt', '.relacje_gpt4')
with open(file_output, 'w', encoding='utf-8') as f:
f.write(output + '\n')
# costs
tokens_in_data = count_tokens(text_from_file, "gpt2")
tokens_in_result = count_tokens(output, "gpt2")
# OpenAI gpt-4 pricing: $0.03 for the prompt, $0.06 for the generated answer (per 1,000 tokens)
cena = ((tokens_in_data/1000) * 0.03) + ((tokens_in_result/1000) * 0.06)
print(f'Koszt: {cena:.2f}$') | [
"Jesteś pomocnym asystentem, specjalistą w dziedzinie historii, genealogii, życiorysów znanych postaci.",
"PLACEHOLDER\n\nTekst:\n\nPLACEHOLDER",
"Na podstawie podanego tekstu wyszukaj wszystkich krewnych lub powinowatych głównego bohatera tekstu: {name}. Możliwe rodzaje pokrewieństwa: ojciec, matka, syn, córka, brat, siostra, żona, mąż, teść, teściowa, dziadek, babcia, wnuk, wnuczka, szwagier, szwagierka, siostrzeniec, siostrzenica, bratanek, bratanica, kuzyn, kuzynka, zięć, synowa.\nWynik przedstaw w formie listy obiektów JSON zawierających pola:\nrelacja: rodzaj pokrewieństwa (kim osoba była dla bohatera/bohaterki )\nosoba: nazwa (imię i nazwisko osoby związanej relacją z bohaterem)\nWypisz tylko rodzaje pokrewieństwa, które występują w tekście.\nJeżeli w tekście nie ma żadnych informacji o pokrewieństwach głównego bohatera napisz: brak danych.\n\nPrzykład: \"Soderini Carlo (ok. 1537–1581), kupiec i bankier. Był jednym z pięciu synów Niccola i Annaleny Ricasoli, młodszym bratem Bernarda (zob.).\nJego bratanicą była Małgorzata Anna, żona Winfrida de Loeve. S. ożenił się z Joanną, córką burgrabiego krakowskiego Adama Kurozwęckiego.\"\nWynik:\n[{\"relacja\":\"ojciec\", \"osoba\":\"Niccolo\"},\n {\"relacja\":\"matka\": \"osoba\":\"Annalena Ricasoli\"},\n {\"relacja\":\"brat\": \"osoba\":\"Bernard\"},\n {\"relacja\":\"bratanica\": \"osoba\":\"Małgorzata Anna\"},\n {\"relacja\":\"żona\": \"osoba\":\"Joanna\"},\n {\"relacja\":\"teść\": \"osoba\":\"Adam Kurozwęcki\"}\n]\n\nTekst:\n"
] |
2024-01-10 | andrewlo1011/azure-open-ai-embeddings-qna | code~utilities~helper.py | import os
import openai
from dotenv import load_dotenv
import logging
import re
import hashlib
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.vectorstores.base import VectorStore
from langchain.chains import ChatVectorDBChain
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.llm import LLMChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.prompts import PromptTemplate
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import TokenTextSplitter, TextSplitter
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import TextLoader
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from utilities.formrecognizer import AzureFormRecognizerClient
from utilities.azureblobstorage import AzureBlobStorageClient
from utilities.translator import AzureTranslatorClient
from utilities.customprompt import PROMPT
from utilities.redis import RedisExtended
import pandas as pd
import urllib
from fake_useragent import UserAgent
class LLMHelper:
def __init__(self,
document_loaders : BaseLoader = None,
text_splitter: TextSplitter = None,
embeddings: OpenAIEmbeddings = None,
llm: AzureOpenAI = None,
temperature: float = None,
max_tokens: int = None,
custom_prompt: str = "",
vector_store: VectorStore = None,
k: int = None,
pdf_parser: AzureFormRecognizerClient = None,
blob_client: AzureBlobStorageClient = None,
enable_translation: bool = False,
translator: AzureTranslatorClient = None):
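# Configuration is read from environment variables (.env); constructor arguments override the environment defaults when provided.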
load_dotenv()
# openai.api_type = "azure"
# openai.api_base = os.getenv('OPENAI_API_BASE')
# openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Azure OpenAI settings
# self.api_base = openai.api_base
# self.api_version = openai.api_version
self.index_name: str = "embeddings"
self.model: str = os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', "text-embedding-ada-002")
self.deployment_name: str = os.getenv("OPENAI_ENGINE", os.getenv("OPENAI_ENGINES", "text-davinci-003"))
self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text")
self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.7)) if temperature is None else temperature
self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens
self.prompt = PROMPT if custom_prompt == '' else PromptTemplate(template=custom_prompt, input_variables=["summaries", "question"])
# Vector store settings
self.vector_store_address: str = os.getenv('REDIS_ADDRESS', "localhost")
self.vector_store_port: int= int(os.getenv('REDIS_PORT', 6379))
self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://")
self.vector_store_password: str = os.getenv("REDIS_PASSWORD", None)
if self.vector_store_password:
self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}"
else:
self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_address}:{self.vector_store_port}"
self.chunk_size = int(os.getenv('CHUNK_SIZE', 500))
self.chunk_overlap = int(os.getenv('CHUNK_OVERLAP', 100))
self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders
self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter
self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model, chunk_size=1) if embeddings is None else embeddings
if self.deployment_type == "Chat":
self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens if self.max_tokens != -1 else None) if llm is None else llm
else:
self.llm: AzureOpenAI = AzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm
self.vector_store: RedisExtended = RedisExtended(redis_url=self.vector_store_full_address, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
self.k : int = 3 if k is None else k
self.pdf_parser : AzureFormRecognizerClient = AzureFormRecognizerClient() if pdf_parser is None else pdf_parser
self.blob_client: AzureBlobStorageClient = AzureBlobStorageClient() if blob_client is None else blob_client
self.enable_translation : bool = False if enable_translation is None else enable_translation
self.translator : AzureTranslatorClient = AzureTranslatorClient() if translator is None else translator
self.user_agent: UserAgent = UserAgent()
self.user_agent.random
def add_embeddings_lc(self, source_url):
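# Load the document from source_url, normalise the encoding, split it into token chunks, tag each chunk with source metadata and a hashed key, then store the embeddings in Redis.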
try:
documents = self.document_loaders(source_url).load()
# Convert to UTF-8 encoding for non-ascii text
for document in documents:
try:
if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"):
document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8", errors="ignore")
except:
pass
docs = self.text_splitter.split_documents(documents)
# Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]')
for doc in docs:
doc.page_content = re.sub(pattern, '', doc.page_content)
# removing items while iterating over the same list skips elements, so filter in a second pass
docs = [doc for doc in docs if doc.page_content != '']
keys = []
for i, doc in enumerate(docs):
# Create a unique key for the document
source_url = source_url.split('?')[0]
filename = "/".join(source_url.split('/')[4:])
hash_key = hashlib.sha1(f"{source_url}_{i}".encode('utf-8')).hexdigest()
hash_key = f"doc:{self.index_name}:{hash_key}"
keys.append(hash_key)
doc.metadata = {"source": f"[{source_url}]({source_url}_SAS_TOKEN_PLACEHOLDER_)" , "chunk": i, "key": hash_key, "filename": filename}
self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address, index_name=self.index_name, keys=keys)
except Exception as e:
logging.error(f"Error adding embeddings for {source_url}: {e}")
raise e
def convert_file_and_add_embeddings(self, source_url, filename, enable_translation=False):
# Extract the text from the file
text = self.pdf_parser.analyze_read(source_url)
# Translate if requested
text = list(map(lambda x: self.translator.translate(x), text)) if self.enable_translation else text
# Upload the text to Azure Blob Storage
converted_filename = f"converted/{filename}.txt"
source_url = self.blob_client.upload_file("\n".join(text), f"converted/{filename}.txt", content_type='text/plain; charset=utf-8')
print(f"Converted file uploaded to {source_url} with filename {filename}")
# Update the metadata to indicate that the file has been converted
self.blob_client.upsert_blob_metadata(filename, {"converted": "true"})
self.add_embeddings_lc(source_url=source_url)
return converted_filename
def get_all_documents(self, k: int = None):
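# Fetch up to k chunks from the vector store and return them as a DataFrame (key, filename, source, content, metadata).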
result = self.vector_store.similarity_search(query="*", k= k if k else self.k)
return pd.DataFrame(list(map(lambda x: {
'key': x.metadata['key'],
'filename': x.metadata['filename'],
'source': urllib.parse.unquote(x.metadata['source']),
'content': x.page_content,
'metadata' : x.metadata,
}, result)))
def get_semantic_answer_lang_chain(self, question, chat_history):
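# Conversational retrieval: condense the chat history and question into a standalone question, retrieve matching chunks, answer with the "stuff" QA chain, then strip the SOURCES suffix and substitute the blob SAS token into the source links.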
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=False)
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=True, prompt=self.prompt)
chain = ConversationalRetrievalChain(
retriever=self.vector_store.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True,
# top_k_docs_for_context= self.k
)
result = chain({"question": question, "chat_history": chat_history})
context = "\n".join(list(map(lambda x: x.page_content, result['source_documents'])))
sources = "\n".join(set(map(lambda x: x.metadata["source"], result['source_documents'])))
container_sas = self.blob_client.get_container_sas()
result['answer'] = result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0]
sources = sources.replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)
return question, result['answer'], context, sources
def get_embeddings_model(self):
OPENAI_EMBEDDINGS_ENGINE_DOC = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002'))
OPENAI_EMBEDDINGS_ENGINE_QUERY = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002'))
return {
"doc": OPENAI_EMBEDDINGS_ENGINE_DOC,
"query": OPENAI_EMBEDDINGS_ENGINE_QUERY
}
def get_completion(self, prompt, **kwargs):
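# Send a plain prompt to the model, wrapping it as a chat message when a Chat deployment is configured.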
if self.deployment_type == 'Chat':
return self.llm([HumanMessage(content=prompt)]).content
else:
return self.llm(prompt)
| [] |
2024-01-10 | sankhya10/legal_chat_bot | law_formulation.py | from bs4 import BeautifulSoup
import re
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from termcolor import colored
import ast
import openai
import streamlit as st
def check_api_key(api_key_input):
try:
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "test"}],
api_key=api_key_input,
)
except Exception as e:
st.error(f"{e.__class__.__name__}: {e}")
return False
else:
return True
def create_llm(api_key_input):
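# After validating the key, probe the "gpt4" engine with a tiny completion; fall back to gpt-3.5-turbo when that call fails, otherwise use gpt-4.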
if check_api_key(api_key_input):
global model
try:
response = openai.Completion.create(engine="gpt4",prompt="This is a test.",max_tokens=5)
except:
model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, openai_api_key=api_key_input)
return True
else:
model = ChatOpenAI(model_name="gpt-4", temperature=0, openai_api_key=api_key_input)
return True
else:
return False
def further_refine(query):
prompt = f""" Paraphrase the following sentence:
Sentence :{query}
Emit a single new sentence in a list!
"""
messages = [
SystemMessage(content=prompt),
HumanMessage(content=""),
]
responses = model(messages)
return responses
def outline_guided(keywords, query):
prompt = f"""You are an expert in detecting whether a query is relevant to dictionary of keywords :{keywords}
Is the following query relevant or not relevant ?
Query: {query}
"""
messages = [
SystemMessage(content=prompt),
HumanMessage(content="Answer either relevant or not relevant"),
]
responses = model(messages)
return responses
def create_agent_schema(keywords, query):
context = f"""You are an expert in finding whether a query is either personal-law or business-law based on this dictionary: {keywords} \
Is the following query personal-law or business-law ?
Query:{query}
"""
messages = [SystemMessage(content=context), HumanMessage(content="")]
responses = model(messages)
return responses
def refine_keywords(lawyer_keyword, query):
context = f""" You have a list:{lawyer_keyword} that contains dictionary elements
Your only job is extract the whole element (along with any values available) based on {query}.
That ELEMENT SHOULD BE INSIDE THE LIST !! YOU CAN"T OUTPUT ANY CODE RESULTS!!
"""
messages = [SystemMessage(content=context), HumanMessage(content="")]
responses = model(messages)
return responses
def select_value(extracted, query):
context = f""" You are an expert in determining a single best value out of {extracted} values Relevant to query:{query}
THE OUTPUT SHOULD BE A SINGLE VALUE FROM THIS {extracted} NOTHING ELSE THAN THAT!!
"""
messages = [SystemMessage(content=context), HumanMessage(content="")]
responses = model(messages)
return responses
def create_lawyer_dict(file_path):
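# Parse the keywords HTML file: every <option> element is sorted into business-law or personal-law entries, with sub-keywords grouped per practice area in nested dictionaries.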
pattern = r"personal-law_([\S]+)"
with open(file_path, "r") as file:
soup = BeautifulSoup(file, "html.parser")
# Parse the HTML
# soup = BeautifulSoup(html_data, 'html.parser')
# Initialize dictionaries for business law and personal law
law_dict = {"business-law": [], "personal-law": []}
check_list = []
# Find all <option> elements
options = soup.find_all("option")
# Process the options and organize into dictionaries
law_dict = {"business-law": [], "personal-law": []}
type_check = {}
type_alo = {}
for option in options:
class_values = option.get("class", [])
law_value = option["value"]
# print(class_values,law_value)
if "business-law" in class_values[0]:
parts = class_values[0].split("_")
if len(parts) >= 2 and parts[0] == "business-law":
extracted_part = parts[1]
if extracted_part not in law_value:
type_check[f"{extracted_part}"] = []
elif "personal-law" in class_values[0]:
clever = option.text
match = re.search(pattern, class_values[0])
if match:
extracted_part = match.group(1)
if "-personal-law" in extracted_part:
extracted_part = extracted_part.replace("-personal-law", "")
if "You have selected" in option.text:
clever = clever.replace("You have selected", "")
if law_value not in class_values[0]:
type_alo[extracted_part] = []
for option in options:
class_values = option.get("class", [])
law_value = option["value"]
# print(class_values,law_value)
if "business-law" in class_values[0]:
parts = class_values[0].split("_")
if len(parts) >= 2 and parts[0] == "business-law":
extracted_part = parts[1]
# print(extracted_part)
if extracted_part in law_value:
law_dict["business-law"].append({f"{law_value}"})
else:
if extracted_part in type_check.keys():
type_check[f"{extracted_part}"].append(law_value)
elif "personal-law" in class_values[0]:
clever = option.text
match = re.search(pattern, class_values[0])
if match:
extracted_part = match.group(1)
if "-personal-law" in extracted_part:
extracted_part = extracted_part.replace("-personal-law", "")
if "You have selected" in option.text:
clever = clever.replace("You have selected", "")
if law_value in class_values[0]:
law_dict["personal-law"].append({f"{clever}"})
else:
if extracted_part in type_alo.keys():
type_alo[extracted_part].append(clever)
law_dict["business-law"].append(type_check)
law_dict["personal-law"].append(type_alo)
return law_dict
def identify_answer(law_dict, answer):
propmt = f""" You are an expert in extracting a single best law related keyword from a query
Find a single keyword from a
Query: {answer} based on {law_dict}
If you can't find any keyword just emit NO!!
"""
messages = [SystemMessage(content=propmt), HumanMessage(content="")]
responses = model(messages)
return responses
def search_dict_pattern(extraction, query):
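# Pull the dict/set literal out of the model's reply and reduce it to a single keyword or a (category, value) pair, asking the model to pick one value when several candidates are available.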
pattern = r"\{.*\}"
match = re.search(pattern, extraction)
if match:
dictionary_str = match.group()
try:
extracted_dictionary = ast.literal_eval(dictionary_str)
if isinstance(extracted_dictionary, set):
return extracted_dictionary.pop()
elif isinstance(extracted_dictionary, dict):
if len(extracted_dictionary.values()) == 1:
return (
list(extracted_dictionary.keys())[0],
list(extracted_dictionary.values())[0][0],
)
else:
value = select_value(extracted_dictionary, query)
return list(extracted_dictionary.keys())[0], value.content
elif isinstance(extracted_dictionary, tuple):
if len(extracted_dictionary) > 2:
last_element = extracted_dictionary[-1]
if isinstance(last_element,dict):
return list(last_element.keys())[0], last_element[list(last_element.keys())[0]][0]
elif isinstance(last_element,set):
return next(iter(last_element))
else:
return extracted_dictionary[0], extracted_dictionary[1]
except (SyntaxError, ValueError):
pattern = r'\{[^{}]*\}'
matches = re.findall(pattern, dictionary_str)
if matches:
extracted_dictionary = ast.literal_eval(dictionary_str)
if isinstance(extracted_dictionary, dict):
if len(extracted_dictionary.values()) == 1:
return (
list(extracted_dictionary.keys())[0],
list(extracted_dictionary.values())[0][0],
)
else:
value = select_value(extracted_dictionary, query)
return list(extracted_dictionary.keys())[0], value.content
else:
return extraction
def main():
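# Interactive loop: check each query for relevance, classify it as business or personal law, then narrow it down to a single matching keyword.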
law_dict = create_lawyer_dict("keywords.html")
query = " I have encountered an intellectual property issue with a colleague who has wrongfully claimed credit for my work. I am seeking legal assistance. Can you provide guidance or help in this matter?"
while True:
query = input("Enter your query regarding the Legally:Yours \n >>>input:")
if "End" in query:
break
refine = outline_guided(law_dict, query).content
# print(colored(refine,"yellow"))
refine = refine.lower()
if "not relevant" in refine:
print(colored(refine, "red"))
else:
keywords = ["personal", "business", "business law", "personal law"]
pattern = "|".join(re.escape(keyword) for keyword in keywords)
what_law = create_agent_schema(keywords, query).content
print(colored(what_law, "red"))
matches = re.findall(pattern, what_law, flags=re.IGNORECASE)
if matches:
if "business" in matches[0]:
extracted_answer = refine_keywords(
law_dict["business-law"], query
).content
search_dict_pattern(extracted_answer, query)
if "personal" in matches[0]:
extracted_answer = refine_keywords(
law_dict["personal-law"], query
).content
print(extracted_answer)
search_dict_pattern(extracted_answer, query)
if __name__ == "__main__":
main()
| [
"This is a test.",
"test",
"Answer either relevant or not relevant",
" Paraphrase the following sentence:\n Sentence :PLACEHOLDER\n Emit a single new sentence in a list!\n ",
"You are an expert in detecting whether a query is relevant to dictionary of keywords :PLACEHOLDER\n Is the following query relevant or not relevant ?\n\n Query: PLACEHOLDER\n "
] |
2024-01-10 | saqib772/MemoryBot | memorybot.py | """
This is a Python script that serves as a frontend for a conversational AI model built with the `langchain` and `llms` libraries.
The code creates a web application using Streamlit, a Python library for building interactive web apps.
"""
# Import necessary libraries
import streamlit as st
from PIL import Image
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import re
def is_four_digit_number(string):
pattern = r'^\d{4}$' # Matches exactly four digits
return bool(re.match(pattern, string))
# Set Streamlit page configuration
im = Image.open('sricon.png')
st.set_page_config(page_title=' 🤖ChatGPT with Memory🧠', layout='wide', page_icon = im)
# Initialize session states
if "generated" not in st.session_state:
st.session_state["generated"] = []
if "past" not in st.session_state:
st.session_state["past"] = []
if "input" not in st.session_state:
st.session_state["input"] = ""
if "stored_session" not in st.session_state:
st.session_state["stored_session"] = []
if "just_sent" not in st.session_state:
st.session_state["just_sent"] = False
if "temp" not in st.session_state:
st.session_state["temp"] = ""
if "balance" not in st.session_state:
st.session_state["balance"] = 0.0
if "deposit" not in st.session_state:
st.session_state["deposit"] = 3.0
def clear_text():
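# Copy the submitted text into session state ("temp") and clear the input box so the widget resets after each message.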
st.session_state["temp"] = st.session_state["input"]
st.session_state["input"] = ""
# Define function to get user input
def get_text():
"""
Get the user input text.
Returns:
(str): The text entered by the user
"""
input_text = st.text_input("You: ", st.session_state["input"], key="input",
placeholder="Your AI assistant here! Ask me anything ...请在这里打字问问题吧",
on_change=clear_text,
label_visibility='hidden')
input_text = st.session_state["temp"]
return input_text
# Define function to start a new chat
def new_chat():
"""
Clears session state and starts a new chat.
"""
save = []
for i in range(len(st.session_state['generated'])-1, -1, -1):
save.append("User:" + st.session_state["past"][i])
save.append("Bot:" + st.session_state["generated"][i])
st.session_state["stored_session"].append(save)
st.session_state["generated"] = []
st.session_state["past"] = []
st.session_state["input"] = ""
st.session_state.entity_memory.store = {}
st.session_state.entity_memory.buffer.clear()
# Set up sidebar with various options
#with st.sidebar.expander("🛠️ ", expanded=False):
# # Option to preview memory store
# if st.checkbox("Preview memory store"):
# with st.expander("Memory-Store", expanded=False):
# st.session_state.entity_memory.store
# # Option to preview memory buffer
# if st.checkbox("Preview memory buffer"):
# with st.expander("Bufffer-Store", expanded=False):
# st.session_state.entity_memory.buffer
# MODEL = st.selectbox(label='Model', options=['gpt-3.5-turbo','text-davinci-003','text-davinci-002','code-davinci-002'])
# K = st.number_input(' (#)Summary of prompts to consider',min_value=3,max_value=1000)
MODEL = "gpt-3.5-turbo"
K = 100
with st.sidebar:
st.markdown("---")
st.markdown("# About")
st.markdown(
"ChatGPTm is ChatGPT with added memory. "
"It can answer anything you ask and also remember you."
)
st.markdown(
"This tool is a work in progress. "
)
st.markdown("---")
st.markdown("# 简介")
st.markdown(
"ChatGPTm就是增加了记忆的ChatGPT。 "
"你可以在右边的对话框问任何问题。"
)
st.markdown(
"希望给国内没法注册使用ChatGPT的朋友带来方便!"
)
# Set up the Streamlit app layout
st.title("🤖 ChatGPT with Memory 🧠")
#st.subheader(" Powered by 🦜 LangChain + OpenAI + Streamlit")
hide_default_format = """
<style>
#MainMenu {visibility: hidden; }
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_default_format, unsafe_allow_html=True)
# Let user select version
st.write("GPT4.0上线了!无需注册就可以体验只有OpenAI付费用户才可以体验的GPT4.0了!")
version = st.selectbox("Choose ChatGPT version 请选择您想使用的ChatGPT版本", ("3.5", "4.0"))
if version == "3.5":
# Use GPT-3.5 model
MODEL = "gpt-3.5-turbo"
else:
# Use GPT-4.0 model
MODEL = "gpt-4"
# Ask the user to enter their OpenAI API key
#API_O = st.sidebar.text_input("API-KEY", type="password")
# Read API from Streamlit secrets
API_O = st.secrets["OPENAI_API_KEY"]
# Session state storage would be ideal
if API_O:
# Create an OpenAI instance
llm = OpenAI(temperature=0,
openai_api_key=API_O,
model_name=MODEL,
verbose=False)
# Create a ConversationEntityMemory object if not already created
if 'entity_memory' not in st.session_state:
st.session_state.entity_memory = ConversationEntityMemory(llm=llm, k=K )
# Create the ConversationChain object with the specified configuration
Conversation = ConversationChain(
llm=llm,
prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE,
memory=st.session_state.entity_memory
)
else:
st.sidebar.warning('API key required to try this app. The API key is not stored in any form.')
# st.stop()
# Add a button to start a new chat
#st.sidebar.button("New Chat", on_click = new_chat, type='primary')
# Get the user input
user_input = get_text()
# Generate the output using the ConversationChain object and the user input, and add the input/output to the session
if user_input:
if st.session_state["balance"] > -0.03:
with get_openai_callback() as cb:
output = Conversation.run(input=user_input)
st.session_state.past.append(user_input)
st.session_state.generated.append(output)
st.session_state["balance"] -= cb.total_cost * 4
else:
st.session_state.past.append(user_input)
if is_four_digit_number(user_input) :
st.session_state["balance"] += st.session_state["deposit"]
st.session_state.generated.append("谢谢支付,你可以继续使用了")
else:
st.session_state.generated.append("请用下面的支付码支付¥10后才可以再继续使用。我会再送你¥10元。支付时请记下转账单号的最后4位数字,在上面对话框输入这四位数字")
# Allow to download as well
download_str = []
# Display the conversation history using an expander, and allow the user to download it
with st.expander("Conversation", expanded=True):
for i in range(len(st.session_state['generated'])-1, -1, -1):
st.info(st.session_state["past"][i],icon="🧐")
st.success(st.session_state["generated"][i], icon="🤖")
download_str.append(st.session_state["past"][i])
download_str.append(st.session_state["generated"][i])
# Can throw error - requires fix
download_str = '\n'.join(download_str)
if download_str:
st.download_button('Download 下载',download_str)
# Display stored conversation sessions in the sidebar
for i, sublist in enumerate(st.session_state.stored_session):
with st.sidebar.expander(label= f"Conversation-Session:{i}"):
st.write(sublist)
# Allow the user to clear all stored conversation sessions
if st.session_state.stored_session:
if st.sidebar.checkbox("Clear-all"):
del st.session_state.stored_session
# Load the images
image1 = Image.open("wechatqrcode_leo.jpg")
image2 = Image.open("zhifubaoqrcode_kyle.jpg")
image3 = Image.open("paypalqrcode.png")
image4 = Image.open("drpang_shipinhao2.jpg")
# Display the image with text on top
st.write("I have to pay OpenAI API for each of your usage. Please consider donating $5 to keep this service alive! Thank you!")
st.write("您现在账上的余额是:", round (st.session_state["balance"]*7, 2), "人民币。")
st.write("我是史丹福机器人庞博士,我提供此应用的初衷是让国内的人也可以体验使用增加了记忆的ChatGPT。我在为你的每次使用支付调用OpenAI API的费用,包括3.5版,请扫码微信或支付宝支付¥10人民币来使用,我会再送你10元,按流量计费。")
st.write("长期用户可交¥1688年费(和OpenAI付费用户收费一致),填上你的邮箱,我会发给你专属的小程序,记忆力是这个的10倍。")
st.write("OpenAI对GPT4.0 API的收费是3.5的20倍,请大家体验时注意。")
st.write("我在我的《史丹福机器人庞博士》微信视频号也有很多关于ChatGPT和怎样使用ChatGPT魔法的视频,还有怎么使用这个小程序的视频,欢迎白嫖。也有系统的收费课程《零基础精通掌握ChatGPT魔法》给愿意知识付费的同学深入学习。 ")
st.write("所有6节课在我的视频号主页的直播回放里, 每节课99元,第一节课大家可以免费试听。 如果想购买全部6节课,有50%折扣,只要299元。可以在我的视频号主页私信我购买,注明ChatGPT课程。")
#st.image(img, caption=None, width=200)
# Divide the app page into two columns
col1, col2, col3 = st.columns(3)
# Display the first image in the first column
with col1:
st.image(image1, caption="微信支付", width=200)
# Display the second image in the second column
with col2:
st.image(image2, caption="支付宝", width=200)
# Display the third image in the third column
with col3:
st.image(image3, caption="PayPal", width=200)
st.image(image4, caption="史丹福机器人庞博士视频号,微信扫码前往", width=200)
| [] |
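Stripped of the Streamlit UI and the payment logic, the conversational core of the script above is the entity-memory pattern from the legacy langchain API it imports. A minimal sketch of that core, assuming the same library versions and a placeholder API key:

from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationEntityMemory
from langchain.chains.conversation.prompt import ENTITY_MEMORY_CONVERSATION_TEMPLATE
from langchain.llms import OpenAI

llm = OpenAI(temperature=0, openai_api_key="YOUR_KEY", model_name="gpt-3.5-turbo", verbose=False)
# Entity memory extracts named entities from each turn so later turns can refer back to them.
memory = ConversationEntityMemory(llm=llm, k=100)
conversation = ConversationChain(llm=llm, prompt=ENTITY_MEMORY_CONVERSATION_TEMPLATE, memory=memory)
print(conversation.run(input="Hi, my name is Alex and I live in Toronto."))
print(conversation.run(input="Where do I live?"))  # the stored entity "Toronto" is pulled back in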
2024-01-10 | akhilpenugonda/LLM_Journey | RagOverCode~RAGoverCode.py | from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import LanguageParser
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.schema.output_parser import StrOutputParser
from langchain.embeddings import GooglePalmEmbeddings
from langchain.llms import GooglePalm
import pprint
import google.generativeai as palm
repo_path = "/Users/akhilkumarp/development/personal/github/CodeWithAI-Python"
loader = GenericLoader.from_filesystem(
repo_path,
glob="**/*.py",
suffixes=[".py"],
parser=LanguageParser(language=Language.PYTHON, parser_threshold=500),
)
documents = loader.load()
palm.configure(api_key='AIzaSyD3d3npOFWRAbYPaKP1Yk8KWhGyiHtumZM')
python_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=2000, chunk_overlap=200
)
texts = python_splitter.split_documents(documents)
# prompt = """
# You are an expert at solving word problems.
# Solve the following problem:
# I have three houses, each with three cats.
# each cat owns 4 mittens, and a hat. Each mitten was
# knit from 7m of yarn, each hat from 4m.
# How much yarn was needed to make all the items?
# Think about it step by step, and show your work.
# """
# completion = palm.generate_text(
# model='models/text-bison-001',
# prompt=prompt,
# temperature=0,
# # The maximum length of the response
# max_output_tokens=800,
# )
import requests
import os
def call_palm_api(prompt):
body = {
"prompt": {"text": prompt},
}
url = f'https://generativelanguage.googleapis.com/v1beta3/models/text-bison-001:generateText?key=AIzaSyDYTaObtIda0gQKytXUpWlDU0AsPSR4Gvo'
print(body)
try:
response = requests.post(url, json=body)
response.raise_for_status()
res = response.json()
res = res['candidates'][0]['output']
return res
except requests.exceptions.HTTPError as err:
print(f"HTTP error occurred: {err}")
except Exception as err:
print(f"An error occurred: {err}")
call_palm_api("Explain advantages of liver and generate the response in markdown format")
for text in texts:
print(text)
prompt = 'Explain this code '+text.page_content
call_palm_api(prompt)
completion = palm.generate_text(
model='models/text-bison-001',
prompt=prompt,
temperature=0.5,
# The maximum length of the response
max_output_tokens=4000,
)
print(completion.result)
print("Hello")
# weaviate_url = "http://localhost:8080"
# embeddings = [] # Replace with your own embeddings
# docsearch = Weaviate.from_texts(
# texts,
# embeddings,
# weaviate_url=weaviate_url,
# by_text=False,
# metadatas=[{"source": f"{i}-pl"} for i in range(len(texts))],
# )
# retriever = docsearch.as_retriever()
# template = """
# You are an assistant for generating documentation over code. Use the following pieces of retrieved context to generate the documentation. If you don't know the answer, just say that you don't know. Use three sentences maximum and keep the answer concise.
# Question: {question}
# Context: {context}
# Answer:
# """
# prompt = ChatPromptTemplate.from_template(template)
# llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
# rag_chain = (
# {"context": retriever, "question": RunnablePassthrough()}
# | prompt
# | llm
# | StrOutputParser()
# )
# generated_documentation = rag_chain.invoke("How does this function work?")
| [
"Explain this code "
] |
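The step that matters most in the script above is the language-aware chunking of source files before each chunk is sent to the model. A short sketch of that step in isolation, using the same legacy langchain loaders (the repository path is a placeholder):

from langchain.document_loaders.generic import GenericLoader
from langchain.document_loaders.parsers import LanguageParser
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter

loader = GenericLoader.from_filesystem(
    "/path/to/repo",                     # placeholder path
    glob="**/*.py",
    suffixes=[".py"],
    parser=LanguageParser(language=Language.PYTHON, parser_threshold=500),
)
documents = loader.load()

# Split on Python syntax boundaries (functions, classes) instead of raw character counts.
splitter = RecursiveCharacterTextSplitter.from_language(
    language=Language.PYTHON, chunk_size=2000, chunk_overlap=200
)
chunks = splitter.split_documents(documents)
print(len(chunks), "chunks ready to send to the LLM")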
2024-01-10 | akhilpenugonda/LLM_Journey | RagOverCode~MongoDBVectors.py | from pymongo import MongoClient
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import GooglePalmEmbeddings
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.document_loaders import DirectoryLoader
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import gradio as gr
from gradio.themes.base import Base
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
client = MongoClient(host='mdburl')
dbName = "langchain_demo"
collectionName = "collection_of_text_blobs"
collection = client[dbName][collectionName]
loader = PyPDFLoader("/Users/akhilkumarp/development/personal/github/LLM_Journey/RagOverCode/AnthropicLatest3.pdf", extract_images=True)
pages = loader.load()
pages[4].page_content
print("Yes - 1")
| [] |
2024-01-10 | akhilpenugonda/LLM_Journey | RagOverCode~ChromaDB.py | from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatGooglePalm
from langchain.chains import RetrievalQA
directory = '/Users/akhilkumarp/development/personal/github/LLM_Journey/RagOverCode'
# loader = GenericLoader.from_filesystem(
# directory,
# glob="*",
# suffixes=[".pdf"],
# )
# documents = loader.load()
def load_docs(directory):
loader = DirectoryLoader(directory)
documents = loader.load()
return documents
documents = load_docs(directory)
len(documents)
def split_docs(documents,chunk_size=1000,chunk_overlap=20):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs = text_splitter.split_documents(documents)
return docs
docs = split_docs(documents)
print(len(docs))
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(docs, embeddings)
query = "What are the different types of concepts explained"
matching_docs = db.similarity_search(query)
matching_docs[0]
persist_directory = "chroma_db"
vectordb = Chroma.from_documents(
documents=docs, embedding=embeddings, persist_directory=persist_directory
)
vectordb.persist()
import os
os.environ["OPENAI_API_KEY"] = "key"
# model_name = "gpt-3.5-turbo"
# llm = ChatOpenAI(model_name=model_name)
model_name = "text-bison-001"
llm = ChatGooglePalm(model_name=model_name)
from langchain.chains.question_answering import load_qa_chain
chain = load_qa_chain(llm, chain_type="stuff",verbose=True)
query = "What are the emotional benefits of owning a pet?"
matching_docs = db.similarity_search(query)
answer = chain.run(input_documents=matching_docs, question=query)
answer
retrieval_chain = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=db.as_retriever())
retrieval_chain.run(query)
| [] |
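One step the script above stops short of is reopening the persisted store on a later run, which avoids re-embedding the documents. A short sketch under the same legacy Chroma API, assuming the "chroma_db" directory written above already exists:

from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma

embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
# Reopen the store that was written with persist_directory="chroma_db".
vectordb = Chroma(persist_directory="chroma_db", embedding_function=embeddings)
docs = vectordb.similarity_search("What are the emotional benefits of owning a pet?", k=4)
for d in docs:
    print(d.page_content[:80])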
2024-01-10 | akhilpenugonda/LLM_Journey | RagOverCode~COBOL~cobolconvertor.py | from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders.generic import GenericLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma
import chromadb
from chromadb.config import Settings
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatGooglePalm
from langchain.chains import RetrievalQA
from langchain.document_loaders.parsers import LanguageParser
from langchain.text_splitter import Language, RecursiveCharacterTextSplitter
import google.generativeai as palm
repo_path = "/Users/akhilkumarp/development/personal/github/LLM_Journey/RagOverCode/COBOL/game"
loader = GenericLoader.from_filesystem(
repo_path,
glob="**/*.cbl",
suffixes=[".cbl"],
parser=LanguageParser(language=Language.COBOL, parser_threshold=500),
)
documents = loader.load()
# palm.configure(api_key='key')
python_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.PYTHON, chunk_size=2000, chunk_overlap=200
)
texts = python_splitter.split_documents(documents)
embeddings = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
db = Chroma.from_documents(texts, embeddings)
query = "What are the different types of concepts explained"
matching_docs = db.similarity_search(query)
matching_docs[0]
persist_directory = "chroma_db"
vectordb = Chroma.from_documents(
documents=texts, embedding=embeddings, persist_directory=persist_directory
)
vectordb.persist()
import os
os.environ["OPENAI_API_KEY"] = "key"
model_name = "gpt-3.5-turbo"
llm = ChatOpenAI(model_name=model_name)
# llm = ChatGooglePalm(model_name="text-bison-001", google_api_key="key")
from langchain.chains.question_answering import load_qa_chain
chain = load_qa_chain(llm, chain_type="stuff",verbose=True)
query = "Generate the complete code in a single java file, with all the dependencies"
matching_docs = db.similarity_search(query)
answer = chain.run(input_documents=matching_docs, question=query)
print(answer)
| [] |
2024-01-10 | gowthamthenarasu/pak_saeeda | mine.py | import streamlit as st
import os
from PyPDF2 import PdfReader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
# Set API keys
os.environ["OPENAI_API_KEY"] = "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"
os.environ["SERPAPI_API_KEY"] = "28c2445d1bfe7530595be6fbc858b1968d776af69c6034aa5feda50deab4b990"
# PDF processing
pdfreader = PdfReader('XYZ_contract_pdf_Sumit Yenugwar 4.pdf')
raw_text = ''
for i, page in enumerate(pdfreader.pages):
content = page.extract_text()
if content:
raw_text += content
# Text splitting
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=800,
chunk_overlap=200,
length_function=len,
)
texts = text_splitter.split_text(raw_text)
# Download embeddings from OpenAI
embeddings = OpenAIEmbeddings()
# Create document search
document_search = FAISS.from_texts(texts, embeddings)
# Load QA chain
chain = load_qa_chain(OpenAI(), chain_type="stuff")
with st.sidebar:
st.title('🤗💬 LLM Chat APP')
st.markdown('''
## About
This app is an LLM-powered chatbot built using:
- [Streamlit](https://streamlit.io/)
- [LangChain](https://python.langchain.com/)
- [OpenAI](https://platform.openai.com/docs/models) LLM model
''')
st.markdown("<br>", unsafe_allow_html=True) # Add vertical space
st.write('Made with ❤️ by [Prompt Engineer](https://www.youtube.com/watch?v=M4mc-z_K1NU&list=PLUTApKyNO6MwrOioHGaFCeXtZpchoGv6W)')
# Streamlit app
def main():
st.title("DAMA-Data Management body of knowledge")
# Text input area
user_input = st.text_area("Enter your MCQ question ",height=150)
# Button to trigger model inference
if st.button("Get Answer"):
# Combine user input with the prompt and query
prompt_query = f"you have provided with MCQ question and its option as a chatbot model: {user_input}"
text_query = prompt_query + user_input
# Perform similarity search
docs = document_search.similarity_search(text_query)
# Run the model with the combined text and query
model_answer = chain.run(input_documents=docs, question=user_input)
# Display the model's answer
st.text_area("Model Answer:", value=model_answer)
# Run the Streamlit app
if __name__ == "__main__":
main()
| [
"you have provided with MCQ question and its option as a chatbot model: PLACEHOLDER"
] |
2024-01-10 | zeroc0de2022/gpt-3.5-turbo | gpt-3.5-turbo.py | import sys
import json
import os
from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QWidget, QTextEdit, QPlainTextEdit, QPushButton, \
QHBoxLayout, QDialog, QMessageBox, QCheckBox, QComboBox, QSpinBox, QLineEdit, QLabel
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QKeyEvent
import openai
import datetime
# -----------------------------------------------------------
# gpt-3.5-turbo chat
#
# (C) 2023 A.M. RU, Stav.
# Released under GNU Public License (GPL)
# @author zeroc0de <[email protected]>
# Date 2023.06.04
# -----------------------------------------------------------
class SettingsDialog(QDialog):
def __init__(self):
super().__init__()
self.setWindowTitle("Настройки")
self.resize(300, 200)
self.save_history_checkbox = QCheckBox("Хранить историю чата")
self.history_option_combo = QComboBox()
self.history_option_combo.addItem("Не учитывать в ответе историю переписки")
self.history_option_combo.addItem("Учитывать в ответе всю историю переписки")
self.history_option_combo.addItem("Учитывать в ответе последние 5 сообщений")
self.history_option_combo.addItem("Учитывать в ответе последние 3 сообщения")
self.max_tokens_spinbox_label = QLabel("Количество токенов:")
self.max_tokens_spinbox = QSpinBox()
self.max_tokens_spinbox.setMinimum(50)
self.max_tokens_spinbox.setMaximum(2016)
self.max_tokens_spinbox.setValue(50)
self.settings_file = 'settings.json'
self.api_key_label = QLabel("API ключ:")
self.api_key_edit = QLineEdit()
self.save_button = QPushButton("Сохранить")
self.save_button.clicked.connect(self.save_settings)
layout = QVBoxLayout()
layout.addWidget(self.save_history_checkbox)
layout.addWidget(self.history_option_combo)
layout.addWidget(self.max_tokens_spinbox_label)
layout.addWidget(self.max_tokens_spinbox)
layout.addWidget(self.api_key_label)
layout.addWidget(self.api_key_edit)
layout.addWidget(self.save_button)
self.setLayout(layout)
def load_settings(self):
try:
with open(self.settings_file, 'r') as file:
settings = json.load(file)
save_history = settings.get('save_history', False)
history_option = settings.get('history_option', 0)
api_key = settings.get('api_key', '')
max_tokens = settings.get('max_tokens', 50)
self.save_history_checkbox.setChecked(save_history)
self.history_option_combo.setCurrentIndex(history_option)
self.max_tokens_spinbox.setValue(max_tokens)
self.api_key_edit.setText(api_key)
except FileNotFoundError:
# If the settings.txt file is not found, default values are used
self.save_history_checkbox.setChecked(False)
self.history_option_combo.setCurrentIndex(0)
self.max_tokens_spinbox.setValue(50)
self.api_key_edit.setText('')
def save_settings(self):
save_history = self.save_history_checkbox.isChecked()
history_option = self.history_option_combo.currentIndex()
max_tokens = self.max_tokens_spinbox.value()
api_key = self.api_key_edit.text()
settings = {
"save_history": save_history,
"history_option": history_option,
"api_key": api_key,
"max_tokens": max_tokens
}
with open(self.settings_file, "w") as file:
json.dump(settings, file)
self.accept()
class ChatWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle("ChatGPT. Model: gpt-3.5-turbo")
self.resize(400, 600)
self.settings_dialog = None
self.save_history = False
self.history_option = 0
self.max_tokens = 50
self.api_key = ""
self.messages = []
self.chat_text_edit = QTextEdit()
self.chat_text_edit.setReadOnly(True)
# Replace QPlainTextEdit with QTextEdit
self.input_text_edit = QTextEdit()
self.input_text_edit.installEventFilter(self)
self.send_button = QPushButton("Отправить")
self.send_button.clicked.connect(self.send_message)
self.settings_button = QPushButton("Настройки")
self.settings_button.clicked.connect(self.open_settings)
date = datetime.datetime.now()
self.formatted_date = date.strftime("%Y-%m-%d")
self.history_file = 'history/' + self.formatted_date + '_history.json'
self.settings_file = 'settings.json'
central_widget = QWidget()
main_layout = QVBoxLayout()
main_layout.addWidget(self.chat_text_edit)
main_layout.addWidget(self.input_text_edit)
button_layout = QHBoxLayout()
button_layout.addWidget(self.send_button)
button_layout.addWidget(self.settings_button)
main_layout.addLayout(button_layout)
central_widget.setLayout(main_layout)
self.setCentralWidget(central_widget)
self.load_settings()
openai.api_key = self.api_key
def display_message(self, message):
self.chat_text_edit.append(message)
def load_settings(self):
try:
with open(self.settings_file, 'r') as file:
settings = json.load(file)
self.save_history = settings.get('save_history', False)
self.history_option = settings.get('history_option', 0)
self.max_tokens = settings.get('max_tokens', 50)
self.api_key = settings.get('api_key', '')
except FileNotFoundError:
self.save_history = False
self.history_option = 0
self.max_tokens = 50
self.api_key = ''
def save_chat_history(self):
chat_history = {
"messages": self.messages
}
if self.save_history:
history = self.chat_text_edit.toPlainText()
os.makedirs('history', exist_ok=True)
with open(self.history_file, 'w', encoding='utf-8') as file:
json.dump(chat_history, file, ensure_ascii=False)
# file.write(history)
def load_chat_history(self):
try:
with open(self.history_file, 'r', encoding='utf-8') as file:
history = json.load(file)
self.messages = history["messages"]
# Display the saved history in the chat window
for message in self.messages:
role = message["role"]
content = message["content"]
self.display_message(f"{role}: - {content}")
self.save_chat_history() # Save the chat history after it is loaded
except FileNotFoundError:
# If the chat_history.txt file is not found, do not load the chat history
self.messages = []
def send_message(self):
message = self.input_text_edit.toPlainText().rstrip()
self.input_text_edit.clear()
if message:
self.messages.append({"role": "user", "content": message})
response = self.get_chatbot_response(message)
# Save the model's reply in the chat history
self.messages.append({"role": "assistant", "content": response})
self.display_message("\nuser: - " + message)
self.display_message("\nassistant: - " + response)
self.input_text_edit.clear()
if self.save_history:
self.save_chat_history()
def get_chatbot_response(self, message):
history = self.chat_text_edit.toPlainText()
if self.history_option == 1:
# Include the entire conversation history in the request
prompt = history + "\nuser: " + message
elif self.history_option == 2:
# Include the last 5 messages from the conversation history in the request
last_messages = self.messages[-5:]
last_messages_content = [m["content"] for m in last_messages]
last_messages_str = "\n".join(last_messages_content)
prompt = last_messages_str + "\nuser: " + message
elif self.history_option == 3:
# Include the last 3 messages from the conversation history in the request
last_messages = self.messages[-3:]
last_messages_content = [m["content"] for m in last_messages]
last_messages_str = "\n".join(last_messages_content)
prompt = last_messages_str + "\nuser: " + message
else:
prompt = message
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt}],
max_tokens=self.max_tokens,
temperature=0.7
)
return response.choices[0].message.content
def open_settings(self):
self.settings_dialog = SettingsDialog()
self.settings_dialog.load_settings()
if self.settings_dialog.exec_() == QDialog.Accepted:
self.load_settings()
self.settings_dialog = None
def eventFilter(self, source, event):
if (event.type() == QKeyEvent.KeyPress and
source is self.input_text_edit and
event.key() == Qt.Key_Return and
event.modifiers() == Qt.ControlModifier):
self.send_message()
return True
return super().eventFilter(source, event)
def closeEvent(self, event):
self.save_chat_history()
event.accept()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = ChatWindow()
window.load_chat_history()
window.show()
sys.exit(app.exec_())
| [
"You are a helpful assistant.",
"PLACEHOLDER\nuser: PLACEHOLDER"
] |
2024-01-10 | marcioBigolin/aulagtech | aula.py | import streamlit as st
import pandas as pd
from sqlalchemy import create_engine
st.set_page_config(page_title="MDI - Análise com IA", page_icon="🌍", layout="wide")
# Receives the parameters via GET; for now without encryption, sent directly (should use a bearer token)
params = st.experimental_get_query_params()
# Gets the value of the 'variavel' parameter from the URL
senha = params.get('senha', ['SEM_DADOS'])[0]
# building the screen
st.write("Escolha uma das abas")
def gepeto():
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
import matplotlib.pyplot as plt
import os
api_token = "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"
st.session_state.prompt_history = []
with st.form("Question"):
question = st.text_input("Question", value="", type="default")
submitted = st.form_submit_button("Gerar")
if submitted:
with st.spinner():
llm = OpenAI(api_token=api_token)
pandas_ai = PandasAI(llm)
x = pandas_ai.run(df, prompt=question)
if os.path.isfile('temp_chart.png'):
im = plt.imread('temp_chart.png')
st.image(im)
os.remove('temp_chart.png')
if x is not None:
st.write(x)
st.session_state.prompt_history.append(question)
st.subheader("Prompt history:")
st.write(st.session_state.prompt_history)
if "prompt_history" in st.session_state and len(st.session_state.prompt_history) > 0:
if st.button("Limpar"):
st.session_state.prompt_history = []
st.session_state.df = None
if senha == 'SEM_DADOS':
st.write("Sem dados")
else:
# creating the database connection
conn = create_engine(f"postgresql://revisao_data:{senha}@revisao_data.postgresql.dbaas.com.br:5432/revisao_data")
# querying the database (SQL knowledge)
sql_query = pd.read_sql_query (f"SELECT * FROM moodle_marcio2.fato_join;", con=conn)
df = pd.DataFrame(sql_query, columns = ['titulo', 'nome_completo', 'coh_frazier', 'coh_brunet', 'data_entrega'])
tab1, tab2, tab3 = st.tabs(["Entendendo meus dados", "Gerador de gráfico", "ChatGPT" ])
with tab1:
st.dataframe(df)
st.write(df['coh_brunet'].describe())
with tab2:
import pygwalker as pyg
from pygwalker.api.streamlit import StreamlitRenderer, init_streamlit_comm
# Establish communication between pygwalker and streamlit
init_streamlit_comm()
@st.cache_resource
def get_pyg_renderer() -> "StreamlitRenderer":
# When you need to publish your app to the public, you should set the debug parameter to False to prevent other users from writing to your chart configuration file.
return StreamlitRenderer(df, spec="./gw_config.json", debug=False)
renderer = get_pyg_renderer()
# Render your data exploration interface. Developers can use it to build charts by drag and drop.
renderer.render_explore()
with tab3:
gepeto() | [] |
2024-01-10 | AbhinandanSingla/Flipify | tools~agents_tools.py | from langchain.tools import tool
from flipkart_scraper.product import Product
from flipkart_scraper.scrapper import search_products, dataframe_to_object
@tool
def searchCloth(query: str) -> str:
"""This function searches the products like cloths,footwear,outfits,gifts/colours/size/shades etc."""
print('\n')
print(query)
searched_products = []
df = search_products(query)
for _, row in df.iterrows():
searched_products.append(dataframe_to_object(Product, row))
print(len(searched_products))
return f"""
First Part: The following is the product information. You should display the first product to the customer and interact with them in a convincing way, providing information about customer ratings and product quality. Make the conversation appear human-like (important).
Second Part:
Now, the next step is to essentially display the corresponding link to relevant product as per the following:
{[{'name': searched_products[x].name, 'price': searched_products[x].price,
'image_link': searched_products[x].image, 'product_link': searched_products[x].detail_url} for x in
range(len(searched_products))][:3]}
As soon as a relevant product is found, stop calling the function again.
"""
if __name__ == '__main__':
print(search_products("shirts"))
| [
"This function searches the products like cloths,footwear,outfits,gifts/colours/size/shades etc."
] |
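The @tool-decorated function above is meant to be handed to an agent rather than called directly. A minimal wiring sketch, assuming the legacy initialize_agent API and a placeholder OpenAI key (the import path mirrors the module above; the query is illustrative):

from langchain.agents import initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
from tools.agents_tools import searchCloth  # the @tool-decorated function defined above

llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, openai_api_key="YOUR_KEY")
agent = initialize_agent(
    tools=[searchCloth],
    llm=llm,
    agent=AgentType.OPENAI_FUNCTIONS,   # lets the model decide when to call the tool
    verbose=True,
)
agent.run("Find a blue cotton shirt under 1000 rupees")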
2024-01-10 | HPCL-EI/RoboWaiter | robowaiter~llm_client~tool_api_test.py | import json
import openai
from colorama import init, Fore
from loguru import logger
import json
from robowaiter.llm_client.tool_register import get_tools, dispatch_tool
import requests
import json
import urllib3
init(autoreset=True)
########################################
# This file implements communication with the large language model and tool invocation
########################################
# Suppress the HTTPS security warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
base_url = "https://45.125.46.134:25344" # address of the local deployment, or the API address you use to access the model
def get_response(**kwargs):
data = kwargs
response = requests.post(f"{base_url}/v1/chat/completions", json=data, stream=data["stream"], verify=False)
decoded_line = response.json()
return decoded_line
functions = get_tools()
def run_conversation(query: str, stream=False, max_retry=5):
params = dict(model="chatglm3", messages=[{"role": "user", "content": query}], stream=stream)
params["functions"] = functions
response = get_response(**params)
for _ in range(max_retry):
if response["choices"][0]["message"].get("function_call"):
function_call = response["choices"][0]["message"]["function_call"]
logger.info(f"Function Call Response: {function_call}")
if "sub_task" in function_call["name"]:
return {
"Answer": "好的",
"Goal": json.loads(function_call["arguments"])["goal"]
}
function_args = json.loads(function_call["arguments"])
tool_response = dispatch_tool(function_call["name"], function_args)
logger.info(f"Tool Call Response: {tool_response}")
params["messages"].append(response["choices"][0]["message"])
params["messages"].append(
{
"role": "function",
"name": function_call["name"],
"content": tool_response, # result returned by the function call
}
)
else:
reply = response["choices"][0]["message"]["content"]
return {
"Answer": reply,
"Goal": None
}
logger.info(f"Final Reply: \n{reply}")
return
response = get_response(**params)
if __name__ == "__main__":
query = "可以带我去吗"
print(run_conversation(query, stream=False))
| [
"可以带我去吗"
] |
2024-01-10 | HPCL-EI/RoboWaiter | robowaiter~llm_client~tool_api_multi_round.py | import json
import openai
from colorama import init, Fore
from loguru import logger
import json
from robowaiter.llm_client.tool_register import get_tools, dispatch_tool
import requests
import json
import urllib3
init(autoreset=True)
########################################
# This file implements communication with the large language model and tool invocation
########################################
# Suppress the HTTPS security warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
base_url = "https://45.125.46.134:25344" # address of the local deployment, or the API address you use to access the model
def get_response(**kwargs):
data = kwargs
response = requests.post(f"{base_url}/v1/chat/completions", json=data, stream=data["stream"], verify=False)
decoded_line = response.json()
return decoded_line
functions = get_tools()
if __name__ == "__main__":
question = input("\n顾客:")
data_memory = [{
"role": "system",
"content": "你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说我们咖啡厅提供咖啡,水,点心,酸奶等食物。如果顾客不需要你了,你就回到吧台招待。",
},]
n = 1
max_retry = 5
params = dict(model="RoboWaiter",messages=data_memory, stream=False)
params["functions"] = functions
while question != 'end':
user_dict = {"role": "user", "content": question}
params["messages"].append(user_dict)
# print(data_memory)
response = get_response(**params)
for _ in range(max_retry):
if response["choices"][0]["message"].get("function_call"):
function_call = response["choices"][0]["message"]["function_call"]
logger.info(f"Function Call Response: {function_call}")
function_args = json.loads(function_call["arguments"])
tool_response = dispatch_tool(function_call["name"], function_args)
logger.info(f"Tool Call Response: {tool_response}")
return_message = response["choices"][0]["message"]
params["messages"].append(return_message)
t = {
"role": "function",
"name": function_call["name"],
"content": tool_response, # result returned by the function call
}
params["messages"].append(t)
response = get_response(**params)
else:
return_message = response["choices"][0]["message"]
reply = return_message["content"]
params["messages"].append(return_message)
logger.info(f"Final Reply: \n{reply}")
break
question = input("\n顾客:")
| [
"你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说我们咖啡厅提供咖啡,水,点心,酸奶等食物。如果顾客不需要你了,你就回到吧台招待。"
] |
2024-01-10 | HPCL-EI/RoboWaiter | robowaiter~llm_client~multi_rounds.py | import time
import openai
from colorama import init, Fore
from loguru import logger
from robowaiter.llm_client.tool_register import get_tools, dispatch_tool
import requests
import json
from collections import deque
import urllib3
import copy
init(autoreset=True)
from robowaiter.utils import get_root_path
import os
import re
from robowaiter.llm_client.single_round import single_round
########################################
# This file implements communication with the large language model and tool invocation
########################################
# Suppress the HTTPS security warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
base_url = "https://45.125.46.134:25344" # address of the local deployment, or the API address you use to access the model
root_path = get_root_path()
# load test questions
file_path = os.path.join(root_path,"robowaiter/llm_client/data/fix_questions.txt")
functions = get_tools()
fix_questions_dict = {}
no_reply_functions = ["create_sub_task"]
with open(file_path,'r',encoding="utf-8") as f:
#读取所有行
lines = f.read().strip()
sections = re.split(r'\n\s*\n', lines)
for s in sections:
x = s.strip().splitlines()
if len(x) == 2:
fix_questions_dict[x[0]] = {
"answer": x[1],
"function": None
}
else:
fix_questions_dict[x[0]] = {
"answer": x[1],
"function": x[2],
"args": x[3]
}
role_system = [{
"role": "system",
"content": "你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说我们咖啡厅提供咖啡,水,点心,酸奶等食物。如果顾客不需要你了,你就回到吧台招待。如果顾客叫你去做某事,你回复:好的,我马上去做这件事。",
}]
def new_history(max_length=7):
history = deque(maxlen=max_length)
return history
def new_response():
return {'choices': [{'index': 0, 'message':{} }]}
def parse_fix_question(question):
response = new_response()
fix_ans = fix_questions_dict[question]
if not fix_ans['function']: # plain dialogue, no tool call
message = {'role': 'assistant', 'content': fix_ans["answer"], 'name': None,
'function_call': None}
else:
func = fix_ans["function"]
args = fix_ans["args"]
# tool_response = dispatch_tool(function_call["name"], json.loads(args))
# logger.info(f"Tool Call Response: {tool_response}")
message = {'role': 'assistant',
'content': f"\n <|assistant|> {func}({args})\n ```python\ntool_call(goal={args})\n```",
'name': None,
'function_call': {'name': func, 'arguments': args}}
response["choices"][0]["message"] = message
return response
def get_response(sentence, history, allow_function_call = True):
if sentence:
history.append({"role": "user", "content": sentence})
if sentence in fix_questions_dict:
time.sleep(2)
return True, parse_fix_question(sentence)
params = dict(model="RoboWaiter")
params['messages'] = role_system + list(history)
if allow_function_call:
params["functions"] = functions
response = requests.post(f"{base_url}/v1/chat/completions", json=params, stream=False, verify=False)
decoded_line = response.json()
return False, decoded_line
def deal_response(response, history, func_map=None ):
if response["choices"][0]["message"].get("function_call"):
function_call = response["choices"][0]["message"]["function_call"]
logger.info(f"Function Call Response: {function_call}")
function_name = function_call["name"]
function_args = json.loads(function_call["arguments"])
if func_map:
tool_response = func_map[function_name](**function_args)
else:
try:
tool_response = dispatch_tool(function_call["name"], function_args)
logger.info(f"Tool Call Response: {tool_response}")
except:
logger.info(f"重试工具调用")
# tool_response = dispatch_tool(function_call["name"], function_args)
return function_name,None
return_message = response["choices"][0]["message"]
history.append(return_message)
t = {
"role": "function",
"name": function_call["name"],
"content": str(tool_response), # result returned by the function call
}
history.append(t)
return function_call["name"], tool_response
else:
return_message = response["choices"][0]["message"]
reply = return_message["content"]
history.append(return_message)
logger.info(f"Final Reply: \n{reply}")
return False, reply
def ask_llm(question,history, func_map=None, retry=3):
fixed, response = get_response(question, history)
function_call,result = deal_response(response, history, func_map)
if function_call:
if question in fix_questions_dict:
if fix_questions_dict[question]['function'] in no_reply_functions:
reply = fix_questions_dict[question]["answer"]
# result = single_round(reply,
# "你是机器人服务员,请把以下句子换一种表述方式对顾客说,但是意思不变,尽量简短:\n")
result = single_round(reply,
"You are a robot waiter. Please change the following sentence to the customer in a different way, but the meaning remains the same and be as brief as possible:\n")
else:
reply = fix_questions_dict[question]["answer"]
# result = single_round(f"你是机器人服务员,顾客想知道{question}, 你的具身场景查询返回的是{result},把返回的英文名词翻译成中文,请把按照以下句子对顾客说,{reply}, 尽量简短。\n")
result = single_round(
f"You are a robot waiter. The customer wants to know {question}. Your embodied scene query returns {result}. Translate the returned Chinese nouns into English. Please tell the customer according to the following sentence, {reply}, keep it as short as possible.\n")
message = {'role': 'assistant', 'content': result, 'name': None,
'function_call': None}
history.append(message)
else:
fixed, response = get_response(None, history,allow_function_call=False)
_,result = deal_response(response, history, func_map)
# print(f'{len(history)}条历史记录:')
print(f'{len(history)} history records:')
for x in history:
print(x)
return function_call, result
if __name__ == "__main__":
question = input("\nCustomer:")
history = new_history()
n = 1
max_retry = 2
while question != 'end':
function_call, return_message = ask_llm(question,history)
# question = input("\n顾客:")
question = input("\nCustomer:")
| [
"你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说我们咖啡厅提供咖啡,水,点心,酸奶等食物。如果顾客不需要你了,你就回到吧台招待。如果顾客叫你去做某事,你回复:好的,我马上去做这件事。",
"answer"
] |
2024-01-10 | HPCL-EI/RoboWaiter | robowaiter~llm_client~multi_rounds_retri.py | import time
import openai
from colorama import init, Fore
from loguru import logger
from robowaiter.llm_client.tool_register import get_tools, dispatch_tool
import requests
import json
from collections import deque
import urllib3
import copy
init(autoreset=True)
from robowaiter.utils import get_root_path
import os
import re
from robowaiter.llm_client.single_round import single_round
from robowaiter.algos.retrieval.retrieval_lm.retrieval import Retrieval
########################################
# This file implements communication with the large language model and tool invocation
########################################
# Suppress the HTTPS security warning
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
base_url = "https://45.125.46.134:25344" # address of the local deployment, or the API address you use to access the model
root_path = get_root_path()
# load test questions
# file_path = os.path.join(root_path,"robowaiter/llm_client/data/fix_questions.txt")
#
# fix_questions_dict = {}
# no_reply_functions = ["create_sub_task"]
#
functions = get_tools()
retrieval = Retrieval(threshold=1.9)
role_system = [{
"role": "system",
"content": "你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说我们咖啡厅提供咖啡,水,点心,酸奶等食物。如果顾客不需要你了,你就回到吧台招待。如果顾客叫你去做某事,你回复:好的,我马上去做这件事。",
}]
def new_history(max_length=7):
history = deque(maxlen=max_length)
return history
def new_response():
return {'choices': [{'index': 0, 'message':{} }]}
def parse_fix_question(question):
response = new_response()
fix_ans = question
if not fix_ans['function']: # plain dialogue, no tool call
message = {'role': 'assistant', 'content': fix_ans["answer"], 'name': None,
'function_call': None}
else:
func = fix_ans["function"]
args = fix_ans["args"]
# tool_response = dispatch_tool(function_call["name"], json.loads(args))
# logger.info(f"Tool Call Response: {tool_response}")
message = {'role': 'assistant',
'content': f"\n <|assistant|> {func}({args})\n ```python\ntool_call(goal={args})\n```",
'name': None,
'function_call': {'name': func, 'arguments': args}}
response["choices"][0]["message"] = message
return response, question["answer"]
def get_response(sentence, history, allow_function_call = True):
if sentence:
history.append({"role": "user", "content": sentence})
retrieval_result = retrieval.get_result(sentence)
if retrieval_result is not None:
time.sleep(1.2)
# handle multi-round queries
if retrieval_result["answer"] == "multi_rounds" and len(history) >= 2:
print("触发多轮检索")
last_content = ""
for i in range(-2,-len(history)):
if history[i]["role"] == "user":
last_content = history[i]["content"]
break
retrieval_result = retrieval.get_result(last_content + sentence)
if retrieval_result is not None:
response, answer = parse_fix_question(retrieval_result)
return True,response, answer
params = dict(model="RoboWaiter")
params['messages'] = role_system + list(history)
if allow_function_call:
params["functions"] = functions
response = requests.post(f"{base_url}/v1/chat/completions", json=params, stream=False, verify=False)
decoded_line = response.json()
return False, decoded_line, None
def deal_response(response, history, func_map=None ):
if response["choices"][0]["message"].get("function_call"):
function_call = response["choices"][0]["message"]["function_call"]
logger.info(f"Function Call Response: {function_call}")
function_name = function_call["name"]
function_args = json.loads(function_call["arguments"])
if func_map:
tool_response = func_map[function_name](**function_args)
else:
try:
tool_response = dispatch_tool(function_call["name"], function_args)
logger.info(f"Tool Call Response: {tool_response}")
except:
logger.info(f"重试工具调用")
# tool_response = dispatch_tool(function_call["name"], function_args)
return function_name,None
return_message = response["choices"][0]["message"]
history.append(return_message)
t = {
"role": "function",
"name": function_call["name"],
"content": str(tool_response), # result returned by the function call
}
history.append(t)
return function_call["name"], tool_response
else:
return_message = response["choices"][0]["message"]
reply = return_message["content"]
history.append(return_message)
logger.info(f"Final Reply: \n{reply}")
return False, reply
def ask_llm(question,history, func_map=None, retry=3):
fixed, response, answer = get_response(question, history)
print(f"response: {response}")
function_call,result = deal_response(response, history, func_map)
if function_call:
if fixed:
if function_call == "create_sub_task":
result = single_round(answer,
"你是机器人服务员,请把以下句子换一种表述方式对顾客说,但是意思不变,尽量简短:\n")
# elif function_call in ["get_object_info","find_location"] :
else:
result = single_round(f"你是机器人服务员,顾客想知道{question}, 你的具身场景查询返回的是{result},把返回的英文名词翻译成中文,请把按照以下句子对顾客说,{answer}, 尽量简短。\n")
else:
# _,response,_ = get_response(None, history,allow_function_call=False)
# _,result = deal_response(response, history, func_map)
result = single_round(history[-1]["content"],
"你是机器人服务员,请把以下句子换一种表述方式对顾客说,但是意思不变,尽量简短:\n")
message = {'role': 'assistant', 'content': result, 'name': None,
'function_call': None}
history.append(message)
print(f'{len(history)}条历史记录:')
for x in history:
print(x)
return function_call, result
if __name__ == "__main__":
question = input("\n顾客:")
history = new_history()
n = 1
max_retry = 2
while question != 'end':
function_call, return_message = ask_llm(question,history)
question = input("\n顾客:")
| [
"你是RoboWaiter,一个由HPCL团队开发的机器人服务员,你在咖啡厅工作。接受顾客的指令并调用工具函数来完成各种服务任务。如果顾客问你们这里有什么,或者想要点单,你说我们咖啡厅提供咖啡,水,点心,酸奶等食物。如果顾客不需要你了,你就回到吧台招待。如果顾客叫你去做某事,你回复:好的,我马上去做这件事。",
"answer"
] |
2024-01-10 | zsc/langchain | tests~unit_tests~chains~test_conversation.py | """Test conversation chain and memory."""
import pytest
from langchain.chains.base import Memory
from langchain.chains.conversation.base import ConversationChain
from langchain.chains.conversation.memory import (
ConversationBufferMemory,
ConversationBufferWindowMemory,
ConversationSummaryMemory,
)
from langchain.prompts.prompt import PromptTemplate
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_memory_ai_prefix() -> None:
"""Test that ai_prefix in the memory component works."""
memory = ConversationBufferMemory(memory_key="foo", ai_prefix="Assistant")
memory.save_context({"input": "bar"}, {"output": "foo"})
assert memory.buffer == "\nHuman: bar\nAssistant: foo"
def test_conversation_chain_works() -> None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=["foo", "bar"], template="{foo} {bar}")
memory = ConversationBufferMemory(memory_key="foo")
chain = ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="bar")
chain.run("foo")
def test_conversation_chain_errors_bad_prompt() -> None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=[], template="nothing here")
with pytest.raises(ValueError):
ConversationChain(llm=llm, prompt=prompt)
def test_conversation_chain_errors_bad_variable() -> None:
"""Test that conversation chain works in basic setting."""
llm = FakeLLM()
prompt = PromptTemplate(input_variables=["foo"], template="{foo}")
memory = ConversationBufferMemory(memory_key="foo")
with pytest.raises(ValueError):
ConversationChain(llm=llm, prompt=prompt, memory=memory, input_key="foo")
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
],
)
def test_conversation_memory(memory: Memory) -> None:
"""Test basic conversation memory functionality."""
# This is a good input because the input is not the same as baz.
good_inputs = {"foo": "bar", "baz": "foo"}
# This is a good output because these is one variable.
good_outputs = {"bar": "foo"}
memory.save_context(good_inputs, good_outputs)
# This is a bad input because there are two variables that aren't the same as baz.
bad_inputs = {"foo": "bar", "foo1": "bar"}
with pytest.raises(ValueError):
memory.save_context(bad_inputs, good_outputs)
# This is a bad input because the only variable is the same as baz.
bad_inputs = {"baz": "bar"}
with pytest.raises(ValueError):
memory.save_context(bad_inputs, good_outputs)
# This is a bad output because it is empty.
with pytest.raises(ValueError):
memory.save_context(good_inputs, {})
# This is a bad output because there are two keys.
bad_outputs = {"foo": "bar", "foo1": "bar"}
with pytest.raises(ValueError):
memory.save_context(good_inputs, bad_outputs)
@pytest.mark.parametrize(
"memory",
[
ConversationBufferMemory(memory_key="baz"),
ConversationSummaryMemory(llm=FakeLLM(), memory_key="baz"),
ConversationBufferWindowMemory(memory_key="baz"),
],
)
def test_clearing_conversation_memory(memory: Memory) -> None:
"""Test clearing the conversation memory."""
# This is a good input because the input is not the same as baz.
good_inputs = {"foo": "bar", "baz": "foo"}
# This is a good output because there is one variable.
good_outputs = {"bar": "foo"}
memory.save_context(good_inputs, good_outputs)
memory.clear()
assert memory.load_memory_variables({}) == {"baz": ""}
| [
"{foo} {bar}",
"{foo}",
"nothing here"
] |
2024-01-10 | zsc/langchain | langchain~chains~combine_documents~map_reduce.py | """Combining documents by mapping a chain over them first, then combining results."""
from __future__ import annotations
from typing import Any, Callable, Dict, List, Optional, Tuple
from pydantic import BaseModel, Extra, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
def _split_list_of_docs(
docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
new_result_doc_list = []
_sub_result_docs = []
for doc in docs:
_sub_result_docs.append(doc)
_num_tokens = length_func(_sub_result_docs, **kwargs)
if _num_tokens > token_max:
if len(_sub_result_docs) == 1:
raise ValueError(
"A single document was longer than the context length,"
" we cannot handle this."
)
if len(_sub_result_docs) == 2:
raise ValueError(
"A single document was so long it could not be combined "
"with another document, we cannot handle this."
)
new_result_doc_list.append(_sub_result_docs[:-1])
_sub_result_docs = _sub_result_docs[-1:]
new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
def _collapse_docs(
docs: List[Document],
combine_document_func: Callable,
**kwargs: Any,
) -> Document:
result = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
class MapReduceDocumentsChain(BaseCombineDocumentsChain, BaseModel):
"""Combining documents by mapping a chain over them, then combining results."""
llm_chain: LLMChain
"""Chain to apply to each document individually."""
combine_document_chain: BaseCombineDocumentsChain
"""Chain to use to combine results of applying llm_chain to documents."""
collapse_document_chain: Optional[BaseCombineDocumentsChain] = None
"""Chain to use to collapse intermediary results if needed.
If None, will use the combine_document_chain."""
document_variable_name: str
"""The variable name in the llm_chain to put the documents in.
If only one variable in the llm_chain, this need not be provided."""
return_map_steps: bool = False
"""Return the results of the map steps in the output."""
@property
def output_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_map_steps:
_output_keys = _output_keys + ["map_steps"]
return _output_keys
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
@property
def _collapse_chain(self) -> BaseCombineDocumentsChain:
if self.collapse_document_chain is not None:
return self.collapse_document_chain
else:
return self.combine_document_chain
def combine_docs(
self, docs: List[Document], token_max: int = 3000, **kwargs: Any
) -> Tuple[str, dict]:
"""Combine documents in a map reduce manner.
Combine by mapping first chain over all documents, then reducing the results.
This reducing can be done recursively if needed (if there are many documents).
"""
results = self.llm_chain.apply(
# FYI - this is parallelized and so it is fast.
[{**{self.document_variable_name: d.page_content}, **kwargs} for d in docs]
)
question_result_key = self.llm_chain.output_key
result_docs = [
Document(page_content=r[question_result_key], metadata=docs[i].metadata)
# This uses metadata from the docs, and the textual results from `results`
for i, r in enumerate(results)
]
length_func = self.combine_document_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
while num_tokens is not None and num_tokens > token_max:
new_result_doc_list = _split_list_of_docs(
result_docs, length_func, token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = _collapse_docs(
docs, self._collapse_chain.combine_docs, **kwargs
)
result_docs.append(new_doc)
num_tokens = self.combine_document_chain.prompt_length(
result_docs, **kwargs
)
if self.return_map_steps:
_results = [r[self.llm_chain.output_key] for r in results]
extra_return_dict = {"map_steps": _results}
else:
extra_return_dict = {}
output, _ = self.combine_document_chain.combine_docs(result_docs, **kwargs)
return output, extra_return_dict
| [] |
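As a usage sketch (not part of the module above): the chain is normally assembled from an LLMChain for the map step and a StuffDocumentsChain for the reduce step. The prompts and model below are illustrative assumptions, not taken from this repository:

from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.llms import OpenAI
from langchain.prompts.prompt import PromptTemplate

llm = OpenAI(temperature=0)
# Map step: summarize each document independently (apply() parallelizes the calls).
map_chain = LLMChain(llm=llm, prompt=PromptTemplate(input_variables=["context"], template="Summarize:\n{context}"))
# Reduce step: stuff all per-document summaries into one final prompt.
reduce_chain = StuffDocumentsChain(
    llm_chain=LLMChain(llm=llm, prompt=PromptTemplate(input_variables=["context"], template="Combine these summaries:\n{context}")),
    document_variable_name="context",
)
chain = MapReduceDocumentsChain(
    llm_chain=map_chain,
    combine_document_chain=reduce_chain,
    document_variable_name="context",
)
docs = [Document(page_content="First article text..."), Document(page_content="Second article text...")]
output, _ = chain.combine_docs(docs)
print(output)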
2024-01-10 | zsc/langchain | langchain~chains~combine_documents~refine.py | """Combining documents by doing a first pass and then refining on more documents."""
from __future__ import annotations
from typing import Any, Dict, List, Tuple
from pydantic import BaseModel, Extra, Field, root_validator
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
def _get_default_document_prompt() -> PromptTemplate:
return PromptTemplate(input_variables=["page_content"], template="{page_content}")
class RefineDocumentsChain(BaseCombineDocumentsChain, BaseModel):
"""Combine documents by doing a first pass and then refining on more documents."""
initial_llm_chain: LLMChain
"""LLM chain to use on initial document."""
refine_llm_chain: LLMChain
"""LLM chain to use when refining."""
document_variable_name: str
"""The variable name in the initial_llm_chain to put the documents in.
If only one variable in the initial_llm_chain, this need not be provided."""
initial_response_name: str
"""The variable name to format the initial response in when refining."""
document_prompt: BasePromptTemplate = Field(
default_factory=_get_default_document_prompt
)
"""Prompt to use to format each document."""
return_refine_steps: bool = False
"""Return the results of the refine steps in the output."""
@property
def output_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
_output_keys = super().output_keys
if self.return_refine_steps:
_output_keys = _output_keys + ["refine_steps"]
return _output_keys
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def get_default_document_variable_name(cls, values: Dict) -> Dict:
"""Get default document variable name, if not provided."""
if "document_variable_name" not in values:
llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
if len(llm_chain_variables) == 1:
values["document_variable_name"] = llm_chain_variables[0]
else:
raise ValueError(
"document_variable_name must be provided if there are "
"multiple llm_chain input_variables"
)
else:
llm_chain_variables = values["initial_llm_chain"].prompt.input_variables
if values["document_variable_name"] not in llm_chain_variables:
raise ValueError(
f"document_variable_name {values['document_variable_name']} was "
f"not found in llm_chain input_variables: {llm_chain_variables}"
)
return values
def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
"""Combine by mapping first chain over all, then stuffing into final chain."""
base_info = {"page_content": docs[0].page_content}
base_info.update(docs[0].metadata)
document_info = {k: base_info[k] for k in self.document_prompt.input_variables}
base_inputs: dict = {
self.document_variable_name: self.document_prompt.format(**document_info)
}
inputs = {**base_inputs, **kwargs}
res = self.initial_llm_chain.predict(**inputs)
refine_steps = [res]
for doc in docs[1:]:
base_info = {"page_content": doc.page_content}
base_info.update(doc.metadata)
document_info = {
k: base_info[k] for k in self.document_prompt.input_variables
}
base_inputs = {
self.document_variable_name: self.document_prompt.format(
**document_info
),
self.initial_response_name: res,
}
inputs = {**base_inputs, **kwargs}
res = self.refine_llm_chain.predict(**inputs)
refine_steps.append(res)
if self.return_refine_steps:
extra_return_dict = {"refine_steps": refine_steps}
else:
extra_return_dict = {}
return res, extra_return_dict
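# Usage sketch: one possible way to wire up this chain. The prompt texts, the variable names
# "context_str"/"existing_answer", and the OpenAI LLM below are assumptions chosen for illustration.
def _example_refine_usage():
    from langchain.llms import OpenAI

    initial_prompt = PromptTemplate(
        input_variables=["context_str"],
        template="Summarize this text:\n{context_str}",
    )
    refine_prompt = PromptTemplate(
        input_variables=["existing_answer", "context_str"],
        template="Existing summary:\n{existing_answer}\nRefine it using this new context:\n{context_str}",
    )
    chain = RefineDocumentsChain(
        initial_llm_chain=LLMChain(llm=OpenAI(), prompt=initial_prompt),
        refine_llm_chain=LLMChain(llm=OpenAI(), prompt=refine_prompt),
        document_variable_name="context_str",
        initial_response_name="existing_answer",
    )
    docs = [Document(page_content="First chunk."), Document(page_content="Second chunk.")]
    answer, _ = chain.combine_docs(docs)
    return answer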
| [
"{page_content}"
] |
2024-01-10 | eugeneleychenko/one80-Compass | app~test_w_FAISS.py | from fastapi import FastAPI, Request, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import json
import requests
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
import os
import numpy as np
import os.path
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
# allow_origins=["http://localhost:3000"], # Allows only specific origins
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
FAISS_INDEX_FILE = 'faiss_index.bin'
def save_faiss_index(db):
db.save_local(FAISS_INDEX_FILE)
def load_faiss_index_if_exists(embeddings):
if os.path.exists(FAISS_INDEX_FILE):
return FAISS.load_local(FAISS_INDEX_FILE, embeddings)
return None
def load_data_from_url(url):
response = requests.get(url)
response.raise_for_status()
return response.json()
def get_agenda_items(recipe_name, data):
agenda_items = []
for entry in data:
if entry['Journey Name (N)'] == recipe_name:
agenda_items.append(entry['Agenda Items (Description)'])
return agenda_items if agenda_items else None
def get_methods(recipe_name, data):
methods = []
for entry in data:
if entry['Journey Name (N)'] == recipe_name:
methods.append(entry['Methods (N)'])
return methods if methods else None
def get_method_details(methods_list, data):
method_details = []
for method in methods_list:
method = method.strip() # Remove any leading/trailing whitespace
for entry in data:
if entry.get('Uniques') == method:
method_details.append({
'method': method,
'description_short': entry.get('Description (short)', ''),
'ai_response': entry.get('AI Response', '')
})
break # Stop searching after the first match
return method_details
@app.post('/get_recipe')
async def get_recipe(request: Request):
content = await request.json()
recipe_name = content.get('recipe_name')
if not recipe_name:
raise HTTPException(status_code=400, detail="No recipe name provided")
tasks_url = 'https://gs.jasonaa.me/?url=https://docs.google.com/spreadsheets/d/e/2PACX-1vSmp889ksBKKVVwpaxhlIzpDzXNOWjnszEXBP7SC5AyoebSIBFuX5qrcwwv6ud4RCYw2t_BZRhGLT0u/pubhtml?gid=1980586524&single=true'
flow_url = 'https://gs.jasonaa.me/?url=https://docs.google.com/spreadsheets/d/e/2PACX-1vSmp889ksBKKVVwpaxhlIzpDzXNOWjnszEXBP7SC5AyoebSIBFuX5qrcwwv6ud4RCYw2t_BZRhGLT0u/pubhtml?gid=1980586524&single=true'
tasks_data = load_data_from_url(tasks_url)
flow_data = load_data_from_url(flow_url)
# Initialize the language model
llm = ChatOpenAI(model="gpt-4", temperature=.2, openai_api_key=openai_api_key)
# Create an embeddings model
embeddings = OpenAIEmbeddings(api_key=openai_api_key)
# Replace the FAISS index creation with a check to load if it exists
# If it doesn't exist, create it and save
db = load_faiss_index_if_exists(embeddings)
if db is None:
documents = [Document(page_content=task['Journey Name (N)']) for task in tasks_data]
db = FAISS.from_documents(documents, embeddings)
save_faiss_index(db)
# Perform a similarity search
similar_docs = db.similarity_search(recipe_name, k=1)
if similar_docs:
closest_task = similar_docs[0].page_content
similarity = np.linalg.norm(np.array(embeddings.embed_query(recipe_name)) - np.array(embeddings.embed_query(closest_task)))
# Get agenda items and methods for the closest task
agenda_items = get_agenda_items(closest_task, flow_data)
methods_str = get_methods(closest_task, flow_data) # New line to get methods
method_details = get_method_details(methods_str, tasks_data) # Get method details
if agenda_items and method_details:
# Create a chain that uses the language model to generate a complete sentence
template = "Based on your input, I suggest you to follow these steps: {agenda_items}. This suggestion is based on the recipe '{recipe_name}', which is {similarity}% similar to your input. The original recipe that it is matching with is '{closest_task}'."
prompt = PromptTemplate(template=template, input_variables=["agenda_items", "recipe_name", "similarity", "closest_task"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
response = llm_chain.run({"agenda_items": ', '.join(agenda_items), "recipe_name": recipe_name, "similarity": round(similarity * 100, 2), "closest_task": closest_task})
return JSONResponse({
"response": response,
"details": {
"Closest Luma Task": closest_task,
"Methods": '| '.join([detail['method'] for detail in method_details]),
"Method Details": method_details,
"Similarity": f"{similarity}% similar to that task"
}
})
else:
raise HTTPException(status_code=404, detail="Agenda Items or Methods not found for the task")
else:
raise HTTPException(status_code=404, detail="Task not found")
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
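# Example client call (sketch; assumes the server above is running locally on port 8000,
# and "Design sprint" is a made-up recipe name):
#   import requests
#   resp = requests.post("http://localhost:8000/get_recipe", json={"recipe_name": "Design sprint"})
#   print(resp.json()["response"])
#   print(resp.json()["details"]["Methods"])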
| [
"{closest_task}",
"agenda_items",
"recipe_name",
"similarity",
"Based on your input, I suggest you to follow these steps: {agenda_items}. This suggestion is based on the recipe '{recipe_name}', which is {similarity}% similar to your input. The original recipe that it is matching with is '{closest_task}'.",
"{recipe_name}",
"closest_task"
] |
2024-01-10 | eugeneleychenko/one80-Compass | app~FAISS_test.py | from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse
from langchain.vectorstores import FAISS
from fastapi.middleware.cors import CORSMiddleware
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
import requests
import os
from dotenv import load_dotenv
import uvicorn
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
# allow_origins=["http://localhost:3000"], # Allows only specific origins
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
load_dotenv()
FAISS_INDEX_FILE = '/tmp/faiss_index.bin'
def save_faiss_index(db):
db.save_local(FAISS_INDEX_FILE)
def load_faiss_index_if_exists(embeddings):
if os.path.exists(FAISS_INDEX_FILE):
return FAISS.load_local(FAISS_INDEX_FILE, embeddings)
return None
def load_data_from_url(url):
response = requests.get(url)
response.raise_for_status()
return response.json()
@app.post("/find_closest_match")
async def find_closest_match(payload: dict):
user_input = payload.get("user_input")
if not user_input:
raise HTTPException(status_code=400, detail="User input is required in the payload")
# Load the JSON from the provided URL
json_url = "https://opensheet.elk.sh/1vgJJHgyIrjip-6Z-yCW6caMgrZpJo1-waucKqvfg1HI/5"
data = load_data_from_url(json_url)
#
agenda_items = [item.get('Journey Name (N)', 'Default Description') for item in data]
# Initialize embeddings and check if FAISS index exists
embeddings = OpenAIEmbeddings()
faiss_index = load_faiss_index_if_exists(embeddings)
if faiss_index is None:
# Create a new index and save it
documents = [Document(page_content=item) for item in agenda_items]
faiss_index = FAISS.from_documents(documents, embeddings)
save_faiss_index(faiss_index)
# Find the closest match
similar_docs = faiss_index.similarity_search(user_input, k=1)
if similar_docs:
closest_match = similar_docs[0].page_content
# Find the entry with the matching Journey Name and gather all associated methods
methods = []
alternatives_list = []
# Load methods data from URL
methods_url = "https://opensheet.elk.sh/1vgJJHgyIrjip-6Z-yCW6caMgrZpJo1-waucKqvfg1HI/6"
methods_data = load_data_from_url(methods_url)
# print("methods mapping: ", methods_mapping)
# Match 'closest_match' with Journey Name (N) in the json_url
matched_entries = [entry for entry in data if entry.get('Journey Name (N)') == closest_match]
if not matched_entries:
raise HTTPException(status_code=404, detail="No matching agenda items found")
# Retrieve all the Methods (N) that are associated with Journey Name (N) in the json_url
methods = []
for entry in matched_entries:
entry_methods = entry.get('Methods (N)', '')
if entry_methods:
methods.extend(entry_methods.split('; '))
# Remove duplicates from methods list
methods = list(set(methods))
# From the methods_data json, find all the methods in the Unique key
# and return Alt 1, Alt 2, Alt 3, Description (short), AI Response
alternatives_list = []
for method in methods:
method_entry = next((item for item in methods_data if item['Uniques'] == method), None)
if method_entry:
alternatives = {
'Alt 1': method_entry.get('Alt 1', ''),
'Alt 2': method_entry.get('Alt 2', ''),
'Alt 3': method_entry.get('Alt 3', ''),
'Description (short)': method_entry.get('Description (short)', ''),
'AI Response': method_entry.get('AI Response', '')
}
alternatives_list.append({method: alternatives})
return JSONResponse({
"closest_match": closest_match,
"methods": methods,
"alternatives": alternatives_list
})
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
| [] |
2024-01-10 | eugeneleychenko/one80-Compass | app~test_use_embeddings.py | from flask import Flask, request, jsonify
from flask_cors import CORS # Added for CORS support
import json
import requests
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import Document
import os
import numpy as np
app = Flask(__name__)
CORS(app) # Initialize CORS on the Flask app
load_dotenv()
openai_api_key = os.getenv("OPENAI_API_KEY")
def load_data_from_url(url):
response = requests.get(url)
response.raise_for_status()
return response.json()
def get_agenda_items(recipe_name, data):
agenda_items = []
for entry in data:
if entry['Journey Name (N)'] == recipe_name:
agenda_items.append(entry['Agenda Items (Description)'])
return agenda_items if agenda_items else None
def get_methods(recipe_name, data):
methods = []
for entry in data:
if entry['Journey Name (N)'] == recipe_name:
methods.append(entry['Methods (N)'])
return methods if methods else None
def get_method_details(methods_list, data):
method_details = []
for method in methods_list:
method = method.strip() # Remove any leading/trailing whitespace
for entry in data:
if entry.get('Uniques') == method:
method_details.append({
'method': method,
'description_short': entry.get('Description (short)', ''),
'ai_response': entry.get('AI Response', '')
})
break # Stop searching after the first match
return method_details
@app.route('/get_recipe', methods=['POST'])
def get_recipe():
content = request.json
recipe_name = content.get('recipe_name')
if not recipe_name:
return jsonify({"error": "No recipe name provided"}), 400
tasks_url = 'https://gs.jasonaa.me/?url=https://docs.google.com/spreadsheets/d/e/2PACX-1vSmp889ksBKKVVwpaxhlIzpDzXNOWjnszEXBP7SC5AyoebSIBFuX5qrcwwv6ud4RCYw2t_BZRhGLT0u/pubhtml?gid=1980586524&single=true'
flow_url = 'https://gs.jasonaa.me/?url=https://docs.google.com/spreadsheets/d/e/2PACX-1vSmp889ksBKKVVwpaxhlIzpDzXNOWjnszEXBP7SC5AyoebSIBFuX5qrcwwv6ud4RCYw2t_BZRhGLT0u/pubhtml?gid=1980586524&single=true'
tasks_data = load_data_from_url(tasks_url)
flow_data = load_data_from_url(flow_url)
# Initialize the language model
llm = ChatOpenAI(model="gpt-4", temperature=.2, openai_api_key=openai_api_key)
# Load or create vectorstore
vectorstore_path = './chroma_db'
embedding_function = OpenAIEmbeddings(api_key=openai_api_key)
if os.path.exists(vectorstore_path) and os.listdir(vectorstore_path):
vectorstore = Chroma(persist_directory=vectorstore_path, embedding_function=embedding_function)
else:
# Convert the tasks to Document objects
documents = [Document(page_content=task['Journey Name (N)']) for task in tasks_data]
# Create an embeddings model
embeddings = OpenAIEmbeddings(api_key=openai_api_key)
# Create a Chroma vectorstore from the documents
# Save the vectorstore for future use
# Save the vectorstore to disk with persistence
vectorstore = Chroma.from_documents(documents, embeddings, persist_directory="./chroma_db")
# vectorstore.save(vectorstore_path)
# Perform a similarity search
similar_docs = vectorstore.similarity_search(recipe_name, k=1)
if similar_docs:
closest_task = similar_docs[0].page_content
similarity = np.linalg.norm(np.array(embedding_function.embed_query(recipe_name)) - np.array(embedding_function.embed_query(closest_task)))  # use the embedding model created above directly
# Get agenda items and methods for the closest task
agenda_items = get_agenda_items(closest_task, flow_data)
methods_str = get_methods(closest_task, flow_data) # New line to get methods
method_details = get_method_details(methods_str, tasks_data) # Get method details
if agenda_items and method_details:
# Create a chain that uses the language model to generate a complete sentence
template = "Based on your input, I suggest you to follow these steps: {agenda_items}. This suggestion is based on the recipe '{recipe_name}', which is {similarity}% similar to your input. The original recipe that it is matching with is '{closest_task}'."
prompt = PromptTemplate(template=template, input_variables=["agenda_items", "recipe_name", "similarity", "closest_task"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
response = llm_chain.run({"agenda_items": ', '.join(agenda_items), "recipe_name": recipe_name, "similarity": round(similarity * 100, 2), "closest_task": closest_task})
return jsonify({
"response": response,
"details": {
"Closest Luma Task": closest_task,
"Methods": '| '.join([detail['method'] for detail in method_details]),
"Method Details": method_details,
"Similarity": f"{similarity}% similar to that task"
}
})
else:
return jsonify({"error": "Agenda Items or Methods not found for the task"}), 404
else:
return jsonify({"error": "Task not found"}), 404
if __name__ == "__main__":
app.run(debug=True)
| [
"{closest_task}",
"agenda_items",
"recipe_name",
"similarity",
"Based on your input, I suggest you to follow these steps: {agenda_items}. This suggestion is based on the recipe '{recipe_name}', which is {similarity}% similar to your input. The original recipe that it is matching with is '{closest_task}'.",
"{recipe_name}",
"closest_task"
] |
2024-01-10 | Sami-C4C/Studies-Assistent | hilfsbeispiele~script.py | import openai
openai.api_key = "YOUR_API_KEY"
MODEL = "gpt-3.5-turbo"
user_input_thema = input("Das Thema: ")
user_input_fragen = input("Die Anzahl von Fragen: ")
user_input_moeglichkeiten = input("Die Anzahl von Antwortmoeglichkeiten: ")
response = openai.chat.completions.create(
model=MODEL,
messages=[
{"role": "system", "content": "You are a helpful quiz generator."},
{"role": "user", "content": "Erstelle mir ein Quiz über das Thema " + user_input_thema + ", mit " + user_input_fragen + " Fragen. Jede Frage hat " + user_input_moeglichkeiten + " Antwortmoeglichkeiten."},
],
temperature=0,
)
print(response.choices[0].message.content) | [
"You are a helpful quiz generator.",
"Erstelle mir ein Quiz über das Thema PLACEHOLDER, mit PLACEHOLDER Fragen. Jede Frage hat PLACEHOLDER Antwortmoeglichkeiten."
] |
2024-01-10 | Sami-C4C/Studies-Assistent | hilfsbeispiele~script2.py | import openai
def ask_openai(prompt, model="text-davinci-003", max_tokens=150):
openai.api_key = "YOUR_API_KEY"
try:
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=max_tokens
)
return response.choices[0].text.strip()
except Exception as e:
return f"An error occurred: {e}"
def main():
print("OpenAI Chat - Type 'quit' to exit.")
while True:
user_input = input("You: ")
if user_input.lower() == 'quit':
break
response = ask_openai(user_input)
print("AI:", response)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | xcz1997/wenda | plugins~gen_data_st.py |
import argparse
import sentence_transformers
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.docstore.document import Document
import threading
import pdfplumber
import re
import chardet
import os
import sys
import time
os.chdir(sys.path[0][:-8])
from common import success_print
from common import error_helper
from common import error_print
from common import settings
from common import CounterLock
source_folder = 'txt'
source_folder_path = os.path.join(os.getcwd(), source_folder)
import logging
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
root_path_list = source_folder_path.split(os.sep)
docs = []
vectorstore = None
model_path = settings.librarys.rtst.model_path
try:
embeddings = HuggingFaceEmbeddings(model_name='')
embeddings.client = sentence_transformers.SentenceTransformer(
model_path, device="cuda")
except Exception as e:
error_helper("embedding加载失败,请下载相应模型",
r"https://github.com/l15y/wenda#st%E6%A8%A1%E5%BC%8F")
raise e
success_print("Embedding 加载完成")
embedding_lock=CounterLock()
vectorstore_lock=threading.Lock()
def clac_embedding(texts, embeddings, metadatas):
global vectorstore
with embedding_lock:
vectorstore_new = FAISS.from_texts(texts, embeddings, metadatas=metadatas)
with vectorstore_lock:
if vectorstore is None:
vectorstore = vectorstore_new
else:
vectorstore.merge_from(vectorstore_new)
def make_index():
global docs
if hasattr(settings.librarys.rtst,"size") and hasattr(settings.librarys.rtst,"overlap"):
text_splitter = CharacterTextSplitter(
chunk_size=int(settings.librarys.rtst.size), chunk_overlap=int(settings.librarys.rtst.overlap), separator='\n')
else:
text_splitter = CharacterTextSplitter(
chunk_size=20, chunk_overlap=0, separator='\n')
doc_texts = text_splitter.split_documents(docs)
docs = []
texts = [d.page_content for d in doc_texts]
metadatas = [d.metadata for d in doc_texts]
thread = threading.Thread(target=clac_embedding, args=(texts, embeddings, metadatas))
thread.start()
while embedding_lock.get_waiting_threads()>2:
time.sleep(0.1)
all_files=[]
for root, dirs, files in os.walk(source_folder_path):
for file in files:
all_files.append([root, file])
success_print("文件列表生成完成",len(all_files))
length_of_read=0
for i in range(len(all_files)):
root, file=all_files[i]
data = ""
title = ""
try:
file_path = os.path.join(root, file)
_, ext = os.path.splitext(file_path)
if ext.lower() == '.pdf':
#pdf
with pdfplumber.open(file_path) as pdf:
data_list = []
for page in pdf.pages:
print(page.extract_text())
data_list.append(page.extract_text())
data = "\n".join(data_list)
elif ext.lower() == '.txt':
# txt
with open(file_path, 'rb') as f:
b = f.read()
result = chardet.detect(b)
with open(file_path, 'r', encoding=result['encoding']) as f:
data = f.read()
else:
print("目前还不支持文件格式:", ext)
except Exception as e:
print("文件读取失败,当前文件已被跳过:",file,"。错误信息:",e)
data = re.sub(r'!', "!\n", data)
data = re.sub(r':', ":\n", data)
data = re.sub(r'。', "。\n", data)
data = re.sub(r'\r', "\n", data)
data = re.sub(r'\n\n', "\n", data)
data = re.sub(r"\n\s*\n", "\n", data)
length_of_read+=len(data)
docs.append(Document(page_content=data, metadata={"source": file}))
if length_of_read > 1e5:
success_print("处理进度",int(100*i/len(all_files)),f"%\t({i}/{len(all_files)})")
make_index()
# print(embedding_lock.get_waiting_threads())
length_of_read=0
if len(all_files) == 0:
error_print("txt 目录没有数据")
sys.exit(0)
if len(docs) > 0:
make_index()
while embedding_lock.get_waiting_threads()>0:
time.sleep(0.1)
with embedding_lock:
time.sleep(0.1)
with vectorstore_lock:
success_print("处理完成")
try:
vectorstore_old = FAISS.load_local(
'memory/default', embeddings=embeddings)
success_print("合并至已有索引。如不需合并请删除 memory/default 文件夹")
vectorstore_old.merge_from(vectorstore)
vectorstore_old.save_local('memory/default')
except:
print("新建索引")
vectorstore.save_local('memory/default')
success_print("保存完成")
| [] |
2024-01-10 | xcz1997/wenda | plugins~zhishiku_rtst.py | from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
import sentence_transformers
import numpy as np
import re,os
from plugins.common import settings,allowCROS
from plugins.common import error_helper
from plugins.common import success_print
divider='\n'
if not os.path.exists('memory'):
os.mkdir('memory')
cunnrent_setting=settings.librarys.rtst
def get_doc_by_id(id,memory_name):
return vectorstores[memory_name].docstore.search(vectorstores[memory_name].index_to_docstore_id[id])
def process_strings(A, C, B):
# find the longest common suffix of A and prefix of B
common = ""
for i in range(1, min(len(A), len(B)) + 1):
if A[-i:] == B[:i]:
common = A[-i:]
# if there is a common substring, replace one of them with C and concatenate
if common:
return A[:-len(common)] + C + B
# otherwise, just return A + B
else:
return A + B
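# e.g. process_strings("abcde", "|", "cdefg") -> "ab|cdefg": the shared overlap "cde" is kept
# only once (from B), with the divider C inserted in front of it.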
def get_doc(id,score,step,memory_name):
doc = get_doc_by_id(id,memory_name)
final_content=doc.page_content
print("文段分数:",score,[doc.page_content])
if step > 0:
for i in range(1, step+1):
try:
doc_before=get_doc_by_id(id-i,memory_name)
if doc_before.metadata['source']==doc.metadata['source']:
final_content=process_strings(doc_before.page_content,divider,final_content)
# print("上文分数:",score,doc.page_content)
except:
pass
try:
doc_after=get_doc_by_id(id+i,memory_name)
if doc_after.metadata['source']==doc.metadata['source']:
final_content=process_strings(final_content,divider,doc_after.page_content)
except:
pass
if doc.metadata['source'].endswith(".pdf") or doc.metadata['source'].endswith(".txt"):
title=f"[{doc.metadata['source']}](/api/read_news/{doc.metadata['source']})"
else:
title=doc.metadata['source']
return {'title': title,'content':re.sub(r'\n+', "\n", final_content),"score":int(score)}
def find(s,step = 0,memory_name="default"):
try:
embedding = get_vectorstore(memory_name).embedding_function(s)
scores, indices = vectorstores[memory_name].index.search(np.array([embedding], dtype=np.float32), int(cunnrent_setting.count))
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
continue
if scores[0][j]>700:continue
docs.append(get_doc(i,scores[0][j],step,memory_name))
return docs
except Exception as e:
print(e)
return []
try:
embeddings = HuggingFaceEmbeddings(model_name='')
embeddings.client = sentence_transformers.SentenceTransformer(cunnrent_setting.model_path,
device=cunnrent_setting.device)
except Exception as e:
error_helper("embedding加载失败,请下载语义知识库计算模型",r"https://github.com/l15y/wenda#st%E6%A8%A1%E5%BC%8F")
raise e
vectorstores={}
def get_vectorstore(memory_name):
try:
return vectorstores[memory_name]
except Exception as e:
try:
vectorstores[memory_name] = FAISS.load_local(
'memory/'+memory_name, embeddings=embeddings)
return vectorstores[memory_name]
except Exception as e:
success_print("没有读取到RTST记忆区%s,将新建。"%memory_name)
return None
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from bottle import route, response, request, static_file, hook
import bottle
@route('/api/upload_rtst_zhishiku', method=("POST","OPTIONS"))
def upload_zhishiku():
allowCROS()
try:
data = request.json
title=data.get("title")
memory_name=data.get("memory_name")
data = re.sub(r'!', "!\n", data.get("txt"))
data = re.sub(r'。', "。\n", data)
data = re.sub(r'[\n\r]+', "\n", data)
docs=[Document(page_content=data, metadata={"source":title })]
print(docs)
text_splitter = CharacterTextSplitter(
chunk_size=20, chunk_overlap=0, separator='\n')
doc_texts = text_splitter.split_documents(docs)
texts = [d.page_content for d in doc_texts]
metadatas = [d.metadata for d in doc_texts]
vectorstore_new = FAISS.from_texts(texts, embeddings, metadatas=metadatas)
vectorstore=get_vectorstore(memory_name)
if vectorstore is None:
vectorstores[memory_name]=vectorstore_new
else:
vectorstores[memory_name].merge_from(vectorstore_new)
return '成功'
except Exception as e:
return str(e)
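# Example request body (sketch; the title and text values are made up):
#   POST /api/upload_rtst_zhishiku
#   {"title": "note-2023-06-01", "memory_name": "default", "txt": "text to index"}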
@route('/api/save_rtst_zhishiku', method=("POST","OPTIONS"))
def save_zhishiku():
allowCROS()
try:
data = request.json
memory_name=data.get("memory_name")
vectorstores[memory_name].save_local('memory/'+memory_name)
return "保存成功"
except Exception as e:
return str(e)
import json
@route('/api/find_rtst_in_memory', method=("POST","OPTIONS"))
def api_find():
allowCROS()
data = request.json
prompt = data.get('prompt')
step = data.get('step')
memory_name=data.get("memory_name")
if step is None:
step = int(settings.library.step)
return json.dumps(find(prompt,int(step),memory_name))
@route('/api/save_news', method=("POST","OPTIONS"))
def save_news():
allowCROS()
try:
data = request.json
if not data:
return 'no data'
title = data.get('title')
txt = data.get('txt')
cut_file = f"txt/{title}.txt"
with open(cut_file, 'w', encoding='utf-8') as f:
f.write(txt)
f.close()
return 'success'
except Exception as e:
return str(e)
@route('/api/read_news/:path', method=("GET","OPTIONS"))
def read_news(path=""):
allowCROS()
return static_file(path, root="txt/")
| [
"\n",
"\\n+"
] |
2024-01-10 | mila-iqia/mila-docs-chatbot | cfg.py | import logging
import os
from dataclasses import dataclass
import openai
from buster.busterbot import Buster, BusterConfig
from buster.completers import ChatGPTCompleter, DocumentAnswerer
from buster.formatters.documents import DocumentsFormatter
from buster.formatters.prompts import PromptFormatter
from buster.retriever import Retriever, SQLiteRetriever
from buster.tokenizers import GPTTokenizer
from buster.validators import QuestionAnswerValidator, Validator
from huggingface_hub import hf_hub_download
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# auth information
username = os.getenv("MILA_USERNAME")
password = os.getenv("MILA_PASSWORD")
# set openAI creds
openai.api_key = os.getenv("OPENAI_API_KEY")
# hf hub information
REPO_ID = os.environ.get("HUB_DATASET_ID")
DB_FILE = "documents_mila.db"
HUB_TOKEN = os.environ.get("HUB_TOKEN")
# download the documents.db hosted on the dataset space
logger.info(f"Downloading {DB_FILE} from hub...")
hf_hub_download(
repo_id=REPO_ID,
repo_type="dataset",
filename=DB_FILE,
token=HUB_TOKEN,
local_dir=".",
local_dir_use_symlinks=False,
)
logger.info("Downloaded.")
buster_cfg = BusterConfig(
validator_cfg={
"unknown_response_templates": [
"I'm sorry, but I am an AI language model trained to assist with questions related to the Mila Cluster. I cannot answer that question as it is not relevant to the cluster or its usage. Is there anything else I can assist you with?",
],
"unknown_threshold": 0.85,
"embedding_model": "text-embedding-ada-002",
"use_reranking": True,
"check_question_prompt": """You are an chatbot answering questions about the mila cluster, a compute infrastructure.
Your job is to determine wether or not a question is valid, and should be answered.
More general questions are not considered valid, even if you might know the response.
A user will submit a question. Respond 'true' if it is valid, respond 'false' if it is invalid.
For example:
Q: How can I run a job with 2 GPUs?
true
Q: What is the meaning of life?
false
A user will submit a question. Respond 'true' if it is valid, respond 'false' if it is invalid.""",
"completion_kwargs": {
"model": "gpt-3.5-turbo",
"stream": False,
"temperature": 0,
},
},
retriever_cfg={
"db_path": DB_FILE,
"top_k": 3,
"thresh": 0.7,
"max_tokens": 2000,
"embedding_model": "text-embedding-ada-002",
},
documents_answerer_cfg={
"no_documents_message": "No documents are available for this question.",
},
completion_cfg={
"completion_kwargs": {
"model": "gpt-3.5-turbo",
"stream": True,
"temperature": 0,
},
},
tokenizer_cfg={
"model_name": "gpt-3.5-turbo",
},
documents_formatter_cfg={
"max_tokens": 3500,
"formatter": "{content}",
},
prompt_formatter_cfg={
"max_tokens": 3500,
"text_before_docs": (
"You are a chatbot assistant answering technical questions about the Mila Cluster, a GPU cluster for Mila Students."
"You are a chatbot assistant answering technical questions about the Mila Cluster."
"You can only respond to a question if the content necessary to answer the question is contained in the following provided documentation."
"If the answer is in the documentation, summarize it in a helpful way to the user."
"If it isn't, simply reply that you cannot answer the question because it is not available in your documentation."
"If it is a coding related question that you know the answer to, answer but warn the user that it wasn't taken directly from the documentation."
"Do not refer to the documentation directly, but use the instructions provided within it to answer questions. "
"Here is the documentation: "
"<DOCUMENTS> "
),
"text_after_docs": (
"<\DOCUMENTS>\n"
"REMEMBER:\n"
"You are a chatbot assistant answering technical questions about the Mila Cluster, a GPU cluster for Mila Students."
"Here are the rules you must follow:\n"
"1) You must only respond with information contained in the documentation above. Say you do not know if the information is not provided.\n"
"2) Make sure to format your answers in Markdown format, including code block and snippets.\n"
"3) Do not reference any links, urls or hyperlinks in your answers.\n"
"4) If you do not know the answer to a question, or if it is completely irrelevant to the library usage, simply reply with:\n"
"5) Do not refer to the documentation directly, but use the instructions provided within it to answer questions. "
"'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the library or its usage. Is there anything else I can assist you with?'"
"For example:\n"
"What is the meaning of life for a cluster bot?\n"
"I'm sorry, but I am an AI language model trained to assist with questions related to the Mila Cluster. I cannot answer that question as it is not relevant to its usage. Is there anything else I can assist you with?"
"Now answer the following question:\n"
),
},
)
# initialize buster with the config in cfg.py (adapt to your needs) ...
retriever: Retriever = SQLiteRetriever(**buster_cfg.retriever_cfg)
tokenizer = GPTTokenizer(**buster_cfg.tokenizer_cfg)
document_answerer: DocumentAnswerer = DocumentAnswerer(
completer=ChatGPTCompleter(**buster_cfg.completion_cfg),
documents_formatter=DocumentsFormatter(tokenizer=tokenizer, **buster_cfg.documents_formatter_cfg),
prompt_formatter=PromptFormatter(tokenizer=tokenizer, **buster_cfg.prompt_formatter_cfg),
**buster_cfg.documents_answerer_cfg,
)
validator: Validator = QuestionAnswerValidator(**buster_cfg.validator_cfg)
buster: Buster = Buster(retriever=retriever, document_answerer=document_answerer, validator=validator)
| [] |
2024-01-10 | Luke100000/HagridBot | modules~smart_hagrid.py | import datetime
import os
import random
from functools import lru_cache
from typing import List
import numpy as np
import pytz as pytz
import shelve
from cache import AsyncLRU
from discord import Message
from tqdm.auto import tqdm
from data import HAGRID_COMMANDS
from openai_utils import generate_embedding, generate_text, num_tokens_from_string
from stats import stat
os.makedirs("shelve/", exist_ok=True)
progress = shelve.open("shelve/progress")
settings = shelve.open("shelve/settings")
import sqlite3
con = sqlite3.connect("shelve/database.db")
SUPER_SMART = False
def setup():
con.execute(
"""
CREATE TABLE IF NOT EXISTS messages (
id INTEGER PRIMARY KEY,
guild INTEGER,
channel INTEGER,
author INTEGER,
content TEXT,
date DATETIME,
indexed BOOLEAN
)
"""
)
con.execute(
"""
CREATE TABLE IF NOT EXISTS embeddings (
message_id INTEGER PRIMARY KEY,
embedding BLOB
)
"""
)
con.execute(
"""
CREATE TABLE IF NOT EXISTS summaries (
id INTEGER PRIMARY KEY AUTOINCREMENT,
guild INTEGER,
channel INTEGER,
from_date DATETIME,
to_date DATETIME,
summary TEXT,
embedding BLOB
)
"""
)
con.execute(
"""
CREATE TABLE IF NOT EXISTS names (
id INTEGER PRIMARY KEY,
name TEXT
)
"""
)
con.execute(
"""
CREATE TABLE IF NOT EXISTS blocked_users (
id INTEGER PRIMARY KEY,
reasons TEXT
)
"""
)
setup()
@lru_cache(maxsize=1024)
def set_name(identifier: int, name: str) -> None:
con.execute(
"INSERT OR REPLACE INTO names (id, name) VALUES (?, ?)", (identifier, name)
)
@lru_cache(maxsize=1024)
def get_name(identifier: int) -> str:
    name = con.execute("SELECT name FROM names WHERE id = ?", (identifier,)).fetchone()
    return str(identifier) if name is None else name[0]
MIN_EMBEDDING_LENGTH = 32
MAX_CONVERSATION_TIME = 45
active_conversations = {}
def get_all_embeddings(guild_id: int) -> (np.array, List[str]):
"""
:param guild_id: The current guild
:return: The embedding matrix and the jump urls for all indexed messages
"""
results = con.execute(
"""
SELECT messages.id, content, embeddings.embedding, channel, names.name
FROM messages
LEFT JOIN embeddings ON messages.id=embeddings.message_id
LEFT JOIN names ON messages.channel=names.id
LEFT JOIN blocked_users ON messages.author=blocked_users.id
WHERE blocked_users.id IS NULL AND indexed = TRUE AND guild = ? AND length(content) > ?
""",
(
guild_id,
MIN_EMBEDDING_LENGTH,
),
).fetchall()
embeddings = []
messages = []
for result in tqdm(results, "Embedding messages"):
# Fetch missing embeddings
if result[2] is None:
embedding = generate_embedding(result[4] + ": " + result[1])
con.execute(
"INSERT INTO embeddings (message_id, embedding) VALUES (?, ?)",
(result[0], embedding.tobytes()),
)
else:
embedding = np.frombuffer(result[2], dtype=np.float32)
embeddings.append(embedding)
messages.append(
f"https://discord.com/channels/{guild_id}/{result[3]}/{result[0]}"
)
con.commit()
return np.asarray(embeddings), messages
def get_cosine_similarity(arr_a: np.array, arr_b: np.array) -> np.array:
"""
Returns the similarity between all pairs between arr_a and arr_b
:param arr_a: A matrix of shape (n_samples, dim)
:param arr_b: A matrix of shape (n_samples, dim)
:return: A similarity matrix of shape (n_samples_a, n_samples_b)
"""
norms_a = np.linalg.norm(arr_a, axis=-1)[:, np.newaxis]
norms_b = np.linalg.norm(arr_b, axis=-1)[:, np.newaxis]
divisor = norms_a * norms_b.T
dot_p = arr_a @ arr_b.T
return np.divide(dot_p, divisor, dot_p, where=divisor > 0)
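# e.g. get_cosine_similarity(np.eye(2), np.array([[1.0, 0.0]])) -> [[1.0], [0.0]]
# (row i, column j holds the cosine similarity between arr_a[i] and arr_b[j]).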
def search(guild_id: int, embedding: np.array, samples: int = 3) -> List[str]:
"""
Searches for jump urls to similar messages to the given embedding
:param guild_id: Current guild
:param embedding: Embedding vector
:param samples: Max samples to look for
:return: A list of jump urls
"""
embeddings, messages = get_all_embeddings(guild_id)
similarities = get_cosine_similarity(embeddings, embedding[None, :]).flatten()
best_guess = similarities.max()
top_ids = np.argsort(-similarities)[:samples]
return [messages[i] for i in top_ids if similarities[i] > best_guess * 0.95]
def find_best_summaries(
guild_id: int, embedding: np.array, samples: int = 1
) -> List[str]:
"""
Searches for summaries most similar to a user query
:param guild_id: Current guild
:param embedding: Embedding vector
:param samples: Max samples to look for
:return: A list of jump urls
"""
results = con.execute(
"""
SELECT summary, embedding
FROM summaries
WHERE guild = ?
""",
(guild_id,),
).fetchall()
embeddings = np.asarray([np.frombuffer(r[1], dtype=np.float32) for r in results])
similarities = get_cosine_similarity(embeddings, embedding[None, :]).flatten()
best_guess = similarities.max()
top_ids = np.argsort(-similarities)[:samples]
return [results[i][0] for i in top_ids if similarities[i] > best_guess * 0.75]
def drop_until(messages: List[str], max_size: int, encoding_name="gpt-3.5-turbo"):
while (
len(messages) > 0
and num_tokens_from_string("\n".join(messages), encoding_name) > max_size
):
messages.pop(random.randrange(len(messages)))
return messages
@AsyncLRU(3600)
async def who_is(user_id, max_length=3000):
messages = con.execute(
"SELECT content FROM messages WHERE author=? ORDER BY RANDOM() LIMIT 1000",
(user_id,),
).fetchall()
messages = [m[0] for m in messages if not m[0].startswith("/")]
drop_until(messages, max_length)
prompt = "> " + "\n> ".join(messages)
print(f"Describing user with {len(messages)} messages and {len(prompt)} chars")
system_prompt = "You are a language model tasked with describing this person in a few honest sentences, based on their past messages. Put focus on personality and behavior."
return await generate_text(prompt, system_prompt=system_prompt, max_tokens=256)
def get_yesterday_boundary() -> (datetime, datetime):
# Define the CEST timezone
cest = pytz.timezone("Europe/Berlin") # Central European Summer Time (CEST)
# Get the current date and time in the CEST timezone
now_cest = datetime.datetime.now(cest)
# Set the time to 12:00 PM (noon)
noon_cest = now_cest.replace(hour=12, minute=0, second=0, microsecond=0)
# If the current time is before 12:00 PM, consider yesterday's boundary
if now_cest < noon_cest:
yesterday = noon_cest - datetime.timedelta(days=1)
else:
yesterday = noon_cest
return yesterday - datetime.timedelta(days=1), yesterday
@AsyncLRU(3)
async def get_summary(guild_id, channel_id, offset: int = 0, max_length: int = 3500):
from_date, to_date = get_yesterday_boundary()
from_date = from_date - datetime.timedelta(days=offset)
to_date = to_date - datetime.timedelta(days=offset)
summary = con.execute(
"SELECT summary FROM summaries WHERE guild=? AND channel=? AND from_date=? AND to_date=?",
(guild_id, channel_id, from_date, to_date),
).fetchone()
if summary is None:
messages = con.execute(
"""
SELECT content, names.name as username
FROM messages
LEFT JOIN names ON names.id=messages.author
LEFT JOIN blocked_users ON messages.author=blocked_users.id
WHERE blocked_users.id IS NULL AND guild=? AND (? < 0 OR channel=?) AND date BETWEEN ? AND ?
""",
(guild_id, channel_id, channel_id, from_date, to_date),
).fetchall()
messages = [f"{m[1]}: {m[0]}" for m in messages if not m[0].startswith("/")]
original_count = len(messages)
# Crop to fit into context size
messages = drop_until(messages, max_length)
# Construct prompt
prompt = "> " + "\n> ".join(messages)
# No need to summarize a single message
if len(prompt) < 64:
return "Nothing.", from_date, to_date
prompt = f"Today's new conversations:{prompt}\n\nToday's summary"
# Generate summary
where = (
"public Discord server"
if channel_id < 0
else "specific Discord server channel"
)
system_prompt = f"You are a language model tasked with summarizing following conversation on a {where} in a concise way."
summary = await generate_text(
prompt,
system_prompt=system_prompt,
max_tokens=256,
model="gpt-3.5-turbo",
temperature=0.25,
)
# Embed the summary for faster indexing
embedding = generate_embedding(summary)
# Cache summary
con.execute(
"""
INSERT INTO summaries (guild, channel, from_date, to_date, summary, embedding)
VALUES (?, ?, ?, ?, ?, ?)
""",
(guild_id, channel_id, from_date, to_date, summary, embedding),
)
con.commit()
print(
f"Summarized day with {original_count} original, {len(messages)} compressed messages and {len(prompt)} chars."
)
return summary, from_date, to_date
else:
return summary[0], from_date, to_date
def set_index_status(channel_id, active: bool):
if active:
settings[str(channel_id)] = True
else:
del settings[str(channel_id)]
con.execute(
"""
UPDATE messages
SET indexed=?
WHERE channel=?
""",
(1 if active else 0, channel_id),
)
async def track(message: Message):
after = None
if str(message.channel.id) in progress:
after = progress[str(message.channel.id)]
indexed = str(message.channel.id) in settings
set_name(message.guild.id, message.guild.name)
set_name(message.channel.id, message.channel.name)
count = 0
async for received in message.channel.history(
limit=100, after=after, oldest_first=True
):
if received.clean_content and not received.clean_content.startswith("/hagrid"):
set_name(received.author.id, received.author.name)
count += 1
con.execute(
"INSERT OR REPLACE INTO messages (id, guild, channel, author, content, date, indexed) VALUES (?, ?, ?, ?, ?, ?, ?)",
(
received.id,
received.guild.id,
received.channel.id,
received.author.id,
received.clean_content,
received.created_at,
indexed,
),
)
progress[str(message.channel.id)] = received.created_at
con.commit()
print(
f"Tracked {count} {('indexed ' if indexed else '')}messages in {message.channel.name} until {progress[str(message.channel.id)]}"
)
return count
async def on_smart_message(message):
msg = message.clean_content.lower()
tracked = str(message.guild.id) in settings
# Basic commands
if msg.startswith("/hagrid"):
stat(message, "command")
if msg == "/hagrid guild extend":
settings[str(message.guild.id)] = True
await message.delete()
elif msg == "/hagrid channel index":
set_index_status(message.channel.id, True)
if hasattr(message.channel, "parent"):
set_index_status(message.channel.parent.id, True)
await message.delete()
elif msg == "/hagrid channel noindex":
set_index_status(message.channel.id, False)
if hasattr(message.channel, "parent"):
set_index_status(message.channel.parent.id, False)
await message.delete()
elif msg == "/hagrid scan":
await message.delete()
while await track(message) > 0:
pass
else:
await message.channel.send(HAGRID_COMMANDS)
# Track messages
if tracked:
await track(message)
else:
return
# Search for stuff
if msg.startswith("hagrid search"):
stat(message, "search")
await message.channel.typing()
if message.reference and message.reference.resolved:
embedding = generate_embedding(message.reference.resolved.clean_content)
else:
embedding = generate_embedding(
message.clean_content.replace("hagrid search", "")
)
best = search(message.guild.id, embedding)
await message.channel.send("Check out those links: " + (" ".join(best)))
return True
# Summarize someones personality
if "hagrid who is" in msg:
await message.channel.typing()
if len(message.mentions) == 0:
await message.channel.send(f"Yer have to mention someone!")
else:
who = message.mentions[0].id
description = await who_is(who)
await message.channel.send(description)
return True
# Summarize a timespan
if msg.startswith("hagrid what happened"):
stat(message, "summary")
await message.channel.typing()
here = msg.startswith("hagrid what happened here")
try:
history_length = int(
msg.replace(
"hagrid what happened here" if here else "hagrid what happened", ""
).strip()
)
except ValueError:
history_length = 1
history_length = min(7, history_length)
# Get the summary of the last 3 days
summaries = [
await get_summary(message.guild.id, message.channel.id if here else -1, i)
for i in range(history_length)
]
summaries.reverse()
msg = (
""
if history_length == 1
else "Right then, 'ere's the summary o' the last few days!\n"
) + "\n\n".join(
[
f'**{to_date.strftime("%Y, %d %B")}:**\n{summary}'
for (summary, from_date, to_date) in summaries
]
)
# noinspection SpellCheckingInspection
if len(msg) > 1500:
for (summary, from_date, to_date) in summaries:
await message.channel.send(
f'**{to_date.strftime("%Y, %d %B")}:**\n{summary}'
)
else:
await message.channel.send(msg)
return True
convo_id = f"{message.author.id}_{message.channel.id}"
if msg == "bye hagrid" and convo_id in active_conversations:
del active_conversations[convo_id]
return True
if "hallo hagrid" in msg or (
convo_id in active_conversations
and (datetime.datetime.now() - active_conversations[convo_id]).seconds
< MAX_CONVERSATION_TIME
):
stat(message, "hallo hagrid")
await message.channel.typing()
FIXED_HISTORY_N = 5
MAX_HISTORY_N = 32
WHO_IS_CONTEXT_LENGTH = 512
HISTORY_LENGTH = 512
active_conversations[convo_id] = datetime.datetime.now()
# We are not interested in this specific summary, but let's enforce generating it for the lookup
if SUPER_SMART:
await get_summary(message.guild.id, -1)
await get_summary(message.guild.id, message.channel.id)
# Fetch the embedding of the input text to look for similar topics
embedding = generate_embedding(message.clean_content)
# Use similar memories from the past
summaries = find_best_summaries(message.guild.id, embedding)
summary = "\n\n".join(summaries)
# Fetch info about the trigger
if SUPER_SMART:
who = await who_is(message.author.id, max_length=WHO_IS_CONTEXT_LENGTH)
# Use the last few messages from the channel as context
messages = con.execute(
"""
SELECT content, names.name as username
FROM messages
LEFT JOIN names ON names.id=messages.author
LEFT JOIN blocked_users ON messages.author=blocked_users.id
WHERE channel=? AND blocked_users.id IS NULL
ORDER BY date DESC
LIMIT ?
""",
(message.channel.id, MAX_HISTORY_N),
).fetchall()
messages = [(m[0].replace("\n", " "), m[1]) for m in messages]
messages = [f"{m[1]}: {m[0]}" for m in messages]
messages = messages[0:FIXED_HISTORY_N] + drop_until(
messages[FIXED_HISTORY_N:], HISTORY_LENGTH
)
messages.reverse()
# Concatenate
history = "\n".join(messages).replace("HagridBot", "Hagrid")
# Define Hagrid
system_prompts = "You are the loyal, friendly, and softhearted Rubeus Hagrid with a thick west country accent. You are a man of few words. If asked a question, you answers correctly. This is a conversation between one or more users, where you take part."
# Build prompt
# prompt = f"Context from the past:\n{summary}\n\n{message.author.name}'s profile summary:\n{who}\n\nConversation:\n{history}\n\nWhat short answer would Hagrid now respond? Respond with his thick west country accent!"
prompt = f"Conversation:\n{history}\n\nWhat short answer would Hagrid now respond? Respond with his thick west country accent!"
# Request
await message.channel.send(
(
await generate_text(
prompt=prompt,
system_prompt=system_prompts,
max_tokens=150,
temperature=0.8,
frequency_penalty=0.1,
)
)
.replace("Hagrid:", "")
.strip()
)
return True
| [
"Conversation:\nPLACEHOLDER\n\nWhat short answer would Hagrid now respond? Respond with his thick west country accent!",
"> ",
"Today's new conversations:PLACEHOLDER\n\nToday's summary",
"You are a language model tasked with describing this person in a few honest sentences, based on their past messages. Put focus on personality and behavior.",
"You are a language model tasked with summarizing following conversation on a PLACEHOLDER in a concise way.",
"\n> ",
"You are the loyal, friendly, and softhearted Rubeus Hagrid with a thick west country accent. You are a man of few words. If asked a question, you answers correctly. This is a conversation between one or more users, where you take part."
] |
2024-01-10 | Razcle/langchain | langchain~chains~python.py | """Chain that runs python code.
Heavily borrowed from https://replit.com/@amasad/gptpy?v=1#main.py
"""
import sys
from io import StringIO
from typing import Dict, List
from pydantic import BaseModel
from langchain.chains.base import Chain
from langchain.python import PythonREPL
class PythonChain(Chain, BaseModel):
"""Chain to run python code.
Example:
.. code-block:: python
from langchain import PythonChain
python_chain = PythonChain()
"""
input_key: str = "code" #: :meta private:
output_key: str = "output" #: :meta private:
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
python_repl = PythonREPL()
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
python_repl.run(inputs[self.input_key])
sys.stdout = old_stdout
output = mystdout.getvalue()
return {self.output_key: output}
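# Usage sketch: the chain takes {"code": ...} and returns the captured stdout under "output",
# e.g. inputs {"code": "print(1 + 1)"} produce {"output": "2\n"}.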
| [] |
2024-01-10 | Razcle/langchain | langchain~vectorstores~elastic_vector_search.py | """Wrapper around Elasticsearch vector database."""
import uuid
from typing import Any, Callable, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[int]) -> Dict:
return {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
class ElasticVectorSearch(VectorStore):
"""Wrapper around Elasticsearch as a vector database.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
elastic_vector_search = ElasticVectorSearch(
"http://localhost:9200",
"embeddings",
embedding_function
)
"""
def __init__(
self, elasticsearch_url: str, index_name: str, embedding_function: Callable
):
"""Initialize with necessary components."""
try:
import elasticsearch
except ImportError:
raise ValueError(
    "Could not import elasticsearch python package. "
    "Please install it with `pip install elasticsearch`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
es_client = elasticsearch.Elasticsearch(elasticsearch_url) # noqa
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is misformatted. Got error: {e} "
)
self.client = es_client
def add_texts(
self, texts: Iterable[str], metadatas: Optional[List[dict]] = None
) -> None:
"""Run more texts through the embeddings and add to the vectorstore."""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
    "Could not import elasticsearch python package. "
    "Please install it with `pip install elasticsearch`."
)
requests = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": self.index_name,
"vector": self.embedding_function(text),
"text": text,
"metadata": metadata,
}
requests.append(request)
bulk(self.client, requests)
# TODO: add option not to refresh
self.client.indices.refresh(index=self.index_name)
def similarity_search(self, query: str, k: int = 4) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function(query)
script_query = _default_script_query(embedding)
response = self.client.search(index=self.index_name, query=script_query)
hits = [hit["_source"] for hit in response["hits"]["hits"][:k]]
documents = [
Document(page_content=hit["text"], metadata=hit["metadata"]) for hit in hits
]
return documents
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> "ElasticVectorSearch":
"""Construct ElasticVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Elasticsearch instance.
3. Adds the documents to the newly created Elasticsearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""
elasticsearch_url = get_from_dict_or_env(
kwargs, "elasticsearch_url", "ELASTICSEARCH_URL"
)
try:
import elasticsearch
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
    "Could not import elasticsearch python package. "
    "Please install it with `pip install elasticsearch`."
)
try:
client = elasticsearch.Elasticsearch(elasticsearch_url)
except ValueError as e:
raise ValueError(
"Your elasticsearch client string is misformatted. " f"Got error: {e} "
)
index_name = uuid.uuid4().hex
embeddings = embedding.embed_documents(texts)
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
client.indices.create(index=index_name, mappings=mapping)
requests = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": index_name,
"vector": embeddings[i],
"text": text,
"metadata": metadata,
}
requests.append(request)
bulk(client, requests)
client.indices.refresh(index=index_name)
return cls(elasticsearch_url, index_name, embedding.embed_query)
| [] |
2024-01-10 | Razcle/langchain | tests~unit_tests~chains~test_natbot.py | """Test functionality related to natbot."""
from typing import Any, List, Mapping, Optional
from langchain.chains.natbot.base import NatBotChain
from langchain.llms.base import LLM
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Return `foo` if longer than 10000 words, else `bar`."""
if len(prompt) > 10000:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
def test_proper_inputs() -> None:
"""Test that natbot shortens inputs correctly."""
nat_bot_chain = NatBotChain(llm=FakeLLM(), objective="testing")
url = "foo" * 10000
browser_content = "foo" * 10000
output = nat_bot_chain.execute(url, browser_content)
assert output == "bar"
def test_variable_key_naming() -> None:
"""Test that natbot handles variable key naming correctly."""
nat_bot_chain = NatBotChain(
llm=FakeLLM(),
objective="testing",
input_url_key="u",
input_browser_content_key="b",
output_key="c",
)
output = nat_bot_chain.execute("foo", "foo")
assert output == "bar"
| [] |
2024-01-10 | Razcle/langchain | tests~unit_tests~llms~fake_llm.py | """Fake LLM wrapper for testing purposes."""
from typing import Any, List, Mapping, Optional
from langchain.llms.base import LLM
class FakeLLM(LLM):
"""Fake LLM wrapper for testing purposes."""
def __init__(self, queries: Optional[Mapping] = None):
"""Initialize with optional lookup of queries."""
self._queries = queries
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""First try to lookup in queries, else return 'foo' or 'bar'."""
if self._queries is not None:
return self._queries[prompt]
if stop is None:
return "foo"
else:
return "bar"
@property
def _identifying_params(self) -> Mapping[str, Any]:
return {}
| [] |
2024-01-10 | Razcle/langchain | langchain~example_generator.py | """Utility functions for working with prompts."""
from typing import List
from langchain.chains.llm import LLMChain
from langchain.llms.base import LLM
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate
TEST_GEN_TEMPLATE_SUFFIX = "Add another example."
def generate_example(
examples: List[dict], llm: LLM, prompt_template: PromptTemplate
) -> str:
"""Return another example given a list of examples for a prompt."""
prompt = FewShotPromptTemplate(
examples=examples,
suffix=TEST_GEN_TEMPLATE_SUFFIX,
input_variables=[],
example_prompt=prompt_template,
)
chain = LLMChain(llm=llm, prompt=prompt)
return chain.predict()
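# Usage sketch (the example prompt, examples, and any LLM passed in are illustrative assumptions):
def _example_generate(llm: LLM) -> str:
    example_prompt = PromptTemplate(
        input_variables=["question", "answer"], template="Q: {question}\nA: {answer}"
    )
    examples = [
        {"question": "What is 2 + 2?", "answer": "4"},
        {"question": "What is 3 + 5?", "answer": "8"},
    ]
    return generate_example(examples, llm, example_prompt)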
| [
"Add another example."
] |
2024-01-10 | Razcle/langchain | langchain~llms~manifest.py | """Wrapper around HazyResearch's Manifest library."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.llms.base import LLM
class ManifestWrapper(LLM, BaseModel):
"""Wrapper around HazyResearch's Manifest library."""
client: Any #: :meta private:
llm_kwargs: Optional[Dict] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that python package exists in environment."""
try:
from manifest import Manifest
if not isinstance(values["client"], Manifest):
raise ValueError
except ImportError:
raise ValueError(
    "Could not import manifest python package. "
    "Please install it with `pip install manifest-ml`."
)
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
kwargs = self.llm_kwargs or {}
return {**self.client.client.get_model_params(), **kwargs}
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to LLM through Manifest."""
if stop is not None and len(stop) != 1:
raise NotImplementedError(
f"Manifest currently only supports a single stop token, got {stop}"
)
kwargs = self.llm_kwargs or {}
if stop is not None:
kwargs["stop_token"] = stop
return self.client.run(prompt, **kwargs)
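# Usage sketch (assumes manifest-ml's usual client_name/client_connection constructor arguments):
def _example_manifest_usage() -> str:
    from manifest import Manifest

    manifest = Manifest(client_name="openai", client_connection="YOUR_OPENAI_KEY")
    llm = ManifestWrapper(client=manifest, llm_kwargs={"temperature": 0})
    return llm("Say hello in one short sentence.")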
| [] |
2024-01-10 | Razcle/langchain | langchain~prompts~example_selector~semantic_similarity.py | """Example selector that selects examples based on SemanticSimilarity."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.vectorstores.base import VectorStore
class SemanticSimilarityExampleSelector(BaseExampleSelector, BaseModel):
"""Example selector that selects examples based on SemanticSimilarity."""
vectorstore: VectorStore
"""VectorStore than contains information about examples."""
k: int = 4
"""Number of examples to select."""
example_keys: Optional[List[str]] = None
"""Optional keys to filter examples to."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to vectorstore."""
string_example = " ".join(example.values())
self.vectorstore.add_texts([string_example], metadatas=[example])
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Select which examples to use based on semantic similarity."""
# Get the docs with the highest similarity.
query = " ".join(input_variables.values())
example_docs = self.vectorstore.similarity_search(query, k=self.k)
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in example_docs]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
@classmethod
def from_examples(
cls,
examples: List[dict],
embeddings: Embeddings,
vectorstore_cls: VectorStore,
k: int = 4,
**vectorstore_cls_kwargs: Any,
) -> "SemanticSimilarityExampleSelector":
"""Create k-shot example selector using example list and embeddings.
Reshuffles examples dynamically based on query similarity.
        Args:
            examples: List of examples to use in the prompt.
            embeddings: An initialized embedding API interface, e.g. OpenAIEmbeddings().
            vectorstore_cls: A vector store DB interface class, e.g. FAISS.
            k: Number of examples to select.
            vectorstore_cls_kwargs: optional kwargs containing url for vector store
Returns:
The ExampleSelector instantiated, backed by a vector store.
"""
string_examples = [" ".join(eg.values()) for eg in examples]
vectorstore = vectorstore_cls.from_texts(
string_examples, embeddings, metadatas=examples, **vectorstore_cls_kwargs
)
return cls(vectorstore=vectorstore, k=k)
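# Example usage (minimal sketch; FAISS and OpenAIEmbeddings are illustrative
# backends and require their own packages and credentials):
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings
    from langchain.vectorstores.faiss import FAISS
    antonym_examples = [
        {"input": "happy", "output": "sad"},
        {"input": "tall", "output": "short"},
    ]
    selector = SemanticSimilarityExampleSelector.from_examples(
        antonym_examples, OpenAIEmbeddings(), FAISS, k=1
    )
    print(selector.select_examples({"input": "joyful"}))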
| [] |
2024-01-10 | Razcle/langchain | tests~unit_tests~llms~test_utils.py | """Test LLM utility functions."""
from langchain.llms.utils import enforce_stop_tokens
def test_enforce_stop_tokens() -> None:
"""Test removing stop tokens when they occur."""
text = "foo bar baz"
output = enforce_stop_tokens(text, ["moo", "baz"])
assert output == "foo bar "
text = "foo bar baz"
output = enforce_stop_tokens(text, ["moo", "baz", "bar"])
assert output == "foo "
def test_enforce_stop_tokens_none() -> None:
"""Test removing stop tokens when they do not occur."""
text = "foo bar baz"
output = enforce_stop_tokens(text, ["moo"])
assert output == "foo bar baz"
| [] |
2024-01-10 | Razcle/langchain | langchain~llms~ai21.py | """Wrapper around AI21 APIs."""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class AI21PenaltyData(BaseModel):
"""Parameters for AI21 penalty data."""
scale: int = 0
applyToWhitespaces: bool = True
applyToPunctuations: bool = True
applyToNumbers: bool = True
applyToStopwords: bool = True
applyToEmojis: bool = True
class AI21(BaseModel, LLM):
"""Wrapper around AI21 large language models.
To use, you should have the environment variable ``AI21_API_KEY``
set with your API key.
Example:
.. code-block:: python
from langchain import AI21
ai21 = AI21(model="j1-jumbo")
"""
model: str = "j1-jumbo"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
maxTokens: int = 256
"""The maximum number of tokens to generate in the completion."""
minTokens: int = 0
"""The minimum number of tokens to generate in the completion."""
topP: float = 1.0
"""Total probability mass of tokens to consider at each step."""
presencePenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens."""
countPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to count."""
frequencyPenalty: AI21PenaltyData = AI21PenaltyData()
"""Penalizes repeated tokens according to frequency."""
numResults: int = 1
"""How many completions to generate for each prompt."""
logitBias: Optional[Dict[str, float]] = None
"""Adjust the probability of specific tokens being generated."""
ai21_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
ai21_api_key = get_from_dict_or_env(values, "ai21_api_key", "AI21_API_KEY")
values["ai21_api_key"] = ai21_api_key
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling AI21 API."""
return {
"temperature": self.temperature,
"maxTokens": self.maxTokens,
"minTokens": self.minTokens,
"topP": self.topP,
"presencePenalty": self.presencePenalty.dict(),
"countPenalty": self.countPenalty.dict(),
"frequencyPenalty": self.frequencyPenalty.dict(),
"numResults": self.numResults,
"logitBias": self.logitBias,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to AI21's complete endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = ai21("Tell me a joke.")
"""
if stop is None:
stop = []
response = requests.post(
url=f"https://api.ai21.com/studio/v1/{self.model}/complete",
headers={"Authorization": f"Bearer {self.ai21_api_key}"},
json={"prompt": prompt, "stopSequences": stop, **self._default_params},
)
if response.status_code != 200:
optional_detail = response.json().get("error")
raise ValueError(
f"AI21 /complete call failed with status code {response.status_code}."
f" Details: {optional_detail}"
)
response_json = response.json()
return response_json["completions"][0]["data"]["text"]
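# Example usage (minimal sketch; requires the AI21_API_KEY environment variable):
if __name__ == "__main__":
    ai21 = AI21(model="j1-jumbo", maxTokens=32)
    print(ai21("Tell me a joke."))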
| [] |
2024-01-10 | Razcle/langchain | langchain~chains~mapreduce.py | """Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from typing import Dict, List
from pydantic import BaseModel, Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import LLM
from langchain.prompts.base import BasePromptTemplate
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain, BaseModel):
"""Map-reduce chain."""
map_llm: LLMChain
"""LLM wrapper to use for the map step."""
reduce_llm: LLMChain
"""LLM wrapper to use for the reduce step."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls, llm: LLM, prompt: BasePromptTemplate, text_splitter: TextSplitter
) -> "MapReduceChain":
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(map_llm=llm_chain, reduce_llm=llm_chain, text_splitter=text_splitter)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
# Split the larger text into smaller chunks.
docs = self.text_splitter.split_text(inputs[self.input_key])
# Now that we have the chunks, we send them to the LLM and track results.
# This is the "map" part.
input_list = [{self.map_llm.prompt.input_variables[0]: d} for d in docs]
summary_results = self.map_llm.apply(input_list)
summaries = [res[self.map_llm.output_key] for res in summary_results]
# We then need to combine these individual parts into one.
# This is the reduce part.
summary_str = "\n".join(summaries)
inputs = {self.reduce_llm.prompt.input_variables[0]: summary_str}
output = self.reduce_llm.predict(**inputs)
return {self.output_key: output}
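# Example usage (minimal sketch; the summarization prompt and splitter settings
# are illustrative, and an OpenAI API key is assumed to be configured):
if __name__ == "__main__":
    from langchain.llms.openai import OpenAI
    from langchain.prompts.prompt import PromptTemplate
    from langchain.text_splitter import CharacterTextSplitter
    map_reduce_prompt = PromptTemplate(
        input_variables=["text"], template="Summarize this text:\n{text}\nSummary:"
    )
    chain = MapReduceChain.from_params(
        llm=OpenAI(temperature=0),
        prompt=map_reduce_prompt,
        text_splitter=CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=0),
    )
    long_text = "..."  # replace with the document to summarize
    print(chain.run(long_text))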
| [] |
2024-01-10 | Razcle/langchain | langchain~llms~nlpcloud.py | """Wrapper around NLPCloud APIs."""
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
class NLPCloud(LLM, BaseModel):
"""Wrapper around NLPCloud large language models.
To use, you should have the ``nlpcloud`` python package installed, and the
environment variable ``NLPCLOUD_API_KEY`` set with your API key.
Example:
.. code-block:: python
from langchain import NLPCloud
            nlpcloud = NLPCloud(model_name="gpt-neox-20b")
"""
client: Any #: :meta private:
model_name: str = "finetuned-gpt-neox-20b"
"""Model name to use."""
temperature: float = 0.7
"""What sampling temperature to use."""
min_length: int = 1
"""The minimum number of tokens to generate in the completion."""
max_length: int = 256
"""The maximum number of tokens to generate in the completion."""
length_no_input: bool = True
"""Whether min_length and max_length should include the length of the input."""
remove_input: bool = True
"""Remove input text from API response"""
remove_end_sequence: bool = True
"""Whether or not to remove the end sequence token."""
bad_words: List[str] = []
"""List of tokens not allowed to be generated."""
top_p: int = 1
"""Total probability mass of tokens to consider at each step."""
top_k: int = 50
"""The number of highest probability tokens to keep for top-k filtering."""
repetition_penalty: float = 1.0
"""Penalizes repeated tokens. 1.0 means no penalty."""
length_penalty: float = 1.0
"""Exponential penalty to the length."""
do_sample: bool = True
"""Whether to use sampling (True) or greedy decoding."""
num_beams: int = 1
"""Number of beams for beam search."""
early_stopping: bool = False
"""Whether to stop beam search at num_beams sentences."""
num_return_sequences: int = 1
"""How many completions to generate for each prompt."""
nlpcloud_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
nlpcloud_api_key = get_from_dict_or_env(
values, "nlpcloud_api_key", "NLPCLOUD_API_KEY"
)
try:
import nlpcloud
values["client"] = nlpcloud.Client(
values["model_name"], nlpcloud_api_key, gpu=True, lang="en"
)
except ImportError:
raise ValueError(
"Could not import nlpcloud python package. "
"Please it install it with `pip install nlpcloud`."
)
return values
@property
def _default_params(self) -> Mapping[str, Any]:
"""Get the default parameters for calling NLPCloud API."""
return {
"temperature": self.temperature,
"min_length": self.min_length,
"max_length": self.max_length,
"length_no_input": self.length_no_input,
"remove_input": self.remove_input,
"remove_end_sequence": self.remove_end_sequence,
"bad_words": self.bad_words,
"top_p": self.top_p,
"top_k": self.top_k,
"repetition_penalty": self.repetition_penalty,
"length_penalty": self.length_penalty,
"do_sample": self.do_sample,
"num_beams": self.num_beams,
"early_stopping": self.early_stopping,
"num_return_sequences": self.num_return_sequences,
}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to NLPCloud's create endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Not supported by this interface (pass in init method)
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = nlpcloud("Tell me a joke.")
"""
if stop and len(stop) > 1:
raise ValueError(
"NLPCloud only supports a single stop sequence per generation."
"Pass in a list of length 1."
)
elif stop and len(stop) == 1:
end_sequence = stop[0]
else:
end_sequence = None
response = self.client.generation(
prompt, end_sequence=end_sequence, **self._default_params
)
return response["generated_text"]
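# Example usage (minimal sketch; requires NLPCLOUD_API_KEY and the nlpcloud package):
if __name__ == "__main__":
    llm = NLPCloud(model_name="finetuned-gpt-neox-20b", max_length=32)
    print(llm("Say foo:"))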
| [] |
2024-01-10 | Razcle/langchain | langchain~__init__.py | """Main entrypoint into package."""
from pathlib import Path
with open(Path(__file__).absolute().parents[0] / "VERSION") as _f:
__version__ = _f.read().strip()
from langchain.agents import MRKLChain, ReActChain, SelfAskWithSearchChain
from langchain.chains import (
ConversationChain,
LLMChain,
LLMMathChain,
PythonChain,
SerpAPIChain,
SQLDatabaseChain,
VectorDBQA,
)
from langchain.docstore import InMemoryDocstore, Wikipedia
from langchain.llms import Cohere, HuggingFaceHub, OpenAI
from langchain.prompts import (
BasePromptTemplate,
FewShotPromptTemplate,
Prompt,
PromptTemplate,
)
from langchain.sql_database import SQLDatabase
from langchain.vectorstores import FAISS, ElasticVectorSearch
__all__ = [
"LLMChain",
"LLMMathChain",
"PythonChain",
"SelfAskWithSearchChain",
"SerpAPIChain",
"Cohere",
"OpenAI",
"BasePromptTemplate",
"Prompt",
"FewShotPromptTemplate",
"PromptTemplate",
"ReActChain",
"Wikipedia",
"HuggingFaceHub",
"SQLDatabase",
"SQLDatabaseChain",
"FAISS",
"MRKLChain",
"VectorDBQA",
"ElasticVectorSearch",
"InMemoryDocstore",
"ConversationChain",
]
| [] |
2024-01-10 | Razcle/langchain | tests~integration_tests~llms~test_nlpcloud.py | """Test NLPCloud API wrapper."""
from langchain.llms.nlpcloud import NLPCloud
def test_nlpcloud_call() -> None:
"""Test valid call to nlpcloud."""
llm = NLPCloud(max_length=10)
output = llm("Say foo:")
assert isinstance(output, str)
| [] |
2024-01-10 | Razcle/langchain | tests~integration_tests~chains~test_react.py | """Integration test for self ask with search."""
from langchain.agents.react.base import ReActChain
from langchain.docstore.wikipedia import Wikipedia
from langchain.llms.openai import OpenAI
def test_react() -> None:
"""Test functionality on a prompt."""
llm = OpenAI(temperature=0)
react = ReActChain(llm=llm, docstore=Wikipedia())
question = (
"Author David Chanoff has collaborated with a U.S. Navy admiral "
"who served as the ambassador to the United Kingdom under "
"which President?"
)
output = react.run(question)
assert output == "Bill Clinton"
| [] |
2024-01-10 | Razcle/langchain | langchain~prompts~few_shot.py | """Prompt template that contains few shot examples."""
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.prompts.base import (
DEFAULT_FORMATTER_MAPPING,
BasePromptTemplate,
check_valid_template,
)
from langchain.prompts.example_selector.base import BaseExampleSelector
from langchain.prompts.prompt import PromptTemplate
class FewShotPromptTemplate(BasePromptTemplate, BaseModel):
"""Prompt template that contains few shot examples."""
examples: Optional[List[dict]] = None
"""Examples to format into the prompt.
Either this or example_selector should be provided."""
example_selector: Optional[BaseExampleSelector] = None
"""ExampleSelector to choose the examples to format into the prompt.
Either this or examples should be provided."""
example_prompt: PromptTemplate
"""PromptTemplate used to format an individual example."""
suffix: str
"""A prompt template string to put after the examples."""
input_variables: List[str]
"""A list of the names of the variables the prompt template expects."""
example_separator: str = "\n\n"
"""String separator used to join the prefix, the examples, and suffix."""
prefix: str = ""
"""A prompt template string to put before the examples."""
template_format: str = "f-string"
"""The format of the prompt template. Options are: 'f-string'."""
@root_validator(pre=True)
def check_examples_and_selector(cls, values: Dict) -> Dict:
"""Check that one and only one of examples/example_selector are provided."""
examples = values.get("examples", None)
example_selector = values.get("example_selector", None)
if examples and example_selector:
raise ValueError(
"Only one of 'examples' and 'example_selector' should be provided"
)
if examples is None and example_selector is None:
raise ValueError(
"One of 'examples' and 'example_selector' should be provided"
)
return values
@root_validator()
def template_is_valid(cls, values: Dict) -> Dict:
"""Check that prefix, suffix and input variables are consistent."""
check_valid_template(
values["prefix"] + values["suffix"],
values["template_format"],
values["input_variables"],
)
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def _get_examples(self, **kwargs: Any) -> List[dict]:
if self.examples is not None:
return self.examples
elif self.example_selector is not None:
return self.example_selector.select_examples(kwargs)
else:
raise ValueError
def format(self, **kwargs: Any) -> str:
"""Format the prompt with the inputs.
Args:
kwargs: Any arguments to be passed to the prompt template.
Returns:
A formatted string.
Example:
.. code-block:: python
prompt.format(variable1="foo")
"""
# Get the examples to use.
examples = self._get_examples(**kwargs)
# Format the examples.
example_strings = [
self.example_prompt.format(**example) for example in examples
]
# Create the overall template.
pieces = [self.prefix, *example_strings, self.suffix]
template = self.example_separator.join([piece for piece in pieces if piece])
# Format the template with the input variables.
return DEFAULT_FORMATTER_MAPPING[self.template_format](template, **kwargs)
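# Example usage (minimal sketch; only formats a prompt string, no LLM call is made):
if __name__ == "__main__":
    word_prompt = PromptTemplate(
        input_variables=["word", "antonym"], template="Word: {word}\nAntonym: {antonym}"
    )
    few_shot = FewShotPromptTemplate(
        examples=[{"word": "happy", "antonym": "sad"}],
        example_prompt=word_prompt,
        prefix="Give the antonym of every input.",
        suffix="Word: {input}\nAntonym:",
        input_variables=["input"],
    )
    print(few_shot.format(input="tall"))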
| [
"f-string"
] |
2024-01-10 | Razcle/langchain | tests~integration_tests~llms~test_openai.py | """Test OpenAI API wrapper."""
import pytest
from langchain.llms.openai import OpenAI
def test_openai_call() -> None:
"""Test valid call to openai."""
llm = OpenAI(max_tokens=10)
output = llm("Say foo:")
assert isinstance(output, str)
def test_openai_extra_kwargs() -> None:
"""Test extra kwargs to openai."""
# Check that foo is saved in extra_kwargs.
llm = OpenAI(foo=3, max_tokens=10)
assert llm.max_tokens == 10
assert llm.model_kwargs == {"foo": 3}
# Test that if extra_kwargs are provided, they are added to it.
llm = OpenAI(foo=3, model_kwargs={"bar": 2})
assert llm.model_kwargs == {"foo": 3, "bar": 2}
# Test that if provided twice it errors
with pytest.raises(ValueError):
OpenAI(foo=3, model_kwargs={"foo": 2})
| [] |
2024-01-10 | Razcle/langchain | langchain~chains~serpapi.py | """Chain that calls SerpAPI.
Heavily borrowed from https://github.com/ofirpress/self-ask
"""
import os
import sys
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.chains.base import Chain
from langchain.utils import get_from_dict_or_env
class HiddenPrints:
"""Context manager to hide prints."""
def __enter__(self) -> None:
"""Open file to pipe stdout to."""
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, "w")
def __exit__(self, *_: Any) -> None:
"""Close file that stdout was piped to."""
sys.stdout.close()
sys.stdout = self._original_stdout
class SerpAPIChain(Chain, BaseModel):
"""Chain that calls SerpAPI.
To use, you should have the ``google-search-results`` python package installed,
and the environment variable ``SERPAPI_API_KEY`` set with your API key, or pass
`serpapi_api_key` as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain import SerpAPIChain
serpapi = SerpAPIChain()
"""
search_engine: Any #: :meta private:
input_key: str = "search_query" #: :meta private:
output_key: str = "search_result" #: :meta private:
serpapi_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
return [self.output_key]
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
serpapi_api_key = get_from_dict_or_env(
values, "serpapi_api_key", "SERPAPI_API_KEY"
)
values["serpapi_api_key"] = serpapi_api_key
try:
from serpapi import GoogleSearch
values["search_engine"] = GoogleSearch
except ImportError:
raise ValueError(
"Could not import serpapi python package. "
"Please it install it with `pip install google-search-results`."
)
return values
def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
params = {
"api_key": self.serpapi_api_key,
"engine": "google",
"q": inputs[self.input_key],
"google_domain": "google.com",
"gl": "us",
"hl": "en",
}
with HiddenPrints():
search = self.search_engine(params)
res = search.get_dict()
if "error" in res.keys():
raise ValueError(f"Got error from SerpAPI: {res['error']}")
if "answer_box" in res.keys() and "answer" in res["answer_box"].keys():
toret = res["answer_box"]["answer"]
elif "answer_box" in res.keys() and "snippet" in res["answer_box"].keys():
toret = res["answer_box"]["snippet"]
elif (
"answer_box" in res.keys()
and "snippet_highlighted_words" in res["answer_box"].keys()
):
toret = res["answer_box"]["snippet_highlighted_words"][0]
elif "snippet" in res["organic_results"][0].keys():
toret = res["organic_results"][0]["snippet"]
else:
toret = "No good search result found"
return {self.output_key: toret}
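# Example usage (minimal sketch; requires SERPAPI_API_KEY and the
# google-search-results package):
if __name__ == "__main__":
    search = SerpAPIChain()
    print(search.run("What is the capital of France?"))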
| [] |
2024-01-10 | dianadima/multi-action | stim_curation~analysis~py~sen_openai.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 11 14:01:52 2023
@author: dianadima
"""
import openai, numpy as np
from pandas import read_csv
from scipy.io import savemat
from parse_sentences import parse_sentences
#CHANGE ME: set API key to use for OpenAI embeddings
openai.api_key = ""
sentences = read_csv("./sentences.csv",header=None)
s_list = parse_sentences(sentences)
#get ada embeddings - current best
emb_ada = []
for s in s_list:
resp = openai.Embedding.create(
input=[s],
engine="text-embedding-ada-002")
emb = resp['data'][0]['embedding']
emb = np.array(emb)
emb_ada.append(emb)
emb_ada = np.array(emb_ada)
savemat('./gpt_ada.mat',{'emb_ada':emb_ada})
#get davinci embeddings - deprecated
emb_davinci = []
for s in s_list:
resp = openai.Embedding.create(
input=[s],
engine="text-similarity-davinci-001")
emb = resp['data'][0]['embedding']
emb = np.array(emb)
emb_davinci.append(emb)
emb_davinci = np.array(emb_davinci)
savemat('./gpt_davinci.mat',{'emb_davinci':emb_davinci})
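# Alternative sketch: the loops above issue one request per sentence. The
# embeddings endpoint also accepts a list of inputs, so the same vectors could
# be fetched in batches; the helper below is illustrative (batch size assumed).
def embed_batch(texts, engine="text-embedding-ada-002", batch_size=100):
    vectors = []
    for i in range(0, len(texts), batch_size):
        resp = openai.Embedding.create(input=texts[i:i + batch_size], engine=engine)
        vectors.extend(np.array(item["embedding"]) for item in resp['data'])
    return np.array(vectors)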
| [] |
2024-01-10 | dianadima/multi-action | stim_curation~analysis~py~sen_openai_parts.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 13:28:38 2023
@author: dianadima
"""
import openai, numpy as np
from pandas import read_csv
from scipy.io import savemat
from parse_sentence_parts import parse_sentence_parts
#CHANGE ME: set API key to use for OpenAI embeddings
openai.api_key = ""
sentences = read_csv("./sentences.csv",header=None)
s_age,s_act,s_con = parse_sentence_parts(sentences)
#get ada embeddings - current best
emb_ada = []
for s in s_age:
resp = openai.Embedding.create(
input=[s],
engine="text-embedding-ada-002")
emb = resp['data'][0]['embedding']
emb = np.array(emb)
emb_ada.append(emb)
emb_ada = np.array(emb_ada)
savemat('./gpt_ada_agent.mat',{'emb_ada':emb_ada})
emb_ada = []
for s in s_act:
resp = openai.Embedding.create(
input=[s],
engine="text-embedding-ada-002")
emb = resp['data'][0]['embedding']
emb = np.array(emb)
emb_ada.append(emb)
emb_ada = np.array(emb_ada)
savemat('./gpt_ada_action.mat',{'emb_ada':emb_ada})
emb_ada = []
for s in s_con:
resp = openai.Embedding.create(
input=[s],
engine="text-embedding-ada-002")
emb = resp['data'][0]['embedding']
emb = np.array(emb)
emb_ada.append(emb)
emb_ada = np.array(emb_ada)
savemat('./gpt_ada_context.mat',{'emb_ada':emb_ada})
| [] |
2024-01-10 | yeahjack/chatgpt_zulip_bot | test_all.py | import pytest
from chatgpt import OpenAI, prompt_manager
from configparser import ConfigParser
import tiktoken
import os
api_version = os.environ.get("API_VERSION")
api_key = os.environ.get("OPENAI_API_KEY")
@pytest.fixture
# Test for chatgpt.py
def openai_object():
return OpenAI(api_version=api_version, api_key=api_key)
def test_trim_conversation_history(openai_object):
def count_tokens(message, api_version):
if "gpt-3.5-turbo" in api_version:
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
elif "gpt-4" in api_version:
encoding = tiktoken.encoding_for_model("gpt-4")
else:
raise ValueError("Unsupported API version, your API version is: " + api_version)
return len(encoding.encode(message))
history = [
"User: Hello",
"AI: Hi there!",
"User: How are you?",
"AI: I'm doing great! How about you?",
"User: I'm fine, thank you."
]
# Calculate token count for each message
message_tokens = [count_tokens(
msg, openai_object.api_version) for msg in history]
# Set max_tokens to the sum of tokens of the last two messages
max_tokens = message_tokens[-1] + message_tokens[-2]
trimmed_history = openai_object.trim_conversation_history(
history, max_tokens)
assert len(
trimmed_history) == 2, "Trimmed history should have only the last two messages"
def test_chatgpt_response_strings(openai_object):
prompts = [
"/1",
"/end",
"hi",
"/polish_en this is a test cases",
"/polish_zh 测试用例这一个是。",
"/find_grammar_mistakes this is a test cases",
"/zh-en 这是一个测试用例",
"/en-zh This is a test case",
"/en-ac This is a test case",
"/ex_code_zh print('This is a test case.')"
]
try:
for prompt in prompts:
result = openai_object.get_chatgpt_response(0, prompt)
assert isinstance(
result, str), f"Result should be a string for prompt: {prompt}"
finally:
# Clear the conversation history for test cases.
openai_object.user_conversations[0] = []
def test_prompt_manager():
# incorrect command
msg, code = prompt_manager("/zh-end asdf")
assert msg == 'Sorry, command not found: `/zh-end`, type `/help` to get the list of commands.'
assert code == 2
# correct command
msg, code = prompt_manager("/zh-en asdf")
assert msg == 'As a translator, your task is to accurately translate text from Chinese to English. Please pay attention to context and accurately explain phrases and proverbs. Below is the text you need to translate: \n\nasdf'
assert code == 0
# contextual text information
msg, code = prompt_manager("zh-en asdf")
assert msg == 'zh-en asdf'
assert code == 1
| [
"['/1', '/end', 'hi', '/polish_en this is a test cases', '/polish_zh 测试用例这一个是。', '/find_grammar_mistakes this is a test cases', '/zh-en 这是一个测试用例', '/en-zh This is a test case', '/en-ac This is a test case', \"/ex_code_zh print('This is a test case.')\"]"
] |
2024-01-10 | qgerome/openhexa-test-pipelines | chatgpt~pipeline.py | import openai
from openhexa.sdk import current_run, parameter, pipeline, workspace
@pipeline("chatgpt", name="chatgpt")
@parameter("question", required=True, type=str, default="Hello, how are you?")
def chatgpt(question):
openai.api_key = workspace.custom_connection("openai").api_key
ask(question)
current_run.log_info("Done !")
@chatgpt.task
def ask(question: str):
current_run.log_info(f"Ask ChatGPT to answer '{question}'")
# list models
models = openai.Model.list()
current_run.log_info(models)
# create a completion
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": question}]
)
answer = completion.choices[0].message.content
current_run.log_info(answer)
current_run.log_info("Writing answer to file")
with open(f"{workspace.files_path}/answer.txt", "w") as f:
f.write(answer)
if __name__ == "__main__":
chatgpt()
| [] |
2024-01-10 | qgerome/openhexa-test-pipelines | chatgpt~pipeline~pipeline.py | import openai
from openhexa.sdk import current_run, parameter, pipeline, workspace
@pipeline("chatgpt", name="chatgpt")
@parameter("question", required=True, type=str, default="Hello, how are you?")
def chatgpt(question):
openai.api_key = workspace.custom_connection("openai").api_key
answer = ask(question)
current_run.log_info("Writing answer to file")
with open(f"{workspace.files_path}/answer.txt", "w") as f:
f.write(answer)
current_run.log_info("Done !")
@chatgpt.task
def ask(question: str):
current_run.log_info(f"Ask ChatGPT to answer '{question}'")
# list models
models = openai.Model.list()
current_run.log_info(models)
# create a completion
completion = openai.Completion.create(model="ada", prompt=question)
    # return the completion text so the pipeline can write it to answer.txt
    return completion.choices[0].text
if __name__ == "__main__":
chatgpt()
| [] |
2024-01-10 | chazzjimel/WeChat-AIChatbot-WinOnly | voice~factory.py | """
voice factory
"""
def create_voice(voice_type):
"""
create a voice instance
:param voice_type: voice type code
:return: voice instance
"""
if voice_type == "baidu":
from voice.baidu.baidu_voice import BaiduVoice
return BaiduVoice()
elif voice_type == "google":
from voice.google.google_voice import GoogleVoice
return GoogleVoice()
elif voice_type == "openai":
from voice.openai.openai_voice import OpenaiVoice
return OpenaiVoice()
elif voice_type == "pytts":
from voice.pytts.pytts_voice import PyttsVoice
return PyttsVoice()
elif voice_type == "azure":
from voice.azure.azure_voice import AzureVoice
return AzureVoice()
    raise RuntimeError(f"Unsupported voice type: {voice_type}")
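# Example usage (minimal sketch; assumes the chosen backend's dependency,
# e.g. pyttsx3 for the "pytts" type, is installed):
if __name__ == "__main__":
    voice = create_voice("pytts")
    print(type(voice).__name__)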
| [] |
2024-01-10 | chazzjimel/WeChat-AIChatbot-WinOnly | plugins~newgpt_turbo~lib~search_google.py | import concurrent
import urllib
import openai
import requests
from datetime import datetime
from bs4 import BeautifulSoup
from common.log import logger
from concurrent.futures import ThreadPoolExecutor, as_completed
"""谷歌独立搜索函数,通过访问相关URL并提交给GPT整理获得更详细的信息"""
__all__ = ['search_google']
__author__ = 'chazzjimel/跃迁'
__date__ = '2023.6.21'
def get_url(url):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 "
"Safari/537.36"
}
try:
response = requests.get(url, headers=headers, timeout=2)
response.raise_for_status()
html = response.text
soup = BeautifulSoup(html, 'html.parser')
paragraphs = soup.find_all('p')
paragraphs_text = [p.get_text() for p in paragraphs]
return paragraphs_text
except requests.exceptions.RequestException as e:
logger.warning("无法访问该URL: %s, error: %s", url, str(e))
return None
def build_search_url(searchTerms, base_url, count=None, startIndex=None, language=None, cx=None, hq=None,
dateRestrict=None,
key=None):
"""
构建谷歌搜索的URL
:param searchTerms: 搜索关键词
:param base_url: 基础URL
:param count: 搜索结果数量
:param startIndex: 搜索结果的起始位置
:param language: 搜索结果的语言
:param cx: 自定义搜索引擎ID
:param hq: 搜索结果的域名
:param dateRestrict: 搜索结果的时间限制
:param key: API密钥
:return: 构建好的URL
"""
params = {
"q": searchTerms, # 搜索关键词
"num": count, # 搜索结果数量
"start": startIndex, # 搜索结果的起始位置
"lr": language, # 搜索结果的语言
"cx": cx, # 自定义搜索引擎ID
"sort": "date", # 搜索结果的排序方式
"filter": 1, # 是否过滤重复结果
"hq": hq, # 搜索结果的域名
"dateRestrict": dateRestrict, # 搜索结果的时间限制
"key": key, # API密钥
"alt": "json" # 返回结果的格式
}
params = {k: v for k, v in params.items() if v is not None} # 去除值为None的参数
encoded_params = urllib.parse.urlencode(params) # 对参数进行URL编码
base_url = base_url # 基础URL
search_url = base_url + encoded_params # 构建完整的URL
return search_url
def get_summary(item, model, search_terms):
logger.debug("正在获取链接内容:%s", item["link"])
link_content = get_url(item["link"])
if not link_content:
logger.warning("无法获取链接内容:%s", item["link"])
return None
logger.debug("link_content: %s", link_content)
# 获取链接内容字符数量
link_content_str = ' '.join(link_content)
content_length = len(link_content_str)
logger.debug("content_length: %s", content_length)
# 如果内容少于200个字符,则pass
if content_length < 200:
logger.warning("链接内容低于200个字符:%s", item["link"])
return None
    # If the content exceeds 8000 characters, keep only the middle 8000 characters
    elif content_length > 8000:
        logger.warning("链接内容超过8000个字符,进行裁断:%s", item["link"])
        start = (content_length - 8000) // 2
        end = start + 8000
        link_content = link_content_str[start:end]
logger.debug("正在提取摘要:%s", link_content)
summary = process_content(str(link_content), model=model, search_terms=search_terms)
return summary
def search_google(model, base_url, search_terms, count, api_key, cx_id, iterations):
all_summaries = []
for i in range(iterations):
try:
startIndex = i * count + 1
search_url = build_search_url(search_terms, base_url=base_url, count=10, cx=cx_id, key=api_key,
startIndex=startIndex)
logger.debug("正在进行第 %d 次搜索,URL:%s", i + 1, search_url)
response = requests.get(search_url)
model = model
if response.status_code == 200:
items = response.json().get('items', [])
logger.debug(f"search_google items:{items}")
with ThreadPoolExecutor(max_workers=5) as executor:
future_to_item = {executor.submit(get_summary, item, model, search_terms): item for item in items}
for future in as_completed(future_to_item):
try:
summary = future.result(timeout=5) # 设置超时时间
if summary is not None:
all_summaries.append("【搜索结果内容摘要】:\n" + summary)
except concurrent.futures.TimeoutError:
logger.error("处理摘要任务超时")
except Exception as e:
logger.error("在提取摘要过程中出现错误:%s", str(e))
else:
logger.error(f"Request failed with status code {response.status_code}")
# time.sleep(1) # Delay to prevent rate limiting
except Exception as e:
logger.error("在执行搜索过程中出现错误:%s", str(e))
# 判断 all_summaries 是否为空
if not all_summaries:
return ["实时联网暂未获取到有效信息内容,请更换关键词或再次重试······"]
return all_summaries
def process_content(content, model, search_terms=None):
current_date = datetime.now().strftime("%Y年%m月%d日")
summary = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system",
"content": f"""当前中国北京日期:{current_date},请判断并提取内容中与"{search_terms}"有关的详细内容,必须保留细节,准确的时间线以及富有逻辑的排版!如果与时间、前因后果、上下文等有关内容不能忽略,不可以胡编乱造!"""},
{"role": "assistant", "content": content},
],
temperature=0.8
)
return summary["choices"][0]["message"]["content"]
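# Example usage (minimal sketch; every credential and ID below is a placeholder,
# and openai.api_key must already be configured for process_content to work):
if __name__ == "__main__":
    summaries = search_google(
        model="gpt-3.5-turbo",
        base_url="https://www.googleapis.com/customsearch/v1?",
        search_terms="OpenAI latest news",
        count=10,
        api_key="YOUR_GOOGLE_API_KEY",
        cx_id="YOUR_CX_ID",
        iterations=1,
    )
    for s in summaries:
        print(s)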
| [
"当前中国北京日期:PLACEHOLDER,请判断并提取内容中与\"PLACEHOLDER\"有关的详细内容,必须保留细节,准确的时间线以及富有逻辑的排版!如果与时间、前因后果、上下文等有关内容不能忽略,不可以胡编乱造!"
] |
2024-01-10 | chazzjimel/WeChat-AIChatbot-WinOnly | plugins~midjourney_turbo~midjourney_turbo.py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
"""
@time: 2023/5/25 10:46
@Project :chatgpt-on-wechat
@file: midjourney_turbo_nt.py
"""
import base64
import datetime
import re
import sqlite3
import threading
import time
import openai
import requests
import io
import plugins
from tenacity import retry, stop_after_attempt, wait_fixed
from PIL import Image
from plugins.midjourney_turbo.lib.midJourney_module import MidJourneyModule
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from plugins import *
from common.log import logger
from common.expired_dict import ExpiredDict
from datetime import timedelta
# 创建并返回相应类型的频道对象
def create_channel_object():
# 从配置中获取频道类型
channel_type = conf().get("channel_type")
# 根据频道类型创建相应的频道对象
if channel_type == 'wework':
from channel.wework.wework_channel import WeworkChannel
return WeworkChannel(), ReplyType.IMAGE_URL, 2
elif channel_type == 'ntchat':
from channel.wechatnt.ntchat_channel import NtchatChannel
return NtchatChannel(), ReplyType.IMAGE_URL, 2
elif channel_type == 'weworktop':
from channel.weworktop.weworktop_channel import WeworkTopChannel
return WeworkTopChannel(), ReplyType.IMAGE_URL, 2
else:
from channel.wechatnt.ntchat_channel import NtchatChannel
return NtchatChannel(), ReplyType.IMAGE_URL, 2
# 对内容进行格式化处理
def format_content(content):
# 将内容中的"—"替换为"--"
if "—" in content:
content = content.replace("—", "--")
# 如果内容中包含"--",则按"--"将内容分割为提示和命令两部分
if "--" in content:
prompt, commands = content.split("--", 1)
commands = " --" + commands.strip()
else:
prompt, commands = content, ""
return prompt, commands
# 根据内容生成提示信息
@retry(stop=stop_after_attempt(5), wait=wait_fixed(2))
def generate_prompt(content, api_key, api_base):
openai.api_key = api_key
openai.api_base = api_base
try:
message_content = "请根据AI生图关键词'{}'预测想要得到的画面,然后用英文拓展描述、丰富细节、添加关键词描述以适用于AI生图。描述要简短直接突出重点,请把优化后的描述直接返回,不需要多余的语言!".format(
content)
completion = openai.ChatCompletion.create(model=conf().get("model", "gpt-3.5-turbo"), messages=[
{"role": "user", "content": message_content}], max_tokens=300, temperature=0.8, top_p=0.9)
prompt = completion['choices'][0]['message']['content']
logger.debug("优化后的关键词:{}".format(prompt))
return prompt
except Exception as e:
logger.warning(f"生成提示信息失败,重试中。错误信息: {str(e)}")
raise e
# 将图片转换为base64编码的字符串
def convert_base64(image):
# 打开图片文件
with open(image, "rb") as image_file:
# 对图片内容进行base64编码
encoded_string = base64.b64encode(image_file.read())
return encoded_string.decode('utf-8')
# 下载并压缩图片
def download_and_compress_image(url, filename, quality):
# 确定保存图片的目录
directory = os.path.join(os.getcwd(), "tmp")
# 如果目录不存在,则创建目录
if not os.path.exists(directory):
os.makedirs(directory)
# 下载图片
response = requests.get(url)
image = Image.open(io.BytesIO(response.content))
# 压缩图片
image_path = os.path.join(directory, f"{filename}.jpg")
image.save(image_path, "JPEG", quality=quality)
return image_path
# 带有重试机制的发送消息
def send_with_retry(comapp, com_reply, e_context, max_retries=3, delay=2):
# 尝试发送消息,如果失败则重试
for i in range(max_retries):
try:
# 尝试发送消息
comapp.send(com_reply, e_context['context'])
break # 如果成功发送,就跳出循环
except requests.exceptions.SSLError as e:
# 如果因为SSL错误而发送失败,记录错误并重试
logger.error(f"Failed to send message due to SSL error: {e}. Attempt {i + 1} of {max_retries}")
if i < max_retries - 1: # 如果不是最后一次尝试,那么等待一段时间再重试
time.sleep(delay) # 等待指定的秒数
else:
# 如果尝试发送消息的次数达到了最大次数,记录错误并放弃
logger.error(f"Failed to send message after {max_retries} attempts. Giving up.")
# 使用装饰器注册一个名为"Midjourney_Turbo"的插件
@plugins.register(name="Midjourney_Turbo", desc="使用Midjourney来画图", desire_priority=1, version="2.0",
author="chazzjimel")
# 定义一个名为 MidjourneyTurbo 的类,继承自 Plugin
class MidjourneyTurbo(Plugin):
# 初始化类
def __init__(self):
# 调用父类的初始化方法
super().__init__()
try:
# 获取当前文件的目录
curdir = os.path.dirname(__file__)
# 配置文件的路径
config_path = os.path.join(curdir, "config.json")
# 创建一个过期字典,有效期为1小时
self.params_cache = ExpiredDict(60 * 60)
# 如果配置文件不存在
if not os.path.exists(config_path):
# 输出日志信息,配置文件不存在,将使用模板
logger.info('[Midjourney_Turbo] 配置文件不存在,将使用config.json.template模板')
# 模板配置文件的路径
config_path = os.path.join(curdir, "config.json.template")
# 打开并读取配置文件
with open(config_path, "r", encoding="utf-8") as f:
# 加载 JSON 文件
config = json.load(f)
rootdir = os.path.dirname(os.path.dirname(curdir))
dbdir = os.path.join(rootdir, "db")
if not os.path.exists(dbdir):
os.mkdir(dbdir)
logger.info("[Midjourney_Turbo] inited")
user_db = os.path.join(dbdir, "user.db")
self.user_db = sqlite3.connect(user_db, check_same_thread=False)
# 创建频道对象
self.comapp, self.type, self.num = create_channel_object()
# 获取配置文件中的各种参数
self.api_key = config.get("api_key", "")
self.domain_name = config["domain_name"]
self.image_ins = config.get("image_ins", "/p")
self.blend_ins = config.get("blend_ins", "/b")
self.change_ins = config.get("change_ins", "/c")
self.split_url = config.get("split_url", False)
self.short_url_api = config.get("short_url_api", "")
self.default_params = config.get("default_params", {"action": "IMAGINE:出图", "prompt": ""})
self.gpt_optimized = config.get("gpt_optimized", False)
self.trial_lock = config.get("trial_lock", 3)
self.lock = config.get("lock", False)
self.group_lock = config.get("group_lock", False)
self.group_list = config.get("group_name_list", [])
self.group_id_list = config.get("group_id_list", [])
self.name_list = config.get("name_list", [])
self.user_id_list = config.get("user_id_list", [])
self.local_data = threading.local()
self.complete_prompt = config.get("complete_prompt", "任务完成!")
self.openai_api_key = config.get("openai_key", conf().get("open_ai_api_key"))
self.openai_api_base = config.get("openai_base", conf().get("open_ai_api_base"))
self.picture_quality = config.get("picture_quality", 30)
# 创建 MidJourneyModule 对象
self.mm = MidJourneyModule(api_key=self.api_key, domain_name=self.domain_name)
# 如果 domain_name 为空或包含"你的域名",则抛出异常
if not self.domain_name or "你的域名" in self.domain_name:
raise Exception("please set your Midjourney domain_name in config or environment variable.")
# 设置事件处理函数
self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context
# 输出日志信息,表示插件已初始化
logger.info("[Midjourney_Turbo] inited")
except FileNotFoundError: # 配置文件未找到的特定情况
logger.warn(f"[Midjourney_Turbo] init failed, config.json not found.")
raise # 抛出异常,结束程序
# 这个方法是一个事件处理方法,当插件接收到指定类型的事件时,会调用这个方法来处理
def on_handle_context(self, e_context: EventContext):
# 如果事件的类型不是图片创建或图片,则直接返回,不进行后续处理
if e_context['context'].type not in [ContextType.IMAGE_CREATE, ContextType.IMAGE]:
return
# 将图片请求内容的日志输出
logger.info("[Midjourney_Turbo] image_query={}".format(e_context['context'].content))
# 创建一个回复对象
reply = Reply()
try:
# 获取会话ID
user_id = e_context['context']["session_id"]
# 获取事件内容
content = e_context['context'].content[:]
if e_context['context'].type == ContextType.IMAGE_CREATE:
context = e_context['context']
logger.debug("收到 IMAGE_CREATE 事件.")
if self.lock:
logger.debug("使用限制已开启.")
if e_context["context"]["isgroup"]:
if self.group_lock:
msg = context.kwargs.get('msg')
group_name = msg.other_user_nickname
group_id = msg.other_user_id
if group_name in self.group_list or group_id in self.group_id_list:
continue_a, continue_b, remaining = True, False, ""
logger.debug("群组白名单,无限制.")
else:
continue_a, continue_b, remaining = self.check_and_update_usage_limit(
trial_lock=self.trial_lock,
user_id=user_id,
db_conn=self.user_db)
logger.debug(
f"群聊锁已开启. continue_a={continue_a}, continue_b={continue_b}, remaining={remaining}")
else:
continue_a, continue_b, remaining = True, False, ""
logger.debug("群聊锁未开启,无限制.")
else:
msg = context.kwargs.get('msg')
user_name = msg.from_user_nickname
if user_name in self.name_list or user_id in self.user_id_list:
continue_a, continue_b, remaining = True, False, ""
logger.debug("个人白名单,无限制.")
else:
continue_a, continue_b, remaining = self.check_and_update_usage_limit(
trial_lock=self.trial_lock,
user_id=user_id,
db_conn=self.user_db)
else:
continue_a, continue_b, remaining = True, False, ""
logger.debug("使用限制未开启.")
else:
continue_a, continue_b, remaining = True, False, ""
logger.debug("收到图像信息,继续执行.")
if continue_a and continue_b:
self.local_data.reminder_string = f"\n💳您的绘画试用次数剩余:{remaining}次"
elif not continue_a and not continue_b:
reply.type = ReplyType.TEXT
reply.content = f"⚠️提交失败,您的绘画试用次数剩余:0次 "
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
else:
self.local_data.reminder_string = remaining
# 如果事件类型是图片创建
if e_context['context'].type == ContextType.IMAGE_CREATE:
# 调用处理图片创建的方法
self.handle_image_create(e_context, user_id, content, reply)
# 如果用户ID存在于参数缓存中
elif user_id in self.params_cache:
# 调用处理参数缓存的方法
self.handle_params_cache(e_context, user_id, content, reply)
# 设置回复内容
e_context['reply'] = reply
# 设置事件动作为打断并传递,跳过处理context的默认逻辑
e_context.action = EventAction.BREAK_PASS
# 记录日志,事件动作设置为打断并传递,回复已设置
logger.debug("Event action set to BREAK_PASS, reply set.")
except Exception as e: # 捕获异常
# 设置回复类型为错误
reply.type = ReplyType.ERROR
# 设置回复内容为异常信息
reply.content = "[Midjourney_Turbo] " + str(e)
# 设置回复
e_context['reply'] = reply
# 记录异常日志
logger.exception("[Midjourney_Turbo] exception: %s" % e)
# 设置事件动作为pass
e_context.action = EventAction.BREAK_PASS
def handle_image_create(self, e_context, user_id, content, reply):
# 使用format_content方法格式化内容
prompt, commands = format_content(content=content)
# 深复制default_params到params
params = {**self.default_params}
# 处理垫图的情况
if self.image_ins in prompt:
# 移除图片插入标记
prompt = prompt.replace(self.image_ins, "")
prompt = generate_prompt(content=prompt, api_key=self.openai_api_key,
api_base=self.openai_api_base) if self.gpt_optimized else prompt
# 将params添加到用户的参数缓存中
self.params_cache[user_id] = {'image_params': params}
# 向params中的prompt添加内容
if params.get("prompt", ""):
params["prompt"] += f", {prompt}"
else:
params["prompt"] += f"{prompt}"
# 记录日志
logger.info("[Midjourney_Turbo] params={}".format(params))
# 设置回复类型为INFO,内容为提示用户发送图片的消息
reply.type = ReplyType.INFO
reply.content = "请发送一张图片给我"
# 处理合图的情况
elif self.blend_ins in prompt:
logger.info("[Midjourney_Turbo] blend_ins prompt={}".format(prompt))
try:
# 从用户的输入中获取需要合成的图片数量
num_pictures = int(prompt.split()[1])
except (IndexError, ValueError):
# 如果出现错误,设置回复类型为ERROR,内容为错误提示
trigger = conf()['image_create_prefix'][0]
reply.type = ReplyType.TEXT
reply.content = f"指令不正确,请根据示例格式重新输入:{trigger} {self.blend_ins} 2\n合图数量仅限2-5张"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
# 检查图片数量是否在2-5张之间
if not 2 <= num_pictures <= 5:
trigger = conf()['image_create_prefix'][0]
reply.type = ReplyType.TEXT
reply.content = f"指令不正确,请根据示例格式重新输入:{trigger} {self.blend_ins} 2\n合图数量仅限2-5张"
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS
return
# 添加用户的合成参数到params_cache
self.params_cache[user_id] = {'blend_params': params, 'num_pictures': num_pictures,
'base64_data': []}
# 记录调试日志
logger.debug(f"self.params_cache_2:{self.params_cache}")
# 向params中的prompt添加内容
if params.get("prompt", ""):
params["prompt"] += f", {prompt}"
else:
params["prompt"] += f"{prompt}"
# 记录日志
logger.info("[Midjourney_Turbo] params={}".format(params))
# 设置回复类型为INFO,内容为提示用户发送指定数量的图片的消息
reply.type = ReplyType.INFO
reply.content = f"请直接发送{num_pictures}张图片给我"
elif self.change_ins in prompt: # 处理变换,示例输入:/c V/U 1-4
# 处理提交的UV值
submit_uv = ' '.join(prompt.replace(self.change_ins, "").strip().split())
logger.debug("[Midjourney_Turbo] submit_uv post_json={}".format(" ".join(submit_uv)))
# 检查输入的格式是否正确
pattern = re.compile(r'^\d+\s[vVuU]\d$')
if not pattern.match(submit_uv):
trigger = conf()['image_create_prefix'][0]
reply.type = ReplyType.ERROR
reply.content = f"格式不正确。请使用如下示例格式:\n{trigger} {self.change_ins} 8528881058085979 V1"
else:
# 解析输入的值
number, v_value = submit_uv.split()
logger.debug("Parsed values: Number: {}, V value: {}".format(number, v_value))
v_value_upper = v_value.upper()
# 确保UV值在U1-U4和V1-V4范围内
if v_value_upper in ["U1", "U2", "U3", "U4", "V1", "V2", "V3", "V4"]:
simple_data = self.mm.get_simple(content=number + " " + v_value_upper)
# 发送任务提交消息
self.send_task_submission_message(e_context, messageId=simple_data["result"])
# 获取图片的URL
task_data = self.mm.get_image_url(id=simple_data["result"])
if task_data["failReason"] is None:
# 生成新的URL
new_url = self.generate_new_url(task_data=task_data)
# 生成短URL
short_url = self.get_short_url(short_url_api=self.short_url_api, url=new_url)
# 计算时间差
time_diff_start_finish_td, time_diff_submit_finish_td = self.get_time_diff(task_data)
logger.debug("new_url: %s" % new_url)
# 创建一个新的回复
com_reply = self.create_reply(new_url=new_url, data=simple_data)
# 发送回复
send_with_retry(self.comapp, com_reply, e_context)
logger.debug("The comapp object is an instance of: " + type(self.comapp).__name__)
reply.type = ReplyType.TEXT
# 设置回复内容
reply.content = self.complete_prompt.format(id=simple_data["result"],
change_ins=self.change_ins, imgurl=short_url,
start_finish=time_diff_start_finish_td,
submit_finish=time_diff_submit_finish_td)
logger.debug("Sent image URL and completed prompt.")
else:
reply.type = ReplyType.TEXT
reply.content = task_data["failReason"]
logger.debug("Sent failReason as reply content.")
else:
# 如果没有识别到特定的指令,则执行默认的操作,生成一个新的图像
logger.debug("Generating prompt...")
prompt = generate_prompt(content=prompt, api_key=self.openai_api_key,
api_base=self.openai_api_base) if self.gpt_optimized else prompt
prompt += commands
logger.debug(f"Generated prompt: {prompt}")
logger.debug("Getting imagination data...")
imagine_data = self.mm.get_imagine(prompt=prompt)
if isinstance(imagine_data, str):
# 如果返回的是错误消息,则直接发送错误消息
reply.type = ReplyType.TEXT
reply.content = f"任务提交失败,{imagine_data}"
logger.error(f"Received error message: {imagine_data}")
else:
self.send_task_submission_message(e_context, messageId=imagine_data["result"])
logger.debug(f"Received imagination data: {imagine_data}")
time.sleep(10) # 等待一段时间,以确保任务已经处理完成
logger.debug("Getting image URL...")
task_data = self.mm.get_image_url(id=imagine_data["result"])
logger.debug(f"Received task data: {task_data}")
if isinstance(task_data, str):
# 错误信息响应
reply.type = ReplyType.TEXT
reply.content = task_data
logger.error(f"Received error message: {task_data}")
else:
# 正常的JSON响应
if task_data["failReason"] is None:
# 处理图片链接
new_url = self.generate_new_url(task_data=task_data)
# 生成短链接
short_url = self.get_short_url(short_url_api=self.short_url_api, url=new_url)
# 计算时间差
time_diff_start_finish_td, time_diff_submit_finish_td = self.get_time_diff(
task_data)
logger.debug("new_url: %s" % new_url)
com_reply = self.create_reply(new_url=new_url, data=imagine_data)
# 发送回复
send_with_retry(self.comapp, com_reply, e_context)
reply.type = ReplyType.TEXT
# 设置回复内容
reply.content = self.complete_prompt.format(id=imagine_data["result"],
change_ins=self.change_ins,
imgurl=short_url,
start_finish=time_diff_start_finish_td,
submit_finish=time_diff_submit_finish_td)
logger.debug("Sent image URL and completed prompt.")
else:
reply.type = ReplyType.TEXT
reply.content = task_data["failReason"]
logger.debug("Sent failReason as reply content.")
# 设置回复内容和动作
e_context['reply'] = reply
e_context.action = EventAction.BREAK_PASS # 事件结束后,跳过处理context的默认逻辑
logger.debug("Event action set to BREAK_PASS, reply set.")
def handle_params_cache(self, e_context, user_id, content, reply):
# 如果参数缓存中存在对应用户的图像参数
if 'image_params' in self.params_cache[user_id]:
cmsg = e_context['context']['msg']
logger.debug("params_cache:%s" % self.params_cache)
logger.debug("user_id in self.params_cache[user_id]")
img_params = self.params_cache[user_id]
del self.params_cache[user_id] # 删除已使用的参数缓存
cmsg.prepare()
# 将用户的输入转换为 base64 编码
base64_data = convert_base64(content)
base64_data = 'data:image/png;base64,' + base64_data
# 使用这些参数生成一个新的图像
imagine_data = self.mm.get_imagine(prompt=img_params['image_params']["prompt"], base64_data=base64_data)
if isinstance(imagine_data, str): # 如果返回错误信息,则直接发送错误信息
reply.type = ReplyType.TEXT
reply.content = f"任务提交失败,{imagine_data}"
logger.error(f"Received error message: {imagine_data}")
else:
# 否则,获取新的图像链接,并将其发送给用户
self.send_task_submission_message(e_context, messageId=imagine_data["result"])
logger.debug(f"Received imagination data: {imagine_data}")
time.sleep(10) # 等待一段时间以确保任务已经处理完成
logger.debug("Getting image URL...")
task_data = self.mm.get_image_url(id=imagine_data["result"])
logger.debug(f"Received task data: {task_data}")
if isinstance(task_data, str): # 错误信息响应
reply.type = ReplyType.TEXT
reply.content = task_data
logger.error(f"Received error message: {task_data}")
else: # 正常的JSON响应
if task_data["failReason"] is None:
# 处理图片链接
new_url = self.generate_new_url(task_data=task_data)
# 生成短链接
short_url = self.get_short_url(short_url_api=self.short_url_api, url=new_url)
# 计算时间差
time_diff_start_finish_td, time_diff_submit_finish_td = self.get_time_diff(task_data)
logger.debug("new_url: %s" % new_url)
com_reply = self.create_reply(new_url=new_url, data=imagine_data)
# 发送回复
send_with_retry(self.comapp, com_reply, e_context)
reply.type = ReplyType.TEXT
# 设置回复内容
reply.content = self.complete_prompt.format(id=imagine_data["result"],
change_ins=self.change_ins, imgurl=short_url,
start_finish=time_diff_start_finish_td,
submit_finish=time_diff_submit_finish_td)
logger.debug("Sent image URL and completed prompt.")
else:
reply.type = ReplyType.TEXT
reply.content = task_data["failReason"]
logger.debug("Sent failReason as reply content.")
elif 'num_pictures' in self.params_cache[user_id]:
cmsg = e_context['context']['msg']
logger.debug("params_cache:%s" % self.params_cache)
logger.debug("user_id in self.params_cache[user_id]")
cmsg.prepare()
# 获取当前用户的图像参数
img_params = self.params_cache[user_id]
# 将用户的输入转换为 base64 编码
base64_data = convert_base64(content)
base64_data = 'data:image/png;base64,' + base64_data
# 将新的 base64 数据添加到列表中
img_params['base64_data'].append(base64_data)
# 减少待收集的图片数量
img_params['num_pictures'] -= 1
# 如果收集到足够数量的图片,调用函数并清除用户数据
if img_params['num_pictures'] == 0:
blend_data = self.mm.submit_blend(img_params['base64_data'])
del self.params_cache[user_id] # 删除已使用的参数缓存
if isinstance(blend_data, str):
reply.type = ReplyType.TEXT
reply.content = f"任务提交失败,{blend_data}"
logger.error(f"Received error message: {blend_data}")
else:
# 获取混合后的图像链接,并将其发送给用户
self.send_task_submission_message(e_context, messageId=blend_data["result"])
logger.debug(f"Received imagination data: {blend_data}")
time.sleep(10) # 等待一段时间以确保任务已经处理完成
logger.debug("Getting image URL...")
task_data = self.mm.get_image_url(id=blend_data["result"])
logger.debug(f"Received task data: {task_data}")
if isinstance(task_data, str):
# 错误信息响应
reply.type = ReplyType.TEXT
reply.content = task_data
logger.error(f"Received error message: {task_data}")
else:
# 正常的JSON响应
if task_data["failReason"] is None:
# 处理图片链接
new_url = self.generate_new_url(task_data=task_data)
# 生成短链接
short_url = self.get_short_url(short_url_api=self.short_url_api, url=new_url)
# 计算时间差
time_diff_start_finish_td, time_diff_submit_finish_td = self.get_time_diff(
task_data)
logger.debug("new_url: %s" % new_url)
com_reply = self.create_reply(new_url=new_url, data=blend_data)
# 发送回复
send_with_retry(self.comapp, com_reply, e_context)
reply.type = ReplyType.TEXT
# 设置回复内容
reply.content = self.complete_prompt.format(id=blend_data["result"],
change_ins=self.change_ins,
imgurl=short_url,
start_finish=time_diff_start_finish_td,
submit_finish=time_diff_submit_finish_td)
logger.debug("Sent image URL and completed prompt.")
else:
reply.type = ReplyType.TEXT
reply.content = task_data["failReason"]
logger.debug("Sent failReason as reply content.")
# 定义一个方法,用于生成帮助文本
def get_help_text(self, verbose=False, **kwargs):
# 检查配置中是否启用了画图功能
if not conf().get('image_create_prefix'):
return "画图功能未启用" # 如果未启用,则返回提示信息
else:
# 否则,获取触发前缀
trigger = conf()['image_create_prefix'][0]
# 初始化帮助文本,说明利用 midjourney api 来画图
help_text = "\n🔥使用Midjourney来画图,支持垫图、合图、变换等操作\n"
# 如果不需要详细说明,则直接返回帮助文本
if not verbose:
return help_text
# 否则,添加详细的使用方法到帮助文本中
help_text += f"使用方法:\n使用\"{trigger}[内容描述]\"的格式作画,如\"{trigger}一个中国漂亮女孩\"\n垫图指令:{trigger} {self.image_ins},合图指令:{trigger} {self.blend_ins}\n垫图指令后面可以加关键词,合图指令后面不需要加"
# 返回帮助文本
return help_text
def get_short_url(self, short_url_api, url):
# 检查是否提供了短网址 API
if short_url_api != "":
# 发送POST请求到短网址 API,并传入原始网址
logger.debug("发送POST请求到短网址API...")
logger.debug("传入到接口的网址是:%s", url)
response = requests.post(short_url_api, json={"url": url})
data = response.json()
# 构建完整的短网址,将API基本URL与响应中的键值连接起来
short_url = short_url_api + data["key"]
logger.debug("生成的短网址:%s", short_url)
return short_url
else:
# 如果未提供短网址 API,则返回原始网址
logger.debug("未提供短网址 API。返回原始网址。")
return url
def get_time_diff(self, task_data):
# 将时间戳值转换为秒
startTime_sec = task_data['startTime'] / 1000
finishTime_sec = task_data['finishTime'] / 1000 if task_data['finishTime'] is not None else None
submitTime_sec = task_data['submitTime'] / 1000
if finishTime_sec is not None:
# 计算开始时间和结束时间之间的时间差(秒)
time_diff_start_finish = finishTime_sec - startTime_sec
# 计算提交时间和结束时间之间的时间差(秒)
time_diff_submit_finish = finishTime_sec - submitTime_sec
# 将时间差转换为 timedelta 对象,以便更容易处理
time_diff_start_finish_td = timedelta(seconds=time_diff_start_finish)
time_diff_submit_finish_td = timedelta(seconds=time_diff_submit_finish)
# 获取时间差的总秒数
time_diff_start_finish_td_sec = time_diff_start_finish_td.total_seconds()
time_diff_submit_finish_td_sec = time_diff_submit_finish_td.total_seconds()
else:
# 如果 finishTime_sec 为 None,则将时间差设置为 None
time_diff_start_finish_td_sec = None
time_diff_submit_finish_td_sec = None
return time_diff_start_finish_td_sec, time_diff_submit_finish_td_sec
def send_task_submission_message(self, e_context, messageId):
com_reply = Reply()
com_reply.type = ReplyType.TEXT
context = e_context['context']
if context.kwargs.get('isgroup'):
msg = context.kwargs.get('msg')
nickname = msg.actual_user_nickname # 获取昵称
com_reply.content = "@{name}\n☑️您的绘图任务提交成功!\n🆔ID:{id}\n⏳正在努力出图,请您耐心等待...".format(
name=nickname, id=messageId) + self.local_data.reminder_string
else:
com_reply.content = "☑️您的绘图任务提交成功!\n🆔ID:{id}\n⏳正在努力出图,请您耐心等待...".format(
id=messageId) + self.local_data.reminder_string
self.comapp.send(com_reply, context)
def check_and_update_usage_limit(self, trial_lock, user_id, db_conn):
cur = db_conn.cursor()
        # Make sure the midjourneyturbo table exists
        cur.execute("""
            CREATE TABLE IF NOT EXISTS midjourneyturbo
            (UserID TEXT PRIMARY KEY, TrialCount INTEGER, TrialDate TEXT);
        """)
        db_conn.commit()
        # Look the user up in the database
        cur.execute("""
            SELECT TrialCount, TrialDate FROM midjourneyturbo
            WHERE UserID = ?
        """, (user_id,))
        row = cur.fetchone()
        # If the user does not exist, insert a new row with the trial count and date,
        # then return True and the trial count minus one
        if row is None:
            trial_count = trial_lock - 1  # one trial consumed
            cur.execute("""
                INSERT INTO midjourneyturbo (UserID, TrialCount, TrialDate) VALUES (?, ?, ?)
            """, (user_id, trial_count, datetime.date.today().isoformat()))  # insert the user with today's date and the trial count
            db_conn.commit()
            return True, True, trial_count
        # The user exists: check the trial count and date
        trial_count = row[0] if row and row[0] is not None else trial_lock
        trial_date = row[1] if row and row[1] is not None else None
        today = datetime.date.today().isoformat()
        if trial_count == 0 and trial_date == today:  # today's trials are already used up
            return False, False, ""
        if trial_count > 0 and trial_date == today:  # trials remain and the date is today
            trial_count -= 1  # consume one trial
        else:  # no trials left, or the date is not today
            trial_count = trial_lock - 1  # reset the trial count and consume one
            trial_date = today  # update the trial date
        cur.execute("""
            UPDATE midjourneyturbo
            SET TrialCount = ?, TrialDate = ?
            WHERE UserID = ?
        """, (trial_count, trial_date, user_id))
        db_conn.commit()
        return True, True, trial_count
def generate_new_url(self, task_data):
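        # Optionally rewrite the image URL: when split_url is set, keep the scheme
        # and host and drop the first two path segments.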
if self.split_url:
split_url = task_data["imageUrl"].split('/')
new_url = '/'.join(split_url[0:3] + split_url[5:])
else:
new_url = task_data["imageUrl"]
return new_url
def create_reply(self, new_url, data):
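        # Build the reply: send the URL directly unless num == 1, in which case the
        # image is downloaded and compressed first and attached as a file object.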
com_reply = Reply()
com_reply.type = self.type
if self.num != 1:
com_reply.content = new_url
else:
            # Download and compress the image
image_path = download_and_compress_image(new_url, data['result'], quality=self.picture_quality)
image_storage = open(image_path, 'rb')
com_reply.content = image_storage
return com_reply
| [
"content"
] |
2024-01-10 | chazzjimel/WeChat-AIChatbot-WinOnly | bot~linkai~link_ai_bot.py | # access LinkAI knowledge base platform
# docs: https://link-ai.tech/platform/link-app/wechat
import time
import requests
from bot.bot import Bot
from bot.chatgpt.chat_gpt_session import ChatGPTSession
from bot.openai.open_ai_image import OpenAIImage
from bot.session_manager import SessionManager
from bridge.context import Context, ContextType
from bridge.reply import Reply, ReplyType
from common.log import logger
from config import conf
class LinkAIBot(Bot, OpenAIImage):
# authentication failed
AUTH_FAILED_CODE = 401
NO_QUOTA_CODE = 406
def __init__(self):
super().__init__()
self.base_url = "https://api.link-ai.chat/v1"
self.sessions = SessionManager(ChatGPTSession, model=conf().get("model") or "gpt-3.5-turbo")
def reply(self, query, context: Context = None) -> Reply:
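        # Dispatch by context type: plain text goes to the chat endpoint,
        # IMAGE_CREATE goes to the image API, anything else returns an error reply.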
if context.type == ContextType.TEXT:
return self._chat(query, context)
elif context.type == ContextType.IMAGE_CREATE:
ok, res = self.create_img(query, 0)
if ok:
reply = Reply(ReplyType.IMAGE_URL, res)
else:
reply = Reply(ReplyType.ERROR, res)
return reply
else:
reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
return reply
def _chat(self, query, context, retry_count=0) -> Reply:
"""
发起对话请求
:param query: 请求提示词
:param context: 对话上下文
:param retry_count: 当前递归重试次数
:return: 回复
"""
if retry_count >= 2:
# exit from retry 2 times
logger.warn("[LINKAI] failed after maximum number of retry times")
return Reply(ReplyType.ERROR, "请再问我一次吧")
try:
# load config
if context.get("generate_breaked_by"):
logger.info(f"[LINKAI] won't set appcode because a plugin ({context['generate_breaked_by']}) affected the context")
app_code = None
else:
app_code = context.kwargs.get("app_code") or conf().get("linkai_app_code")
linkai_api_key = conf().get("linkai_api_key")
session_id = context["session_id"]
session = self.sessions.session_query(query, session_id)
model = conf().get("model") or "gpt-3.5-turbo"
# remove system message
if session.messages[0].get("role") == "system":
if app_code or model == "wenxin":
session.messages.pop(0)
body = {
"app_code": app_code,
"messages": session.messages,
"model": model, # 对话模型的名称, 支持 gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-4, wenxin
"temperature": conf().get("temperature"),
"top_p": conf().get("top_p", 1),
"frequency_penalty": conf().get("frequency_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
"presence_penalty": conf().get("presence_penalty", 0.0), # [-2,2]之间,该值越大则更倾向于产生不同的内容
}
logger.info(f"[LINKAI] query={query}, app_code={app_code}, mode={body.get('model')}")
headers = {"Authorization": "Bearer " + linkai_api_key}
# do http request
res = requests.post(url=self.base_url + "/chat/completions", json=body, headers=headers,
timeout=conf().get("request_timeout", 180))
if res.status_code == 200:
# execute success
response = res.json()
reply_content = response["choices"][0]["message"]["content"]
total_tokens = response["usage"]["total_tokens"]
logger.info(f"[LINKAI] reply={reply_content}, total_tokens={total_tokens}")
self.sessions.session_reply(reply_content, session_id, total_tokens)
return Reply(ReplyType.TEXT, reply_content)
else:
response = res.json()
error = response.get("error")
logger.error(f"[LINKAI] chat failed, status_code={res.status_code}, "
f"msg={error.get('message')}, type={error.get('type')}")
if res.status_code >= 500:
# server error, need retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self._chat(query, context, retry_count + 1)
return Reply(ReplyType.ERROR, "提问太快啦,请休息一下再问我吧")
except Exception as e:
logger.exception(e)
# retry
time.sleep(2)
logger.warn(f"[LINKAI] do retry, times={retry_count}")
return self._chat(query, context, retry_count + 1)
| [] |
2024-01-10 | chazzjimel/WeChat-AIChatbot-WinOnly | bot~bot_factory.py | """
bot factory
"""
from common import const
def create_bot(bot_type):
"""
create a bot_type instance
:param bot_type: bot type code
:return: bot instance
"""
if bot_type == const.BAIDU:
        # Baidu Unit has been replaced by the Baidu Wenxin (ERNIE) chat API
# from bot.baidu.baidu_unit_bot import BaiduUnitBot
# return BaiduUnitBot()
from bot.baidu.baidu_wenxin import BaiduWenxinBot
return BaiduWenxinBot()
elif bot_type == const.CHATGPT:
        # ChatGPT web chat API
from bot.chatgpt.chat_gpt_bot import ChatGPTBot
return ChatGPTBot()
elif bot_type == const.OPEN_AI:
        # Official OpenAI chat completion API
from bot.openai.open_ai_bot import OpenAIBot
return OpenAIBot()
elif bot_type == const.CHATGPTONAZURE:
# Azure chatgpt service https://azure.microsoft.com/en-in/products/cognitive-services/openai-service/
from bot.chatgpt.chat_gpt_bot import AzureChatGPTBot
return AzureChatGPTBot()
elif bot_type == const.LINKAI:
from bot.linkai.link_ai_bot import LinkAIBot
return LinkAIBot()
    raise RuntimeError(f"Unsupported bot type: {bot_type}")
| [] |
2024-01-10 | taskswithcode/semantic_search_app | twc_openai_search.py | from scipy.spatial.distance import cosine
import argparse
import json
import os
import openai
import pdb
def read_text(input_file):
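    # Read newline-separated sentences; drop the trailing empty string left by split()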
arr = open(input_file).read().split("\n")
return arr[:-1]
class OpenAIQnAModel:
def __init__(self):
self.debug = False
self.q_model_name = None
self.d_model_name = None
self.skip_key = True
print("In OpenAI API constructor")
def init_model(self,model_name = None):
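        # Read the API key from the environment and resolve the doc/query model pair;
        # defaults to the text-search-ada-doc-001 document model.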
#print("OpenAI: Init model",model_name)
openai.api_key = os.getenv("OPENAI_API_KEY")
        if (openai.api_key is None):
openai.api_key = ""
print("API key not set")
if (len(openai.api_key) == 0 and not self.skip_key):
print("Open API key not set")
if (model_name is None):
self.d_model_name = "text-search-ada-doc-001"
else:
self.d_model_name = model_name
self.q_model_name = self.construct_query_model_name(self.d_model_name)
print(f"OpenAI: Init model complete :query model {self.q_model_name} doc:{self.d_model_name}")
def construct_query_model_name(self,d_model_name):
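        # OpenAI's legacy search models come in *-doc-* / *-query-* pairs; derive the
        # query-side model name from the document-side one.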
return d_model_name.replace('-doc-','-query-')
def compute_embeddings(self,input_file_name,input_data,is_file):
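        # Embed the first line as the query and the remaining lines as documents,
        # caching the raw embeddings in a per-model JSON file so repeat runs skip the API calls.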
if (len(openai.api_key) == 0 and not self.skip_key):
print("Open API key not set")
return [],[]
#print("In compute embeddings after key check")
in_file = input_file_name.split('/')[-1]
in_file = self.d_model_name + '_' + '.'.join(in_file.split('.')[:-1]) + "_search.json"
cached = False
try:
fp = open(in_file)
cached = True
embeddings = json.load(fp)
q_embeddings = [embeddings[0]]
d_embeddings = embeddings[1:]
print("Using cached embeddings")
except:
pass
        texts = read_text(input_data) if is_file else input_data
queries = [texts[0]]
docs = texts[1:]
if (not cached):
print(f"Computing embeddings for {input_file_name} and query model {self.q_model_name}")
query_embeds = openai.Embedding.create(
input=queries,
model=self.q_model_name
)
print(f"Computing embeddings for {input_file_name} and doc model {self.q_model_name}")
doc_embeds = openai.Embedding.create(
input=docs,
model=self.d_model_name
)
q_embeddings = []
d_embeddings = []
for i in range(len(query_embeds['data'])):
q_embeddings.append(query_embeds['data'][i]['embedding'])
for i in range(len(doc_embeds['data'])):
d_embeddings.append(doc_embeds['data'][i]['embedding'])
if (not cached):
embeddings = q_embeddings + d_embeddings
with open(in_file,"w") as fp:
json.dump(embeddings,fp)
return texts,(q_embeddings,d_embeddings)
def output_results(self,output_file,texts,embeddings,main_index = 0):
# Calculate cosine similarities
# Cosine similarities are in [-1, 1]. Higher means more similar
query_embeddings = embeddings[0]
doc_embeddings = embeddings[1]
cosine_dict = {}
queries = [texts[0]]
docs = texts[1:]
if (self.debug):
print("Total sentences",len(texts))
for i in range(len(docs)):
cosine_dict[docs[i]] = 1 - cosine(query_embeddings[0], doc_embeddings[i])
if (self.debug):
print("Input sentence:",texts[main_index])
sorted_dict = dict(sorted(cosine_dict.items(), key=lambda item: item[1],reverse = True))
if (self.debug):
for key in sorted_dict:
print("Cosine similarity with \"%s\" is: %.3f" % (key, sorted_dict[key]))
if (output_file is not None):
with open(output_file,"w") as fp:
fp.write(json.dumps(sorted_dict,indent=0))
return sorted_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenAI model for document search embeddings ',formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-input', action="store", dest="input",required=True,help="Input file with sentences")
parser.add_argument('-output', action="store", dest="output",default="output.txt",help="Output file with results")
parser.add_argument('-model', action="store", dest="model",default="text-search-ada-doc-001",help="model name")
results = parser.parse_args()
obj = OpenAIQnAModel()
obj.init_model(results.model)
texts, embeddings = obj.compute_embeddings(results.input,results.input,is_file = True)
results = obj.output_results(results.output,texts,embeddings)
| [] |