date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6-116) | file_name (stringlengths, 2-220) | file_contents (stringlengths, 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | AayushMathur7/14Labs | spotify-upload~dsp-agent-deprecated.py | '''
MultiOn has various limitations that will not allow us to upload audio to Spotify.
Additionally, Langchain itself has limitations.
Selenium will be used instead.
'''
import os
from dotenv import load_dotenv
from langchain.llms import OpenAI
from langchain.agents import initialize_agent
from langchain.agents import AgentType
os.environ["LANGCHAIN_TRACING"] = "true"
load_dotenv()
os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
from langchain.tools import StructuredTool
from human_input import HumanInputRun
from multion import MultionToolSpec
def agent(query: str):
multion_toolkit = MultionToolSpec(use_api=True, mode="auto")
# multion.set_remote(True)
tool = StructuredTool.from_function(multion_toolkit.browse)
human_input = HumanInputRun()
llm = OpenAI(temperature=0)
# Structured tools are compatible with the STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION agent type.
agent_executor = initialize_agent(
[tool, human_input],
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
return agent_executor.run(query)
# 1. If it is not logged in yet, then log in to Spotify for Podcasters. Click on continue to Spotify if that option appears and log in with whichever profile is saved, else ask for credentials. Otherwise, skip this step.
# 3. Fill in the details for the title, episode description, publish date (now), explicit content (no)
# 4. Generate a sample image and use that as the cover art
# 5. Once filled in details click next until review step
# 6. Publish it and retrieve the spotify link
# Concise summary of content for digestible ear candies!
PROMPT = f"""
You are an expert AI Agent whose job is to `upload a mp3 audio file on spotify podcasters and retrieve the spotify link to the audio` (https://podcasters.spotify.com/pod/dashboard/home).
Here are the high-level steps:
1. Click on the new episode button
2. Take the music-example.mp3 file and upload it
3. Open
"""
# PROMPT = f"""
# You are an expert AI Agent whose job is to `display weather data` (https://www.google.com).
# Here are the high-level steps:
# 1. Go to google
# 2. Get the average temperature of today
# """
agent(query=PROMPT)
| [
"\nYou are an expert AI Agent whose job is to `upload a mp3 audio file on spotify podcasters and retrieve the spotify link to the audio` (https://podcasters.spotify.com/pod/dashboard/home).\n\n Here are the high-level steps:\n 1. Click on the new episode button\n 2. Take the music-example.mp3 file and upload it\n 3. Open\n"
] |
2024-01-10 | princetonvisualai/multimodal_dataset_distillation | networks.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from collections import OrderedDict
from typing import Tuple, Union
import clip
from transformers import ViTConfig, ViTModel, AutoTokenizer, CLIPTextModel, CLIPTextConfig, CLIPProcessor, CLIPConfig
import numpy as np
from transformers import BertTokenizer, BertModel
from torchvision.models import resnet18, resnet
from transformers.models.bert.modeling_bert import BertAttention, BertConfig
tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')
BERT_model = BertModel.from_pretrained('bert-base-uncased')
# Acknowledgement to
# https://github.com/kuangliu/pytorch-cifar,
# https://github.com/BIGBALLON/CIFAR-ZOO,
# adapted from
# https://github.com/VICO-UoE/DatasetCondensation
# https://github.com/Zasder3/train-CLIP
''' MLP '''
class MLP(nn.Module):
def __init__(self, channel, num_classes):
super(MLP, self).__init__()
self.fc_1 = nn.Linear(28*28*1 if channel==1 else 32*32*3, 128)
self.fc_2 = nn.Linear(128, 128)
self.fc_3 = nn.Linear(128, num_classes)
def forward(self, x):
out = x.view(x.size(0), -1)
out = F.relu(self.fc_1(out))
out = F.relu(self.fc_2(out))
out = self.fc_3(out)
return out
''' ConvNet '''
class ConvNet(nn.Module):
def __init__(self, channel, num_classes, net_width=128, net_depth=4, net_act='relu', net_norm='instancenorm', net_pooling='avgpooling', im_size = (224,224)):
super(ConvNet, self).__init__()
self.features, shape_feat = self._make_layers(channel, net_width, net_depth, net_norm, net_act, net_pooling, im_size)
num_feat = shape_feat[0]*shape_feat[1]*shape_feat[2]
self.classifier = nn.Linear(num_feat, num_classes)
def forward(self, x):
# print("MODEL DATA ON: ", x.get_device(), "MODEL PARAMS ON: ", self.classifier.weight.data.get_device())
out = self.features(x)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _get_activation(self, net_act):
if net_act == 'sigmoid':
return nn.Sigmoid()
elif net_act == 'relu':
return nn.ReLU(inplace=True)
elif net_act == 'leakyrelu':
return nn.LeakyReLU(negative_slope=0.01)
else:
exit('unknown activation function: %s'%net_act)
def _get_pooling(self, net_pooling):
if net_pooling == 'maxpooling':
return nn.MaxPool2d(kernel_size=2, stride=2)
elif net_pooling == 'avgpooling':
return nn.AvgPool2d(kernel_size=2, stride=2)
elif net_pooling == 'none':
return None
else:
exit('unknown net_pooling: %s'%net_pooling)
def _get_normlayer(self, net_norm, shape_feat):
# shape_feat = (c*h*w)
if net_norm == 'batchnorm':
return nn.BatchNorm2d(shape_feat[0], affine=True)
elif net_norm == 'layernorm':
return nn.LayerNorm(shape_feat, elementwise_affine=True)
elif net_norm == 'instancenorm':
return nn.GroupNorm(shape_feat[0], shape_feat[0], affine=True)
elif net_norm == 'groupnorm':
return nn.GroupNorm(4, shape_feat[0], affine=True)
elif net_norm == 'none':
return None
else:
exit('unknown net_norm: %s'%net_norm)
def _make_layers(self, channel, net_width, net_depth, net_norm, net_act, net_pooling, im_size):
layers = []
in_channels = channel
if im_size[0] == 28:
im_size = (32, 32)
shape_feat = [in_channels, im_size[0], im_size[1]]
for d in range(net_depth):
layers += [nn.Conv2d(in_channels, net_width, kernel_size=3, padding=3 if channel == 1 and d == 0 else 1)]
shape_feat[0] = net_width
if net_norm != 'none':
layers += [self._get_normlayer(net_norm, shape_feat)]
layers += [self._get_activation(net_act)]
in_channels = net_width
if net_pooling != 'none':
layers += [self._get_pooling(net_pooling)]
shape_feat[1] //= 2
shape_feat[2] //= 2
return nn.Sequential(*layers), shape_feat
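# Illustrative shape check (a sketch, assuming the defaults above: net_width=128, net_depth=4,
# net_pooling='avgpooling', im_size=(224, 224)): each of the four blocks halves the spatial
# resolution (224 -> 112 -> 56 -> 28 -> 14), so the classifier sees 128 * 14 * 14 = 25088 features.
#   model = ConvNet(channel=3, num_classes=10)
#   out = model(torch.randn(2, 3, 224, 224))   # expected: torch.Size([2, 10])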
''' ConvNet '''
class ConvNetGAP(nn.Module):
def __init__(self, channel, num_classes, net_width, net_depth, net_act, net_norm, net_pooling, im_size = (32,32)):
super(ConvNetGAP, self).__init__()
self.features, shape_feat = self._make_layers(channel, net_width, net_depth, net_norm, net_act, net_pooling, im_size)
num_feat = shape_feat[0]*shape_feat[1]*shape_feat[2]
# self.classifier = nn.Linear(num_feat, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(shape_feat[0], num_classes)
def forward(self, x):
out = self.features(x)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def _get_activation(self, net_act):
if net_act == 'sigmoid':
return nn.Sigmoid()
elif net_act == 'relu':
return nn.ReLU(inplace=True)
elif net_act == 'leakyrelu':
return nn.LeakyReLU(negative_slope=0.01)
else:
exit('unknown activation function: %s'%net_act)
def _get_pooling(self, net_pooling):
if net_pooling == 'maxpooling':
return nn.MaxPool2d(kernel_size=2, stride=2)
elif net_pooling == 'avgpooling':
return nn.AvgPool2d(kernel_size=2, stride=2)
elif net_pooling == 'none':
return None
else:
exit('unknown net_pooling: %s'%net_pooling)
def _get_normlayer(self, net_norm, shape_feat):
# shape_feat = (c*h*w)
if net_norm == 'batchnorm':
return nn.BatchNorm2d(shape_feat[0], affine=True)
elif net_norm == 'layernorm':
return nn.LayerNorm(shape_feat, elementwise_affine=True)
elif net_norm == 'instancenorm':
return nn.GroupNorm(shape_feat[0], shape_feat[0], affine=True)
elif net_norm == 'groupnorm':
return nn.GroupNorm(4, shape_feat[0], affine=True)
elif net_norm == 'none':
return None
else:
exit('unknown net_norm: %s'%net_norm)
def _make_layers(self, channel, net_width, net_depth, net_norm, net_act, net_pooling, im_size):
layers = []
in_channels = channel
if im_size[0] == 28:
im_size = (32, 32)
shape_feat = [in_channels, im_size[0], im_size[1]]
for d in range(net_depth):
layers += [nn.Conv2d(in_channels, net_width, kernel_size=3, padding=3 if channel == 1 and d == 0 else 1)]
shape_feat[0] = net_width
if net_norm != 'none':
layers += [self._get_normlayer(net_norm, shape_feat)]
layers += [self._get_activation(net_act)]
in_channels = net_width
if net_pooling != 'none':
layers += [self._get_pooling(net_pooling)]
shape_feat[1] //= 2
shape_feat[2] //= 2
return nn.Sequential(*layers), shape_feat
''' LeNet '''
class LeNet(nn.Module):
def __init__(self, channel, num_classes):
super(LeNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(channel, 6, kernel_size=5, padding=2 if channel==1 else 0),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc_1 = nn.Linear(16 * 5 * 5, 120)
self.fc_2 = nn.Linear(120, 84)
self.fc_3 = nn.Linear(84, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc_1(x))
x = F.relu(self.fc_2(x))
x = self.fc_3(x)
return x
''' AlexNet '''
class AlexNet(nn.Module):
def __init__(self, channel, num_classes):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(channel, 128, kernel_size=5, stride=1, padding=4 if channel==1 else 2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(128, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(192, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(192, 192, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fc = nn.Linear(192 * 4 * 4, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
''' VGG '''
cfg_vgg = {
'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
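# Note on the table above: each integer is the output channel count of a 3x3 convolution block,
# and each 'M' inserts a 2x2 max-pool, as consumed by VGG._make_layers below.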
class VGG(nn.Module):
def __init__(self, vgg_name, channel, num_classes, norm='instancenorm'):
super(VGG, self).__init__()
self.channel = channel
self.features = self._make_layers(cfg_vgg[vgg_name], norm)
self.classifier = nn.Linear(512 if vgg_name != 'VGGS' else 128, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _make_layers(self, cfg, norm):
layers = []
in_channels = self.channel
for ic, x in enumerate(cfg):
if x == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=3 if self.channel==1 and ic==0 else 1),
nn.GroupNorm(x, x, affine=True) if norm=='instancenorm' else nn.BatchNorm2d(x),
nn.ReLU(inplace=True)]
in_channels = x
layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
return nn.Sequential(*layers)
def VGG11(channel, num_classes):
return VGG('VGG11', channel, num_classes)
def VGG11BN(channel, num_classes):
return VGG('VGG11', channel, num_classes, norm='batchnorm')
def VGG13(channel, num_classes):
return VGG('VGG13', channel, num_classes)
def VGG16(channel, num_classes):
return VGG('VGG16', channel, num_classes)
def VGG19(channel, num_classes):
return VGG('VGG19', channel, num_classes)
''' ResNet_AP '''
# The conv(stride=2) is replaced by conv(stride=1) + avgpool(kernel_size=2, stride=2)
class BasicBlock_AP(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, norm='instancenorm'):
super(BasicBlock_AP, self).__init__()
self.norm = norm
self.stride = stride
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # modification
self.bn1 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=1, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2), # modification
nn.GroupNorm(self.expansion * planes, self.expansion * planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
if self.stride != 1: # modification
out = F.avg_pool2d(out, kernel_size=2, stride=2)
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck_AP(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, norm='instancenorm'):
super(Bottleneck_AP, self).__init__()
self.norm = norm
self.stride = stride
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False) # modification
self.bn2 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
self.bn3 = nn.GroupNorm(self.expansion * planes, self.expansion * planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self.expansion * planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=1, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2), # modification
nn.GroupNorm(self.expansion * planes, self.expansion * planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
if self.stride != 1: # modification
out = F.avg_pool2d(out, kernel_size=2, stride=2)
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet_AP(nn.Module):
def __init__(self, block, num_blocks, channel=3, num_classes=10, norm='instancenorm'):
super(ResNet_AP, self).__init__()
self.in_planes = 64
self.norm = norm
self.conv1 = nn.Conv2d(channel, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.GroupNorm(64, 64, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
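# Spatial-size note: for 32x32 inputs the three stride-2 stages leave a 4x4 feature map
# (hence 512 * expansion * 4 * 4 below); for 28x28 single-channel inputs they leave 3x3.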
self.classifier = nn.Linear(512 * block.expansion * 3 * 3 if channel==1 else 512 * block.expansion * 4 * 4, num_classes) # modification
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, self.norm))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, kernel_size=1, stride=1) # modification
out = out.view(out.size(0), -1)
out = self.classifier(out)
return out
def ResNet18BN_AP(channel, num_classes):
return ResNet_AP(BasicBlock_AP, [2,2,2,2], channel=channel, num_classes=num_classes, norm='batchnorm')
def ResNet18_AP(channel, num_classes):
return ResNet_AP(BasicBlock_AP, [2,2,2,2], channel=channel, num_classes=num_classes)
''' ResNet '''
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, norm='instancenorm'):
super(BasicBlock, self).__init__()
self.norm = norm
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.GroupNorm(self.expansion*planes, self.expansion*planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1, norm='instancenorm'):
super(Bottleneck, self).__init__()
self.norm = norm
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.GroupNorm(planes, planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.GroupNorm(self.expansion*planes, self.expansion*planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.GroupNorm(self.expansion*planes, self.expansion*planes, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNetImageNet(nn.Module):
def __init__(self, block, num_blocks, channel=3, num_classes=10, norm='instancenorm'):
super(ResNetImageNet, self).__init__()
self.in_planes = 64
self.norm = norm
self.conv1 = nn.Conv2d(channel, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.GroupNorm(64, 64, affine=True) if self.norm == 'instancenorm' else nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride, self.norm))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.maxpool(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
# out = F.avg_pool2d(out, 4)
# out = out.view(out.size(0), -1)
out = self.avgpool(out)
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
def ResNet18BN(channel, num_classes):
return ResNet(BasicBlock, [2,2,2,2], channel=channel, num_classes=num_classes, norm='batchnorm')
def ResNet18(channel, num_classes):
return ResNet_gn(BasicBlock, [2,2,2,2], channel=channel, num_classes=num_classes)
def ResNet34(channel, num_classes):
return ResNet(BasicBlock, [3,4,6,3], channel=channel, num_classes=num_classes)
def ResNet50(channel, num_classes):
return ResNet(Bottleneck, [3,4,6,3], channel=channel, num_classes=num_classes)
def ResNet101(channel, num_classes):
return ResNet(Bottleneck, [3,4,23,3], channel=channel, num_classes=num_classes)
def ResNet152(channel, num_classes):
return ResNet(Bottleneck, [3,8,36,3], channel=channel, num_classes=num_classes)
def ResNet18ImageNet(channel, num_classes):
return ResNetImageNet(BasicBlock, [2,2,2,2], channel=channel, num_classes=num_classes)
def ResNet6ImageNet(channel, num_classes):
return ResNetImageNet(BasicBlock, [1,1,1,1], channel=channel, num_classes=num_classes)
def resnet18_gn(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
"""
model_args = dict(block=BasicBlock, layers=[2, 2, 2, 2])
return _create_resnet('resnet18', pretrained, **dict(model_args, **kwargs))
## Sourced directly from OpenAI's CLIP repo
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, input_resolution=224, width=64):
super().__init__()
self.output_dim = output_dim
self.input_resolution = input_resolution
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(input_resolution // 32, embed_dim, heads, output_dim)
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
def stem(x):
for conv, bn in [(self.conv1, self.bn1), (self.conv2, self.bn2), (self.conv3, self.bn3)]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
x = x.type(self.conv1.weight.dtype)
x = stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
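# Shape walk-through (illustrative, assuming a CLIP-RN50-style setup with a 2048-channel 7x7
# feature map): x is reshaped to (49, N, 2048), a mean token is prepended -> (50, N, 2048),
# positional embeddings are added, multi-head attention projects to output_dim, and x[0]
# returns the pooled (N, output_dim) embedding at the mean-query position.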
import timm
class ProjectionHead(nn.Module):
def __init__(
self,
embedding_dim,
projection_dim=768,
dropout=0.1
):
super().__init__()
self.projection = nn.Linear(embedding_dim, projection_dim)
self.gelu = nn.GELU()
self.fc = nn.Linear(projection_dim, projection_dim)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(projection_dim)
def forward(self, x):
projected = self.projection(x)
x = self.gelu(projected)
x = self.fc(x)
x = self.dropout(x)
x = x + projected
x = self.layer_norm(x)
return x
class ImageEncoder(nn.Module):
"""
Encode images to a fixed size vector
"""
def __init__(self, args, eval_stage):
super().__init__()
self.model_name = args.image_encoder
self.pretrained = args.image_pretrained
self.trainable = args.image_trainable
if self.model_name == 'clip':
if self.pretrained:
self.model, preprocess = clip.load("ViT-B/32", device='cuda')
else:
configuration = ViTConfig()
self.model = ViTModel(configuration)
elif self.model_name == 'nfnet':
self.model = timm.create_model('nfnet_l0', pretrained=self.pretrained, num_classes=0, global_pool="avg")
elif self.model_name == 'vit':
self.model = timm.create_model('vit_tiny_patch16_224', pretrained=True)
elif self.model_name == 'nf_resnet50':
self.model = timm.create_model('nf_resnet50', pretrained=True)
elif self.model_name == 'nf_regnet':
self.model = timm.create_model('nf_regnet_b1', pretrained=True)
else:
self.model = timm.create_model(self.model_name, self.pretrained, num_classes=0, global_pool="avg")
for p in self.model.parameters():
p.requires_grad = self.trainable
def forward(self, x):
if self.model_name == 'clip' and self.pretrained:
return self.model.encode_image(x)
else:
return self.model(x)
def gradient(self, x, y):
# Compute the gradient of the mean squared error loss with respect to the weights
loss = self.loss(x, y)
grad = torch.autograd.grad(loss, self.parameters(), create_graph=True)
return torch.cat([g.view(-1) for g in grad])
class TextEncoder(nn.Module):
def __init__(self, args):
super().__init__()
self.pretrained = args.text_pretrained
self.trainable = args.text_trainable
self.model_name = args.text_encoder
if self.model_name == 'clip':
self.model, preprocess = clip.load("ViT-B/32", device='cuda')
elif self.model_name == 'bert':
if args.text_pretrained:
self.model = BERT_model
else:
self.model = BertModel(BertConfig())
self.model.init_weights()
self.tokenizer = tokenizer
else:
raise NotImplementedError
for p in self.model.parameters():
p.requires_grad = self.trainable
# we are using the CLS token hidden representation as the sentence's embedding
self.target_token_idx = 0
def forward(self, texts, device='cuda'):
if self.model_name == 'clip':
output = self.model.encode_text(clip.tokenize(texts).to('cuda'))
elif self.model_name == 'bert':
# Tokenize the input text
encoding = self.tokenizer.batch_encode_plus(texts, return_tensors='pt', padding=True, truncation=True)
input_ids = encoding['input_ids'].to(device)
attention_mask = encoding['attention_mask'].to(device)
output = self.model(input_ids, attention_mask=attention_mask).last_hidden_state[:, self.target_token_idx, :]
return output
class CLIPModel_full(nn.Module):
def __init__(
self,
args,
temperature=1.0,
eval_stage=False
):
super().__init__()
if args.image_encoder == 'nfnet':
if eval_stage:
self.image_embedding = 1000#2048
else:
self.image_embedding = 2304
elif args.image_encoder == 'convnet':
self.image_embedding = 768
elif args.image_encoder == 'resnet18':
self.image_embedding = 512
elif args.image_encoder == 'convnext':
self.image_embedding = 640
else:
self.image_embedding = 1000
if args.text_encoder == 'clip':
self.text_embedding = 512
elif args.text_encoder == 'bert':
self.text_embedding = 768
else:
raise NotImplementedError
self.image_encoder = ImageEncoder(args, eval_stage=eval_stage)
self.text_encoder = TextEncoder(args)
if args.only_has_image_projection:
self.image_projection = ProjectionHead(embedding_dim=self.image_embedding)
self.text_projection = ProjectionHead(embedding_dim=self.text_embedding, projection_dim=self.image_embedding).to('cuda')
self.temperature = temperature
#self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.args = args
self.distill = args.distill
def forward(self, image, caption, epoch):
self.image_encoder = self.image_encoder.to('cuda')
self.text_encoder = self.text_encoder.to('cuda')
image_features = self.image_encoder(image)
text_features = caption if self.distill else self.text_encoder(caption)
use_image_project = False
im_embed = image_features.float() if not use_image_project else self.image_projection(image_features.float())
txt_embed = self.text_projection(text_features.float())
combined_image_features = im_embed
combined_text_features = txt_embed
image_features = combined_image_features / combined_image_features.norm(dim=1, keepdim=True)
text_features = combined_text_features / combined_text_features.norm(dim=1, keepdim=True)
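# Note: np.exp(np.log(1 / 0.07)) below is just the fixed CLIP logit scale 1/0.07 (about 14.29),
# i.e. a temperature of 0.07 applied to the cosine similarities.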
image_logits = np.exp(np.log(1 / 0.07)) * image_features @ text_features.t()
ground_truth = torch.arange(len(image_logits)).type_as(image_logits).long()
loss = (F.cross_entropy(image_logits, ground_truth) + F.cross_entropy(image_logits.t(), ground_truth))/2
acc_i = (torch.argmax(image_logits, 1) == ground_truth).sum().item()
acc_t = (torch.argmax(image_logits, 0) == ground_truth).sum().item()
acc = (acc_i + acc_t) / 2
return loss, acc | [] |
2024-01-10 | mit-submit/A2rchi | a2rchi~utils~data_manager.py | from a2rchi.utils.scraper import Scraper
from chromadb.config import Settings
from langchain.document_loaders import TextLoader
from langchain.document_loaders import PyPDFLoader
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import BSHTMLLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
import chromadb
import hashlib
import os
import yaml
import time
class DataManager():
def __init__(self):
from a2rchi.utils.config_loader import Config_Loader
self.config = Config_Loader().config["utils"]
self.global_config = Config_Loader().config["global"]
self.data_path = self.global_config["DATA_PATH"]
# create data path if it doesn't exist
os.makedirs(self.data_path, exist_ok=True)
# scrape data onto the filesystem
print("Scraping documents onto filesystem")
scraper = Scraper()
scraper.hard_scrape(verbose=True)
# get the collection (reset it if it already exists and reset_collection = True)
# the actual name of the collection is the name given by config with the embeddings specified
embedding_name = self.config["embeddings"]["EMBEDDING_NAME"]
self.collection_name = self.config["data_manager"]["collection_name"] + "_with_" + embedding_name
print("Using collection: ", self.collection_name)
# delete the existing collection if specified
self.delete_existing_collection_if_reset()
# get the embedding model
embedding_class_map = self.config["embeddings"]["EMBEDDING_CLASS_MAP"]
embedding_name = self.config["embeddings"]["EMBEDDING_NAME"]
self.embedding_model = embedding_class_map[embedding_name]["class"](**embedding_class_map[embedding_name]["kwargs"])
# create the text_splitter
self.text_splitter = CharacterTextSplitter(
chunk_size=self.config["data_manager"]["CHUNK_SIZE"],
chunk_overlap=self.config["data_manager"]["CHUNK_OVERLAP"],
)
def delete_existing_collection_if_reset(self):
"""
Connect to ChromaDB and delete collection.
"""
# return early if not resetting
if not self.config["data_manager"]["reset_collection"]:
return
# connect to chromadb server
client = None
if self.config["data_manager"]["use_HTTP_chromadb_client"]:
client = chromadb.HttpClient(
host=self.config["data_manager"]["chromadb_host"],
port=self.config["data_manager"]["chromadb_port"],
settings=Settings(allow_reset=True, anonymized_telemetry=False), # NOTE: anonymized_telemetry doesn't actually do anything; need to build Chroma on our own without it
)
else:
client = chromadb.PersistentClient(
path=self.global_config["LOCAL_VSTORE_PATH"],
settings=Settings(allow_reset=True, anonymized_telemetry=False), # NOTE: anonymized_telemetry doesn't actually do anything; need to build Chroma on our own without it
)
if self.collection_name in [collection.name for collection in client.list_collections()]:
client.delete_collection(self.collection_name)
def fetch_collection(self):
"""
Connect to ChromaDB and fetch the collection.
"""
# connect to chromadb server
client = None
if self.config["data_manager"]["use_HTTP_chromadb_client"]:
client = chromadb.HttpClient(
host=self.config["data_manager"]["chromadb_host"],
port=self.config["data_manager"]["chromadb_port"],
settings=Settings(allow_reset=True, anonymized_telemetry=False), # NOTE: anonymized_telemetry doesn't actually do anything; need to build Chroma on our own without it
)
else:
client = chromadb.PersistentClient(
path=self.global_config["LOCAL_VSTORE_PATH"],
settings=Settings(allow_reset=True, anonymized_telemetry=False), # NOTE: anonymized_telemetry doesn't actually do anything; need to build Chroma on our own without it
)
collection = client.get_or_create_collection(self.collection_name)
print(f" n in collection: {collection.count()}")
return collection
def update_vectorstore(self):
"""
Method which looks at the files in the data folder and syncs them to the vectors stored in the vectorstore
"""
# fetch the collection
collection = self.fetch_collection()
# get current status of persistent vstore
files_in_vstore = [metadata["filename"] for metadata in collection.get(include=["metadatas"])["metadatas"]]
# scan data folder and obtain list of files in data. Assumes max depth = 1
dirs = [
os.path.join(self.data_path, dir)
for dir in os.listdir(self.data_path)
if os.path.isdir(os.path.join(self.data_path, dir)) and dir != "vstore"
]
files_in_data_fullpath = [
os.path.join(dir, file)
for dir in dirs
for file in os.listdir(dir)
]
# files_in_data is a dictionary whose keys are file names and whose values are their full paths.
files_in_data = {os.path.basename(file_fullpath): file_fullpath for file_fullpath in files_in_data_fullpath}
# get map between sources and filename hashes
with open(os.path.join(self.data_path, 'sources.yml'), 'r') as file:
sources = yaml.load(file, Loader=yaml.FullLoader)
# control if files in vectorstore == files in data
if set(files_in_data.keys()) == set(files_in_vstore):
print("Vectorstore is up to date")
else:
print("Vectorstore needs to be updated")
# Creates a list of the file names to remove from vectorstore
# Note: the full path of the files is not needed here.
files_to_remove = list(set(files_in_vstore) - set(files_in_data.keys()))
# removes files from the vectorstore
print(f"Files to remove: {files_to_remove}")
collection = self._remove_from_vectorstore(collection, files_to_remove)
# Create dictionary of the files to add, where the keys are the filenames and the values are the path of the file in data
files_to_add = {filename: files_in_data[filename] for filename in list(set(files_in_data.keys()) - set(files_in_vstore))}
# adds the files to the vectorstore
print(f"Files to add: {files_to_add}")
collection = self._add_to_vectorstore(collection, files_to_add, sources)
print("Vectorstore update has been completed")
print(f" N Collection: {collection.count()}")
# delete collection to release collection and client object as well for garbage collection
del collection
return
def _remove_from_vectorstore(self, collection, files_to_remove):
"""
Method which takes as input a list of filenames to remove from the vectorstore,
then removes those filenames from the vectorstore.
"""
for filename in files_to_remove:
collection.delete(where={"filename": filename})
return collection
def _add_to_vectorstore(self, collection, files_to_add, sources={}):
"""
Method which takes as input:
collection: a ChromaDB collection
files_to_add: a dictionary with keys being the filenames and values being the file path
sources: a dictionary, usually loaded from a yaml file, whose keys are the
file hash (everything in the file name except the file extension) and whose
values are the url from which the source originated. Not every file needs to
be in the source dictionary.
and adds these files to the vectorstore.
"""
for filename, file in files_to_add.items():
# create the chunks
loader = None
try:
loader = self.loader(file)
except Exception as e:
print(f" ERROR - loading: {file} skip and move on. \n Exception: ", e)
# treat case where file extension is not recognized or is broken
if loader is None:
continue
# initialize lists for file chunks and metadata
chunks = []
metadatas = []
# load documents from current file and add to docs and metadata
docs = loader.load()
for doc in docs:
new_chunks = [document.page_content for document in self.text_splitter.split_documents([doc])]
chunks += new_chunks
metadatas += [doc.metadata for chunk in new_chunks]
# explicitly get file metadata
filehash = filename.split(".")[0]
url = sources[filehash] if filehash in sources.keys() else ""
# embed each chunk
embeddings = self.embedding_model.embed_documents(chunks)
# add filename as metadata for each chunk
for metadata in metadatas:
metadata["filename"] = filename
# create unique id for each chunk
# each id is the file hash (the filename without its extension), followed by 6 characters hashed from the chunk contents and 6 characters hashed from the current time
ids = []
for chunk in chunks:
identifier = hashlib.md5()
identifier.update(chunk.encode('utf-8'))
chunk_hash = str(int(identifier.hexdigest(),16))[0:6]
time_identifier = hashlib.md5()
time_identifier.update(str(time.time()).encode('utf-8'))
time_hash = str(int(time_identifier.hexdigest(),16))[0:6]
while str(filehash) + str(chunk_hash) + str(time_hash) in ids:
print("INFO: Found conflict with hash: " + str(filehash) + str(chunk_hash) + str(time_hash) + ". Trying again")
time_hash = str(int(time_hash) + 1)
ids.append(str(filehash) + str(chunk_hash) + str(time_hash))
print("Ids: ",ids)
collection.add(embeddings=embeddings, ids=ids, documents=chunks, metadatas=metadatas)
print("successfully added file ", filename)
return collection
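# Illustrative sketch of how a single chunk id above is assembled (hypothetical values):
#   filehash = "ab12cd34ef56"                                               # filename without its extension
#   chunk_hash = str(int(hashlib.md5(chunk.encode('utf-8')).hexdigest(), 16))[0:6]
#   time_hash = str(int(hashlib.md5(str(time.time()).encode('utf-8')).hexdigest(), 16))[0:6]
#   doc_id = filehash + chunk_hash + time_hash                              # e.g. "ab12cd34ef56123456654321"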
def loader(self, file_path):
"""
Return the document loader from a path, with the correct loader given the extension
"""
_, file_extension = os.path.splitext(file_path)
if file_extension == ".txt":
return TextLoader(file_path)
elif file_extension == ".html":
return BSHTMLLoader(file_path, bs_kwargs={"features": "html.parser"})
elif file_extension == ".pdf":
return PyPDFLoader(file_path)
else:
print(f" Error: format not supported -- {file_path}")
return None
| [] |
2024-01-10 | mit-submit/A2rchi | a2rchi~chains~models.py | from abc import abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import time
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.chat_models import ChatOpenAI
from langchain.llms import LlamaCpp
class BaseCustomLLM(LLM):
"""
Abstract class used to load a custom LLM
"""
n_tokens: int = 100 # this has to be here for parent LLM class
@property
def _llm_type(self) -> str:
return "custom"
@abstractmethod
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
pass
class DumbLLM(BaseCustomLLM):
"""
A simple Dumb LLM, perfect for testing
"""
filler: str = None
sleep_time_mean: int = 3
def _call(
self,
prompt: str = None,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
sleep_time = np.random.normal(self.sleep_time_mean, 1)
print(f"DumbLLM: sleeping {sleep_time}")
time.sleep(sleep_time)
return "I am just a dumb LLM, I will give you a number: " + str(np.random.randint(10000, 99999))
class LlamaLLM(BaseCustomLLM):
"""
Loading the Llama LLM from facebook. Make sure that the model
is downloaded and the base_model_path is linked to correct model
"""
base_model: str = None # location of the model (ex. meta-llama/Llama-2-70b)
peft_model: str = None # location of the finetuning of the model
enable_salesforce_content_safety: bool = True
# enable safety check with Salesforce safety flan t5
quantization: bool = True # enables 8-bit quantization
max_new_tokens: int = 4096 # maximum numbers of tokens to generate
seed: int = None # seed value for reproducibility
do_sample: bool = True # use sampling; otherwise greedy decoding
min_length: int = None # minimum length of sequence to generate, input prompt + min_new_tokens
use_cache: bool = True # [optional] model uses past last key/values attentions
top_p: float = .9 # [optional] for float < 1, only smallest set of most probable tokens with prob. that add up to top_p or higher are kept for generation
temperature: float = .6 # [optional] value used to modulate next token probs
top_k: int = 50 # [optional] number of highest prob. vocabulary tokens to keep for top-k-filtering
repetition_penalty: float = 1.0 # parameter for repetition penalty: 1.0 == no penalty
length_penalty: int = 1 # [optional] exponential penalty to length used with beam-based generation
max_padding_length: int = None # the max padding length used with tokenizer padding prompts
tokenizer: Callable = None
llama_model: Callable = None
safety_checker: List = None
def __init__(self, **kwargs):
super().__init__()
for key, value in kwargs.items():
setattr(self, key, value)
#Packages needed
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer
# Set the seeds for reproducibility
if self.seed:
torch.cuda.manual_seed(self.seed)
torch.manual_seed(self.seed)
# create tokenizer
self.tokenizer = None
self.tokenizer = LlamaTokenizer.from_pretrained(pretrained_model_name_or_path=self.base_model, local_files_only= False)
base_model = LlamaForCausalLM.from_pretrained(pretrained_model_name_or_path=self.base_model, local_files_only= False, load_in_8bit=self.quantization, device_map='auto', torch_dtype = torch.float16)
if self.peft_model:
self.llama_model = PeftModel.from_pretrained(base_model, self.peft_model)
else:
self.llama_model = base_model
self.llama_model.eval()
# create safety checker
self.safety_checker = []
if self.enable_salesforce_content_safety:
self.safety_checker.append(SalesforceSafetyChecker())
@property
def _llm_type(self) -> str:
return "custom"
def _call(
self,
prompt: str = None,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
# check if input is safe:
safety_results = [check(prompt) for check in self.safety_checker]
are_safe = all([r[1] for r in safety_results])
if not are_safe:
print("User prompt deemed unsafe.")
for method, is_safe, report in safety_results:
if not is_safe:
print(method)
print(report)
print("Skipping the Llama2 inference as the prompt is not safe.")
return """It looks as if your question may be unsafe.
This may be due to issues relating to toxicity, hate, identity, violence, physical tones, sexual tones, profanity, or biased questions.
Please try to reformat your question."""
# prepare input
batch = self.tokenizer(["[INST]" + prompt + "[/INST]"], padding='max_length', truncation=True,max_length=self.max_padding_length,return_tensors="pt")
batch = {k: v.to("cuda") for k, v in batch.items()}
# perform inference
with torch.no_grad():
outputs = self.llama_model.generate(
**batch,
max_new_tokens=self.max_new_tokens,
do_sample=self.do_sample,
top_p=self.top_p,
temperature=self.temperature,
min_length=self.min_length,
use_cache=self.use_cache,
top_k=self.top_k,
repetition_penalty=self.repetition_penalty,
length_penalty=self.length_penalty,
)
output_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
# safety check of the model output
safety_results = [check(output_text) for check in self.safety_checker]
are_safe = all([r[1] for r in safety_results])
if not are_safe:
print("Model output deemed unsafe.")
for method, is_safe, report in safety_results:
if not is_safe:
print(method)
print(report)
return """The response to your question may be unsafe.
This may be due to issues relating to toxicity, hate, identity, violence, physical tones, sexual tones, profanity, or biased questions.
There are two ways to solve this:
- generate the response
- reformat your question so that it does not prompt an unsafe response."""
return output_text[output_text.rfind("[/INST]") + len("[/INST]"):]
class OpenAILLM(ChatOpenAI):
"""
Loading the various OpenAI models, most commonly
model_name = 'gpt-4'
model_name = 'gpt-3.5-turbo'
Make sure that the api key is loaded as an environment variable
and the OpenAI package installed.
"""
model_name: str = "gpt-4"
temperature: int = 1
class SalesforceSafetyChecker():
def __init__(self):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, AutoConfig
import torch
# load the salesforce safety model from Huggingface: https://huggingface.co/Salesforce/safety-flan-t5-base
config = AutoConfig.from_pretrained("Salesforce/safety-flan-t5-base")
self.tokenizer = AutoTokenizer.from_pretrained("Salesforce/safety-flan-t5-base")
self.model = AutoModelForSeq2SeqLM.from_pretrained("Salesforce/safety-flan-t5-base", config=config)
self.model.eval()
def __call__(self, output_text):
prefix = "Is the <Text> field safe or unsafe?"
input_ids = self.tokenizer(prefix + " <Text> " + output_text + " <Context> ", return_tensors="pt").input_ids
if len(input_ids[0]) > 512:
print("Input length is > 512 tokens. Safety check result could be incorrect.")
with torch.no_grad():
outputs = self.model.generate(input_ids,output_scores = True,return_dict_in_generate = True,max_new_tokens = 20)
is_safe = self.tokenizer.decode(outputs.sequences[0], skip_special_tokens=True).split(" ")[0] == "safe"
report = ""
if not is_safe:
true_false_ids = self.tokenizer("true false").input_ids[:2]
keys = ["toxicity", "hate", "identity", "violence", "physical", "sexual", "profanity", "biased"]
scores = {}
for k, i in zip(keys, range(3,20,2)):
scores[k] = round(outputs.scores[i][0,true_false_ids].softmax(dim=0)[0].item(), 5)
report += "|" + "|".join(f"{n:^10}" for n in scores.keys()) + "|\n"
report += "|" + "|".join(f"{n:^10}" for n in scores.values()) + "|\n"
return "Salesforce Content Safety Flan T5 Base", is_safe, report
| [] |
2024-01-10 | mit-submit/A2rchi | a2rchi~utils~config_loader.py | from a2rchi.chains.models import OpenAILLM, DumbLLM, LlamaLLM
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
import os
import yaml
class Config_Loader:
def __init__(self):
self.config = self.load_config()
def load_config(self):
"""
Small function for loading the config.yaml file
"""
env = os.getenv("RUNTIME_ENV")
try:
with open(f"./config/{env}-config.yaml", "r") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# change the model class parameter from a string to an actual class
MODEL_MAPPING = {
"OpenAILLM": OpenAILLM,
"DumbLLM": DumbLLM,
"LlamaLLM": LlamaLLM
}
for model in config["chains"]["chain"]["MODEL_CLASS_MAP"].keys():
config["chains"]["chain"]["MODEL_CLASS_MAP"][model]["class"] = MODEL_MAPPING[model]
EMBEDDING_MAPPING = {
"OpenAIEmbeddings": OpenAIEmbeddings,
"HuggingFaceEmbeddings": HuggingFaceEmbeddings
}
for model in config["utils"]["embeddings"]["EMBEDDING_CLASS_MAP"].keys():
config["utils"]["embeddings"]["EMBEDDING_CLASS_MAP"][model]["class"] = EMBEDDING_MAPPING[model]
return config
except Exception as e:
raise e
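# Illustrative sketch (assumed layout, not taken from the repo's config files): load_config above
# expects a YAML file shaped roughly like
#   chains:
#     chain:
#       MODEL_CLASS_MAP:
#         OpenAILLM:
#           class: OpenAILLM          # replaced by the actual class object at load time
#           kwargs: {}
#   utils:
#     embeddings:
#       EMBEDDING_CLASS_MAP:
#         OpenAIEmbeddings:
#           class: OpenAIEmbeddings
#           kwargs: {}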
| [] |
2024-01-10 | novano1d/calcGPT | DesktopProgram~calcgpt.py | import openai
import serial
import sys
import glob
def serial_ports(): #pros steal functions from stack overflow
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
print("Available serial ports: ")
for x in range(0, len(serial_ports())):
print(x, serial_ports()[x])
print("Enter your selection:")
port = input()
ser = serial.Serial(serial_ports()[int(port)], 9600)
ser.write(b'ack') #ack connection
openai.api_key = "YOUR KEY HERE"
messages = [ {"role": "system", "content":
"You are a intelligent assistant."} ]
while True:
message = ser.readline()
message = message.decode('utf-8')
print(message)
if message:
messages.append(
{"role": "user", "content": message},
)
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
print(f"ChatGPT: {reply}")
messages.append({"role": "assistant", "content": reply})
chunk_size = 63 #The buffer is only 64 characters on the calculator. We must break the message down into 64 byte increments to avoid issues.
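# (for example, a 150-character reply is written as three chunks of 63, 63 and 24 bytes)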
for i in range(0, len(reply), chunk_size):
chunk = reply[i:i+chunk_size]
ser.write(bytes(chunk, 'utf-8')) | [
"You are a intelligent assistant."
] |
2024-01-10 | QuantDeveloperUSA/ai_assistant_for_attorneys | ai_assistant_for_attorneys.py | import streamlit as st
from streamlit_extras import Release_Mode
from langchain.llms import OpenAI
st.title('👨⚖️ Assistente I.A. para os melhores advogados do Brasil')
# openai_api_key is read preferably from config.txt if the file exists; otherwise from the environment variable API_TOKEN; otherwise from the text input field in the sidebar
# try to open the file config.txt and read the API_TOKEN from it
# if the file does not exist, read the API_TOKEN from the environment variable API_TOKEN
# if the environment variable does not exist, read the API_TOKEN from the text input field in the sidebar
openai_api_key = ""
try:
with open('config.txt') as f:
openai_api_key = f.readline()
except:
pass
if openai_api_key == "":
openai_api_key = st.secrets["API_TOKEN"]
if openai_api_key == "":
openai_api_key = st.sidebar.text_input('Qual é a palavra magica?')
Release_Mode()
Context_for_assistant_Prompt = "O assistente Jarvis é uma Inteligencia Artificial criada pelo Renomado Engenheiro Roberto, um dos melhores Engenheiros do planeta Terra. \nA Dra Debora é uma advogada extremamente importante nas comarcas do Rio de Janeiro no Brasil é muito criativa e super inteligente. Dra Debora perguntou a seu assistente Jarvis: "
Contextualize_the_Assistant_Answer = "O assistente Jarvis, que é também advogado ilustrissimo, já foi inclusive juíz de direito e Desembargador, respondeu: "
def generate_response(input_text):
llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key, max_tokens=3000)
st.info(llm(input_text))
with st.form('my_form'):
assistant_text = Context_for_assistant_Prompt + st.text_area('Olá, como posso te ajudar agora?', '') + Contextualize_the_Assistant_Answer
#the color of the submit button is blue
submitted_to_assistant = st.form_submit_button('Enviar 👨💼', help='Depois que escrever a mensagem pro teu assistente, clique aqui!')
if not openai_api_key.startswith('sk-'):
st.warning('Essa nao é a palavra magica!', icon='⚠')
if submitted_to_assistant and openai_api_key.startswith('sk-'):
generate_response(assistant_text)
Release_Mode()
# to change the color of the submit button to green, we can use this code
# submitted_to_assistant = st.form_submit_button('Ask the Assistant 👨💼', help='Click to submit the form')
# to run this app, use this command: streamlit run ai_assistant_for_attorneys.py
# to run the app online, point the browser to https://o-assistente-da-dra-debora.streamlit.app/ | [
"O assistente Jarvis é uma Inteligencia Artificial criada pelo Renomado Engenheiro Roberto, um dos melhores Engenheiros do planeta Terra. \nA Dra Debora é uma advogada extremamente importante nas comarcas do Rio de Janeiro no Brasil é muito criativa e super inteligente. Dra Debora perguntou a seu assistente Jarvis: "
] |
2024-01-10 | yghokim/llm-chat-web | chatbot~generators~gpt3_generator.py | import os
from os import getcwd, path
from asyncio import to_thread
import yaml
import openai
from chatbot.chatbot import ResponseGenerator, DialogTurn, RegenerateRequestException
class GPT3StaticPromptResponseGenerator(ResponseGenerator):
@classmethod
def from_yml(cls, file_path: str, model: str | None):
with open(path.join(getcwd(), file_path), 'r') as f:
yml_data: dict = yaml.load(f, Loader=yaml.FullLoader)
print(yml_data)
return cls(
prompt_base=yml_data["prompt-base"],
user_prefix=yml_data["user-prefix"],
system_prefix=yml_data["system-prefix"],
line_separator=yml_data["line-separator"],
initial_system_message=yml_data["initial-system-utterance"],
gpt3_params=yml_data["gpt3-params"],
gpt3_model=model
)
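# Illustrative sketch (hypothetical values; only the key names are taken from from_yml above):
#   prompt-base: "The following is a conversation between me and a customer."
#   user-prefix: "Customer: "
#   system-prefix: "Me: "
#   line-separator: "\n"
#   initial-system-utterance: "How's your day so far?"
#   gpt3-params:
#     temperature: 0.9
#     top_p: 1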
def __init__(self,
prompt_base: str,
user_prefix: str = "Customer: ",
system_prefix: str = "Me: ",
line_separator: str = "\n",
initial_system_message: str = "How's your day so far?",
gpt3_model: str = None,
gpt3_params: dict = None
):
openai.api_key = os.getenv('OPENAI_API_KEY')
self.prompt_base = prompt_base
self.user_prefix = user_prefix
self.system_prefix = system_prefix
self.line_separator = line_separator
self.initial_system_message = initial_system_message
if gpt3_model is not None:
self.gpt3_model = gpt3_model
else:
self.gpt3_model = "text-davinci-002"
self.max_tokens = 256
self.gpt3_params = gpt3_params or dict(
temperature=0.9,
presence_penalty=0.6,
frequency_penalty=0.5,
top_p=1
)
def _generate_prompt(self, dialog: list[DialogTurn]) -> str:
first_user_message_index = next((i for i, v in enumerate(dialog) if v.is_user == True), -1)
if first_user_message_index >= 0:
str_arr: list[str] = [self.prompt_base.strip(), " ", dialog[first_user_message_index].message]
str_arr += [f"{self.line_separator}{self.user_prefix if turn.is_user else self.system_prefix}{turn.message}"
for turn in dialog[first_user_message_index + 1:]]
str_arr.append(f"{self.line_separator}{self.system_prefix}")
return "".join(str_arr)
else:
return self.prompt_base
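# Worked example (hypothetical dialog; assumes DialogTurn exposes is_user/message as used above,
# with the default prefixes): for prompt_base = "You are a friendly coach." and turns
#   user:   "I feel tired."
#   system: "Sorry to hear that."
#   user:   "Any tips?"
# _generate_prompt produces roughly:
#   "You are a friendly coach. I feel tired.\nMe: Sorry to hear that.\nCustomer: Any tips?\nMe: "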
async def _get_response_impl(self, dialog: list[DialogTurn]) -> str:
if len(dialog) == 0:
return self.initial_system_message
else:
prompt = self._generate_prompt(dialog)
result = await to_thread(openai.Completion.create,
engine=self.gpt3_model,
prompt=prompt,
max_tokens=self.max_tokens,
stop=[self.user_prefix, self.system_prefix],
**self.gpt3_params,
)
top_choice = result.choices[0]
if top_choice.finish_reason == 'stop':
response_text = top_choice.text.strip()
if len(response_text) > 0:
return response_text
else:
raise RegenerateRequestException("Empty text")
else:
raise Exception("GPT3 error")
| [] |
2024-01-10 | heterog/mind-wave | mind_wave.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2023 Andy Stewart
#
# Author: Andy Stewart <[email protected]>
# Maintainer: Andy Stewart <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import openai
import queue
import threading
import traceback
import os
import sys
from epc.server import ThreadingEPCServer
from functools import wraps
from utils import (get_command_result, get_emacs_var, get_emacs_vars, init_epc_client, eval_in_emacs, logger, close_epc_client, message_emacs, string_to_base64, decode_text)
def catch_exception(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
message_emacs(traceback.format_exc())
return wrapper
def threaded(func):
@wraps(func)
def wrapper(*args, **kwargs):
thread = threading.Thread(target=func, args=args, kwargs=kwargs)
thread.start()
if hasattr(args[0], 'thread_queue'):
args[0].thread_queue.append(thread)
return wrapper
class MindWave:
def __init__(self, args):
# Init EPC client port.
init_epc_client(int(args[0]))
# Build EPC server.
self.server = ThreadingEPCServer(('localhost', 0), log_traceback=True)
# self.server.logger.setLevel(logging.DEBUG)
self.server.allow_reuse_address = True
# Get API key.
api_key = self.chat_get_api_key()
if api_key is not None:
openai.api_key = api_key
openai.api_base, openai.api_type, openai.api_version = get_emacs_vars(["mind-wave-api-base", "mind-wave-api-type", "mind-wave-api-version"])
self.server.register_instance(self) # register instance functions let elisp side call
# Start EPC server with sub-thread, avoid block Qt main loop.
self.server_thread = threading.Thread(target=self.server.serve_forever)
self.server_thread.start()
# Pass epc port and webengine codec information to Emacs when first start mind-wave.
eval_in_emacs('mind-wave--first-start', self.server.server_address[1])
# All Emacs request running in event_loop.
self.event_queue = queue.Queue()
self.event_loop = threading.Thread(target=self.event_dispatcher)
self.event_loop.start()
# Build thread queue.
self.thread_queue = []
# Build subtitles dict.
self.subtitle_dict = {}
# event_loop never exit, simulation event loop.
self.event_loop.join()
def event_dispatcher(self):
try:
while True:
self.event_queue.get(True)
self.event_queue.task_done()
except:
logger.error(traceback.format_exc())
def chat_get_api_key(self):
mind_wave_chat_api_key = get_emacs_var("mind-wave-api-key")
key = None
if mind_wave_chat_api_key:
key = mind_wave_chat_api_key
else:
key = os.environ.get("OPENAI_API_KEY")
if key is None:
            message_emacs("ChatGPT API key not found, please copy it from https://platform.openai.com/account/api-keys and set the Emacs variable mind-wave-api-key, or set the environment variable OPENAI_API_KEY")
return key
@catch_exception
def send_completion_request(self, messages, model="gpt-3.5-turbo"):
response = openai.ChatCompletion.create(
model = model,
messages = messages)
result = ''
for choice in response.choices:
result += choice.message.content
return (result, response)
@catch_exception
def send_stream_request(self, messages, callback, model="gpt-3.5-turbo"):
response = openai.ChatCompletion.create(
model = model,
messages = messages,
temperature=0,
stream=True)
for chunk in response:
(result_type, result_content) = self.get_chunk_result(chunk)
callback(result_type, result_content)
@threaded
def chat_ask(self, buffer_file_name, buffer_content, prompt):
content, model = self.chat_parse_content(buffer_content)
messages = content
if prompt:
messages = content + [{"role": "user", "content": prompt}]
def callback(result_type, result_content):
eval_in_emacs("mind-wave-chat-ask--response", buffer_file_name, result_type, result_content)
self.send_stream_request(messages, callback, model)
def parse_lines(self, lines):
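        # A sketch of the chat buffer format this parser expects (inferred from the
        # rules below; an assumption, not normative documentation):
        #
        #   # : gpt-4                      <- optional model override
        #   # > User: first question
        #   plain lines continue the previous message
        #   # > Assistant: reply text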
messages = []
role = ''
content = ''
model = "gpt-3.5-turbo"
for line in lines:
if line.startswith('# : '):
begin = line.find(":") + 1
model_content = line[begin:].strip()
if model_content != "":
model = model_content
elif line.startswith('# > ') or line.startswith('## > '):
if role:
messages.append({"role": role, "content": content})
begin = line.find('>') + 1
end = line.find(':')
role = line[begin:end].strip().lower()
content = line[end + 2:]
else:
content += line
if role:
messages.append({"role": role, "content": content})
return messages, model
def add_default_system_message(self, messages):
default_system = {"role": "system", "content": "You are a helpful assistant."}
if len(messages) == 0:
messages.append(default_system)
elif messages[0]["role"] != "system":
messages = [default_system] + messages
return messages
def chat_parse_content(self, buffer_content):
text = decode_text(buffer_content)
lines = text.splitlines(True)
messages, model = self.parse_lines(lines)
messages = self.add_default_system_message(messages)
return messages, model
@threaded
def parse_title(self, buffer_file_name, text_content, role, prompt):
text = decode_text(text_content)
(result, _) = self.send_completion_request(
[{"role": "system", "content": role},
{"role": "user", "content": f"{prompt}:\n{text}"}],
get_emacs_var("mind-wave-parse-title-model")
)
eval_in_emacs("mind-wave-parse-title--response", buffer_file_name, result)
@threaded
def async_text(self, buffer_file_name, text_content, text_start, text_end, role, prompt, notify_start, notify_end):
text = decode_text(text_content)
if text_content == "":
content = f"{prompt}"
else:
content = f"{prompt}:\n{text}"
messages = [{"role": "system", "content": role},
{"role": "user", "content": content}]
def callback(result_type, result_content):
eval_in_emacs("mind-wave-async-text--response",
buffer_file_name,
result_type,
result_content,
text_start,
text_end,
notify_start,
notify_end)
self.send_stream_request(messages, callback, get_emacs_var("mind-wave-async-text-model"))
@threaded
def action_code(self, buffer_name, major_mode, code, role, prompt, callback_template, notify_start, notify_end):
text = decode_text(code)
messages = [{"role": "system", "content": role},
{"role": "user", "content": f"{prompt}: \n{text}"}]
def callback(result_type, result_content):
eval_in_emacs("mind-wave-split-window--response",
buffer_name,
f"mind-wave-{callback_template}-{buffer_name}",
major_mode,
result_type,
result_content,
notify_start,
notify_end)
self.send_stream_request(messages, callback, get_emacs_var("mind-wave-action-code-model"))
@threaded
def explain_word(self, buffer_name, major_mode, sentence, word, callback_template, notify_start, notify_end):
sentence_text = decode_text(sentence)
messages = [{"role": "system", "content": "你是一位英语词义语法专家, 你在教我英语, 我给你一句英文句子, 和这个句子中的一个单词, 请用中文帮我解释一下,这个单词在句子中的意思和句子本身的意思. 并举几个相同意思的英文例句,并用中文解释例句。如果你明白了请说同意,然后我们开始。"},
{"role": "assistant", "content": "好的,我明白了,请给我这个句子和单词。"},
{"role": "user", "content": f"句子是:{sentence_text}\n 单词是:{word}"}]
def callback(result_type, result_content):
eval_in_emacs("mind-wave-split-window--response",
buffer_name,
f"mind-wave-{callback_template}-{buffer_name}",
major_mode,
result_type,
result_content,
notify_start,
notify_end)
self.send_stream_request(messages, callback, get_emacs_var("mind-wave-explain-word-model"))
def get_video_subtitle(self, video_id):
from youtube_transcript_api import YouTubeTranscriptApi
message_emacs(f"Get subtitles for video id: {video_id}...")
if video_id in self.subtitle_dict:
text = self.subtitle_dict[video_id]
else:
transcript = YouTubeTranscriptApi.get_transcript(video_id, languages=["zh-Hans", "en"])
text = " ".join(line["text"] for line in transcript)
self.subtitle_dict[video_id] = text
return text
@threaded
def summary_video(self, buffer_name, video_id, role, prompt, notify_start, notify_end):
        import importlib.util
        if importlib.util.find_spec("youtube_transcript_api") is None:
message_emacs("Please use pip3 install package 'youtube_transcript_api' first.")
return
text = self.get_video_subtitle(video_id)
self.summary_text(buffer_name, role, prompt, notify_start, notify_end, text, video_id)
@threaded
def git_commit(self, dir, role, prompt):
diff_string = get_command_result(f"cd {dir} ; git diff")
(result, _) = self.send_completion_request(
[{"role": "system", "content": role},
{"role": "user", "content": f"{prompt}:\n{diff_string}"}],
get_emacs_var("mind-wave-git-commit-model"))
eval_in_emacs("mind-wave-generate-commit-name--response", result)
@threaded
def summary_web(self, buffer_name, url, role, prompt, notify_start, notify_end):
import shutil
if not shutil.which("readable"):
message_emacs("Please install 'readable' cli tool first")
return
text = get_command_result(f"readable {url} -p 'text-content'")
self.summary_text(buffer_name, role, prompt, notify_start, notify_end, text, url)
def summary_text(self, buffer_name, role, prompt, notify_start, notify_end, text, template):
part_size = 3000
message_parts = [text[i:i + part_size] for i in range(0, len(text), part_size)]
def callback(result_type, result_content):
eval_in_emacs("mind-wave-split-window--response",
buffer_name,
f"mind-wave-summary-{template}",
"text-mode",
result_type,
result_content,
notify_start,
notify_end)
self.send_stream_part_request(role, prompt, message_parts, callback)
def send_stream_part_request(self, role, prompt, message_parts, callback):
if not message_parts:
return
text = message_parts[0]
messages = [{"role": "system", "content": role},
{"role": "user", "content": f"{prompt}: \n{text}"}]
try:
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages,
temperature=0,
stream=True)
for chunk in response:
(result_type, result_content) = self.get_chunk_result(chunk)
callback(result_type, result_content)
if result_type == "end":
self.send_stream_part_request(role, prompt, message_parts[1:], callback)
except:
message_emacs(traceback.format_exc())
def get_chunk_result(self, chunk):
delta = chunk.choices[0].delta
if not delta:
return ("end", "")
elif "role" in delta:
return ("start", "")
elif "content" in delta:
return ("content", string_to_base64(delta["content"]))
def cleanup(self):
"""Do some cleanup before exit python process."""
close_epc_client()
if __name__ == "__main__":
if len(sys.argv) >= 3:
import cProfile
profiler = cProfile.Profile()
profiler.run("MindWave(sys.argv[1:])")
else:
MindWave(sys.argv[1:])
| [
"你是一位英语词义语法专家, 你在教我英语, 我给你一句英文句子, 和这个句子中的一个单词, 请用中文帮我解释一下,这个单词在句子中的意思和句子本身的意思. 并举几个相同意思的英文例句,并用中文解释例句。如果你明白了请说同意,然后我们开始。",
"PLACEHOLDER:\nPLACEHOLDER",
"句子是:PLACEHOLDER\n 单词是:PLACEHOLDER",
"You are a helpful assistant.",
"好的,我明白了,请给我这个句子和单词。",
"PLACEHOLDER: \nPLACEHOLDER"
] |
2024-01-10 | senthilkumarimuth/DocsGPT_Openai | src~prepare_contextvector.py | import PyPDF2
from langchain.text_splitter import CharacterTextSplitter
import pandas as pd
import openai
import dotenv,os
from transformers import GPT2TokenizerFast
import pickle
import time
from src.utils.rewrite_pages import rewrite
import sys
from pathlib import Path, PurePath
sys.path.append(PurePath(Path(__file__).parents[1]).as_posix())
from utils.logging.custom_logging import logger
# set api key
env = dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# enter the document name for which vector to be created
document_name = str(input('Enter PDF document name for which vector to be created(keep it short ex: pdp): '))
# pdf to text
pdfFileObj = open('../data/TVS Jupiter 125 - SMW.pdf', 'rb')
pdfReader = PyPDF2.PdfReader(pdfFileObj)
num_pages = len(pdfReader.pages)
data = []
logger.debug("wait while pages are being rewritten by completion API to remove noises")
for page in range(0, num_pages):
pageObj = pdfReader.pages[page]
page_text = pageObj.extract_text()
data.append(page_text)
pdfFileObj.close()
data_rewrite = [rewrite(doc) for doc in data]
logger.info(f'Number of pages in the document is: {len(data)}')
# Split small chucks to so that LLMs can perform well
text_splitter = CharacterTextSplitter(chunk_size=1500, separator="\n")
docs = []
metadatas = []
sources = None
for i, d in enumerate(data_rewrite):
splits = text_splitter.split_text(d)
docs.extend(splits)
metadatas.extend([{"source": i}] * len(splits))
df = pd.DataFrame(metadatas)
df.insert(1, 'content', docs)
df.insert(1,'raw_index', df.index)
df = df.set_index(['raw_index',"source"])
logger.info(f'Number of rows in the document after chunk splits: {str(len(df))}')
# Tokenize
def count_tokens(text: str) -> int:
"""count the number of tokens in a string"""
return len(tokenizer.encode(text))
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2") ##Todo: Use the logic provided by openai
content_token = [ count_tokens(text) for text in df.content.tolist()]
logger.info(f'Total number of tokens in document: {(str(sum(content_token)))}')
df.insert(1, 'tokens', content_token)
EMBEDDING_MODEL = "text-embedding-ada-002"
def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
result = openai.Embedding.create(
model=model,
input= text
)
return result["data"][0]["embedding"]
def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
logger.info(f'Embedding process is started')
counter = 0
embed_dict = {}
    page_count = 20 # For free-trial users, 20 requests per min are allowed
for idx, r in df.iterrows():
embed_dict[idx] = get_embedding(r.content)
counter = counter + 1
time.sleep(2)
if counter == page_count:
counter = 0
logger.info(f'Embedding vector for {page_count} pages created.Waiting for 60 seconds before continuing')
time.sleep(60) # Workaround for rate limit for a min
logger.info(f'Embedding process is completed')
return embed_dict
# compute embedding for the document
document_embeddings = compute_doc_embeddings(df)
# Save as pkl file
root_path = PurePath(Path(__file__).parents[1]).as_posix()
vector_path = os.path.join(root_path, 'vectorstores', f'{document_name}')
os.makedirs(vector_path, exist_ok=True)
# write docs.index and pkl file
df.to_pickle(os.path.join(vector_path,'df.pkl'))
df.to_csv(os.path.join(vector_path,'df.csv'))
with open(os.path.join(vector_path,"document_embeddings.pkl"), "wb") as f:
pickle.dump(document_embeddings, f)
# end
# Todo: Update path in HTML so that new document can be recognized by UI
logger.info('Vectorization is successful') | [] |
2024-01-10 | senthilkumarimuth/DocsGPT_Openai | src~utils~rewrite_pages.py | """
This module rewrites pages so they can be used when converting pages to vectors, since raw pages are very noisy.
"""
import openai
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": "text-davinci-003",
}
prompt_init = "Rewrite the below in a format that AI easily understands, but don't change any important words. \n"
def remove_page_number(page):
page_numper = ""
number = "0123456789"
for i in page:
if i in number:
page_numper = page_numper + i
else:
break
page = page[len(page_numper):]
return page
def rewrite(page):
prompt = prompt_init +"'"+ remove_page_number(page)+ "'"
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
re_written_text = response["choices"][0]["text"].strip(" \n")
return re_written_text
| [
"Rewrite the below in a format that AI easily understands, but don't change any important words. \n"
] |
2024-01-10 | senthilkumarimuth/DocsGPT_Openai | src~utils~common.py | import openai
import numpy as np
import tiktoken
import pandas as pd
import sys
from pathlib import Path, PurePath
sys.path.append(PurePath(Path(__file__).parents[1]).as_posix())
from src.utils.logging.custom_logging import logger
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
MAX_SECTION_LEN = 1000
SEPARATOR = "\n* "
ENCODING = "gpt2" # encoding for text-davinci-003
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
logger.debug(f"Context separator contains {separator_len} tokens")
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 500,
"model": COMPLETIONS_MODEL,
}
def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
Returns the similarity between two vectors.
Because OpenAI Embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, contexts: dict[(str, str), np.array]) -> list[
(float, (str, str))]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
logger.debug(f"most relevant document sections {document_similarities}")
return document_similarities
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame, template: str, memory) -> str:
"""
    Fetch the most relevant document sections and assemble the completion prompt.
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
section = SEPARATOR + document_section.content.replace("\n", " ")
if chosen_sections_len > MAX_SECTION_LEN:
logger.warning(f'SECTION LEN EXCEED TO MAX SECTION LEN({MAX_SECTION_LEN})')
logger.warning(f'missed to include in prompt: {section}')
else:
            logger.info(f'Context probability is: {_}')
logger.debug(f'Contex: {section}')
chosen_sections.append(section)
chosen_sections_indexes.append(str(section_index))
#chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
#header = """You are TVS QA BOT. You are capable of answering questions reqarding TVS Owner Manual. Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
_prompt = template+ "\n" +memory.load_memory_variables({})['history'] + "\nQUESTION: " + \
question +"\ncontent: " + "".join(chosen_sections) + "\nFINAL ANSWER:"
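    # A rough sketch of the prompt assembled above (layout only, not an exact
    # transcript):
    #
    #   <template>
    #   <conversation history from memory>
    #   QUESTION: <user question>
    #   content:
    #   * <most relevant section 1>
    #   * <most relevant section 2>
    #   FINAL ANSWER: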
return _prompt
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: dict[(str, str), np.array],
template: str,
memory,
show_prompt: bool = False,) -> str:
"""
Facade function to get question from user and call model, eventually returns the answer to user
:param query: question to docsgpt
:param df: document in dataframe
:param document_embeddings: embedding vector of document
:param template: prompt
:param show_prompt: to show prompt in stdout or not? boolean
:return: answer from docgpt
"""
prompt = construct_prompt(
query,
document_embeddings,
df,
template,
memory
)
#prompt = prompt + "\n" + memory.load_memory_variables({})['history']
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return response["choices"][0]["text"].strip(" \n") | [
"\nQUESTION: ",
"\n",
"\ncontent: ",
"\nFINAL ANSWER:"
] |
2024-01-10 | sudheer0071/Review_Genie | GPT_Terminal.py | API_KEY = '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'
import openai
import os
os.environ['OPENAI_API_KEY'] = API_KEY
openai.api_key = os.environ['OPENAI_API_KEY']
keep_prompting = True
while keep_prompting:
print('ReviewGenie : \t',end='')
prompt = input('Hello, how can I help you today (type "exit" if done):\n\nUser: \t')
print('\n')
if prompt == 'exit':
keep_prompting = False
else:
response = openai.Completion.create(engine = 'text-davinci-003', prompt=prompt,max_tokens = 200)
print('ReviewGenie : \t',end='')
print(response['choices'][0]['text'])
print('\n')
| [
"Hello, how can I help you today (type \"exit\" if done):\n\nUser: \t",
"False",
"True"
] |
2024-01-10 | fakecrash1/Jarvis | gpt_functions.py | import openai
from datetime import datetime
import os
import urllib.request
import pywhatkit
from users import user_manager
# API Key
api_key_path = './Api_keys/api_key_openai.txt'
# Read the API key from the text file
with open(api_key_path, 'r') as f:
openai_api_key = f.read().strip()
openai.api_key = openai_api_key
# Define the path to the directory where the generated images will be saved
image_dir = "./Jarvis_Memory/images"
# Check if the directory exists, create it if it doesn't
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# Generate response
def generate_response(user_input, conversation_history):
system_content = "You are JARVIS (Just A Rather Very Intelligent System), respectively the household assistance of the "+user_manager.current_user.name+" family and designed by Mr. "+user_manager.current_user.name+" (as Jarvis, you call the user as Sir.). You are a helpful AI assistant and your purpose is to make human life better, with helpful answers."
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "system", "content": system_content},
{"role": "user", "content": conversation_history + "\\n" + user_manager.current_user.name + ": " + user_input}
],
max_tokens=320,
)
message = response['choices'][0]['message']['content'].strip()
conversation_history += "\\n" + user_manager.current_user.name + ": " + user_input + "\\n" + message
return message, conversation_history
# Generate image
def generate_image(image_prompt):
image_response = openai.Image.create(
prompt=image_prompt,
n=1,
size="1024x1024"
)
image_url = image_response['data'][0]['url']
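    # `image_dir` is created above but never written to, and urllib.request is
    # imported without being used. A hedged sketch (an assumption, not part of the
    # original flow) for also saving the generated image locally:
    #
    #   file_path = os.path.join(image_dir, datetime.now().strftime("%Y%m%d_%H%M%S") + ".png")
    #   urllib.request.urlretrieve(image_url, file_path)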
return image_url
# Play music
def play_music(song):
pywhatkit.playonyt(song)
# Get help
def print_help():
help_message = '''
Here are some tips and commands for using the chatbot:
1. Type your questions or statements normally, and the chatbot will respond.
2. To generate an image, type "generate image:" followed by a description, example: ("generate image: a beautiful sunset").
3. Play anything from Youtube, use command: "play:"
4. Search on Google with command: "search:"
4. To exit the chat, type "exit" or "quit".
Note: If the chatbot provides an unsatisfactory response, try rephrasing your question or statement.
'''
print(help_message)
| [
"\\n",
": "
] |
2024-01-10 | Areyouokay0/Autofilteraib | plugins~zzz_ai_LazyDeveloper.py | from utils import temp
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
from pyrogram import Client, filters
from info import *
import openai
openai.api_key = OPENAI_API
@Client.on_message(filters.private & filters.text)
async def lazy_answer(client, message):
if AI == True:
user_id = message.from_user.id
if user_id:
try:
lazy_users_message = message.text
user_id = message.from_user.id
response = openai.Completion.create(
model = "text-davinci-003",
prompt = lazy_users_message,
temperature = 0.5,
max_tokens = 1000,
top_p=1,
frequency_penalty=0.1,
presence_penalty = 0.0,
)
btn=[
[InlineKeyboardButton(text=f"⇱🤷♀️ Take Action 🗃️⇲", url=f'https://t.me/{temp.U_NAME}')],
[InlineKeyboardButton(text=f"🗑 Delete log ❌", callback_data=f'close_data')],
]
reply_markup=InlineKeyboardMarkup(btn)
footer_credit = "🦋<a href='https://t.me/andybotupdates'>• ʀᴇᴘᴏʀᴛ ɪꜱꜱᴜᴇ •</a>══<a href='https://telegram.me/Aman_sain'>• ᴄᴏɴᴛᴀᴄᴛ ᴍᴀꜱᴛᴇʀ •</a>🦋"
lazy_response = response.choices[0].text
await client.send_message(LAZY_AI_LOGS, text=f"⚡️⚡️#Lazy_AI_Query \n\n• A user named **{message.from_user.mention}** with user id - `{user_id}`. Asked me this query...\n\n══❚█══Q U E R Y══█❚══\n\n\n[Q྿.]**{lazy_users_message}**\n\n👇Here is what i responded:\n:-`{lazy_response}`\n\n\n❚═USER ID═❚═• `{user_id}` \n❚═USER Name═❚═• `{message.from_user.mention}` \n\n🗃️" , reply_markup = reply_markup )
await message.reply(f"{lazy_response}\n\n\n{footer_credit}")
except Exception as error:
print(error)
else:
return
# i am NOT INTRESTED IN BEING YOUR SECOND FATHER... SO DON'T REMOVE MY CREDIT...
# @LazyDeveloperr
# ...PRESENTING...
# A R T I F i C I A L - I N T E L i G E N C E
# .in Auto-Filter-Bot.
# @LazyDeveloperr
| [] |
2024-01-10 | satwik121/Extractor | rcp.py | import streamlit as st
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import DocumentAnalysisClient
import json
import mysql.connector
from mysql.connector import Error
import pandas as pd
from decouple import config
from dotenv import load_dotenv
import os
import openai
import config
# api_key = os.getenv("api_key")
# endpoint = os.getenv("endpoint")
api_key = st.secrets['api_key']
endpoint = st.secrets['endpoint']
print(api_key)
print(endpoint)
model_id = "receipt_model"
#formUrl = "YOUR_DOCUMENT"
# Create a Form Recognizer client
#form_recognizer_client = FormRecognizerClient(endpoint, AzureKeyCredential(api_key))
document_analysis_client = DocumentAnalysisClient(
endpoint=endpoint, credential=AzureKeyCredential(api_key)
)
dict1 = {}
# Create a Streamlit app
st.title("Receipt Extractor")
# Upload an invoice or receipt image using Streamlit
uploaded_file = st.file_uploader("Upload an Invoice or Receipt image", type=["jpg","png"])
from io import BytesIO
# Read the contents of the uploaded file
try:
if uploaded_file:
file_contents = uploaded_file.read()
#file_contents = uploaded_fil
# Create a file stream using BytesIO
file_stream = BytesIO(file_contents)
except Error as e:
st.error(f"Upload the File ")
if uploaded_file is not None:
with uploaded_file:
st.write("File Uploaded! Analyzing...")
file_contents = uploaded_file.read()
# Analyze the content of the document
poller =document_analysis_client.begin_analyze_document( model_id = model_id, document= file_stream)
documents = poller.result()
# Display extracted entities
for idx, document in enumerate(documents.documents):
st.subheader(f"Document #{idx + 1} Entities:")
for name, field in document.fields.items():
dict1[name] = field.value
dict1.pop("prod table")
#st.write(dict1)
import pandas as pd
# Create a list of indices
index = list(range(1, 2))
df = pd.DataFrame(dict1,index = index)
#st.write(df)
df.columns = ['VendorOrg', 'ClientName','Subtotal', 'Total', 'Tax', 'VendorAddress', 'ClientAddress', 'ShippingAddress', 'Receipt', 'ReceiptDate', 'DueDate', 'PONumber']
#df.to_csv('rcpt.csv',index = False)
ik = df.to_json(orient='records')
json_string = json.loads(ik)
d1= json_string[0]
st.write(d1)
st.write(df)
try:
# Establish a database connection
db_connection = mysql.connector.connect(
host="sqldatabase.mysql.database.azure.com",
user="yusuf121",
        password="your_db_password_here",  # credential redacted; load it from a secret store instead
database="chatbotdb"
)
cursor = db_connection.cursor()
insert_query = """INSERT INTO receipt (VendorOrg, ClientName, Subtotal, Total, Tax, VendorAddress, ClientAddress, ShippingAddress, Receipt, ReceiptDate, DueDate, PONumber)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
cursor.execute(insert_query, (
d1.get("VendorOrg", None),
d1.get("ClientName", None),
d1.get("Subtotal", None),
d1.get("Total", None),
d1.get("Tax", None),
d1.get("VendorAddress", None),
d1.get("ClientAddress", None),
d1.get("ShippingAddress", None),
d1.get("Receipt", None),
d1.get("ReceiptDate", None),
d1.get("DueDate", None),
d1.get("PONumber", None)
))
db_connection.commit()
except Error as e:
st.error(f"Error connecting to the database: {str(e)}")
st.write(" Details Added successfully in the table ")
| [] |
2024-01-10 | lwgm/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
    Rainbow Island (CHDBits) sign-in.
    If an OpenAI key is configured, ChatGPT is asked for the answer;
    otherwise a random option is chosen.
"""
    # Site URL to match; every handler class must set this to its own site URL
site_url = "chdbits.co"
    # Already signed in today
_sign_regex = ['今天已经签过到了']
    # Sign-in succeeded (patterns may need to be extended)
_success_regex = ['\\d+点魔力值']
    # Store known-correct answers so they can be looked up directly later
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
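    # The answers file maps a question's text to the list of correct option ids,
    # e.g. (illustrative only): {"<question text>": [1, 3]}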
@classmethod
def match(cls, url):
"""
        Decide from the site URL whether this sign-in handler matches; the default implementation is enough in most cases
        :param url: site URL
        :return: whether it matches; if it does, this class's signin method will be called
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
        Perform the sign-in
        :param site_info: site information, containing the site URL, cookie, UA, etc.
        :return: sign-in result message
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
        # Create the directory that stores known-correct answers
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
        # Check whether today's sign-in has already been done
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
        # Not signed in yet, so parse the HTML
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
        # Extract the question and the answer options from the page
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
        # Extract the question text with a regex
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
        # Look up previously stored answers
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
            # Look up the locally stored answer for this question
question_answer = exits_answers[question_str]
            # question_answer is expected to be a list
if not isinstance(question_answer, list):
question_answer = [question_answer]
            # If a known-correct answer exists locally, map it onto the page options
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
                # Submit the sign-in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
        # Default to a random answer; if ChatGPT returns one, submit that instead
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # Assemble the question for ChatGPT
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
        # Ask ChatGPT for the answer
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
        # Process the answer returned by ChatGPT
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
            # Extract the numbers from the reply with a regex
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
                    # If the returned number is one of the option ids, use it directly as an answer
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
        # Submit the sign-in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
        Sign-in request; the form fields look like:
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
        A multiple-choice question sends several choice[] fields...
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
        # Check whether the sign-in succeeded
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
                # On success, persist the answer to the local answers file
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
        Persist the correct answer to the local answers file after a successful sign-in
"""
try:
exits_answers[question] = answer
            # Serialize the data
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | NoPause-io/nopause-python | examples~async_stream_play_with_chatgpt_sounddevice.py | # Copyright 2023 NoPause
import asyncio
import openai
import nopause
import sounddevice as sd
# Install sdk packages first:
# pip install openai nopause
# Install sounddevice (see https://pypi.org/project/sounddevice/)
# pip install sounddevice
openai.api_key = "your_openai_api_key_here"
nopause.api_key = "your_nopause_api_key_here"
async def chatgpt_stream(prompt: str):
responses = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant from NoPause IO."},
{"role": "user", "content": prompt},
],
stream=True,
)
print("[User]: {}".format(prompt))
print("[Assistant]: ", end='', flush=True)
async def agenerator():
async for response in responses:
content = response["choices"][0]["delta"].get("content", '')
print(content, end='', flush=True)
yield content
print()
return agenerator()
async def text_stream():
sentence = "Hello, how are you?"
print("[Text]: ", end='', flush=True)
for char in sentence:
await asyncio.sleep(0.01) # simulate streaming text and avoid blocking
print(char, end='', flush=True)
yield char
print()
async def main():
# Note: openai key is needed for chatgpt
text_stream_type = 'chatgpt' # chatgpt | text
if text_stream_type == 'chatgpt':
text_agenerator = chatgpt_stream("Hello, who are you?")
else:
text_agenerator = text_stream()
audio_chunks = await nopause.Synthesis.astream(text_agenerator, voice_id="Zoe")
stream = sd.RawOutputStream(
samplerate=24000, blocksize=4800,
device=sd.query_devices(kind="output")['index'],
channels=1, dtype='int16',
)
with stream:
async for chunk in audio_chunks:
stream.write(chunk.data)
await asyncio.sleep(1)
print('Play done.')
if __name__ == '__main__':
asyncio.run(main())
| [
"You are a helpful assistant from NoPause IO."
] |
2024-01-10 | NoPause-io/nopause-python | examples~async_stream_play_with_chatgpt_pyaudio.py | # Copyright 2023 NoPause
import asyncio
import pyaudio
import openai
import nopause
# Install sdk packages first:
# pip install openai nopause
# For pyaudio (see https://pypi.org/project/PyAudio/):
# * windows
# python -m pip install pyaudio
# * mac
# brew install portaudio
# pip install pyaudio
openai.api_key = "your_openai_api_key_here"
nopause.api_key = "your_nopause_api_key_here"
async def chatgpt_stream(prompt: str):
responses = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant from NoPause IO."},
{"role": "user", "content": prompt},
],
stream=True,
)
print("[User]: {}".format(prompt))
print("[Assistant]: ", end='', flush=True)
async def agenerator():
async for response in responses:
content = response["choices"][0]["delta"].get("content", '')
print(content, end='', flush=True)
yield content
print()
return agenerator()
async def text_stream():
sentence = "Hello, how are you?"
print("[Text]: ", end='', flush=True)
for char in sentence:
await asyncio.sleep(0.01) # simulate streaming text and avoid blocking
print(char, end='', flush=True)
yield char
print()
async def main():
# Note: openai key is needed for chatgpt
text_stream_type = 'chatgpt' # chatgpt | text
if text_stream_type == 'chatgpt':
text_agenerator = chatgpt_stream("Hello, who are you?")
else:
text_agenerator = text_stream()
audio_chunks = await nopause.Synthesis.astream(text_agenerator, voice_id="Zoe")
p = pyaudio.PyAudio()
stream = p.open(
format=pyaudio.paInt16,
channels=1,
rate=24000,
output=True,
)
async for chunk in audio_chunks:
stream.write(chunk.data)
await asyncio.sleep(1)
stream.close()
p.terminate()
print('Play done.')
if __name__ == '__main__':
asyncio.run(main())
| [
"You are a helpful assistant from NoPause IO."
] |
2024-01-10 | NoPause-io/nopause-python | examples~stream_play_with_chatgpt_pyaudio.py | # Copyright 2023 NoPause
import time
import pyaudio
import openai
import nopause
# Install sdk packages first:
# pip install openai nopause
# For pyaudio (see https://pypi.org/project/PyAudio/):
# * windows
# python -m pip install pyaudio
# * mac
# brew install portaudio
# pip install pyaudio
openai.api_key = "your_openai_api_key_here"
nopause.api_key = "your_nopause_api_key_here"
def chatgpt_stream(prompt: str):
responses = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant from NoPause IO."},
{"role": "user", "content": prompt},
],
stream=True,
)
print("[User]: {}".format(prompt))
print("[Assistant]: ", end='', flush=True)
def generator():
for response in responses:
content = response["choices"][0]["delta"].get("content", '')
print(content, end='', flush=True)
yield content
print()
return generator()
def text_stream():
sentence = "Hello, how are you?"
print("[Text]: ", end='', flush=True)
for char in sentence:
time.sleep(0.01) # simulate streaming text
print(char, end='', flush=True)
yield char
print()
def main():
# Note: openai key is needed for chatgpt
text_stream_type = 'chatgpt' # chatgpt | text
if text_stream_type == 'chatgpt':
text_generator = chatgpt_stream("Hello, who are you?")
else:
text_generator = text_stream()
audio_chunks = nopause.Synthesis.stream(text_generator, voice_id="Zoe")
p = pyaudio.PyAudio()
stream = p.open(
format=pyaudio.paInt16,
channels=1,
rate=24000,
output=True,
)
for chunk in audio_chunks:
stream.write(chunk.data)
time.sleep(1) # wait for playing
stream.close()
p.terminate()
print('Play done.')
if __name__ == '__main__':
main()
| [
"You are a helpful assistant from NoPause IO."
] |
2024-01-10 | NoPause-io/nopause-python | examples~stream_play_with_chatgpt_sounddevice.py | # Copyright 2023 NoPause
import time
import openai
import nopause
import sounddevice as sd
# Install sdk packages first:
# pip install openai nopause
# Install sounddevice (see https://pypi.org/project/sounddevice/)
# pip install sounddevice
openai.api_key = "your_openai_api_key_here"
nopause.api_key = "your_nopause_api_key_here"
def chatgpt_stream(prompt: str):
responses = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant from NoPause IO."},
{"role": "user", "content": prompt},
],
stream=True,
)
print("[User]: {}".format(prompt))
print("[Assistant]: ", end='', flush=True)
def generator():
for response in responses:
content = response["choices"][0]["delta"].get("content", '')
print(content, end='', flush=True)
yield content
print()
return generator()
def text_stream():
sentence = "Hello, how are you?"
print("[Text]: ", end='', flush=True)
for char in sentence:
time.sleep(0.01) # simulate streaming text
print(char, end='', flush=True)
yield char
print()
def main():
# Note: openai key is needed for chatgpt
text_stream_type = 'chatgpt' # chatgpt | text
if text_stream_type == 'chatgpt':
text_generator = chatgpt_stream("Hello, who are you?")
else:
text_generator = text_stream()
audio_chunks = nopause.Synthesis.stream(text_generator, voice_id="Zoe")
stream = sd.RawOutputStream(
samplerate=24000, blocksize=4800,
device=sd.query_devices(kind="output")['index'],
channels=1, dtype='int16',
)
with stream:
for chunk in audio_chunks:
stream.write(chunk.data)
time.sleep(1)
print('Play done.')
if __name__ == '__main__':
main()
| [
"You are a helpful assistant from NoPause IO."
] |
2024-01-10 | NoPause-io/nopause-python | examples~async_stream_play_with_chatgpt_sounddevice_callback.py | # Copyright 2023 NoPause
import queue
import asyncio
import openai
import nopause
import sounddevice as sd
# Install sdk packages first:
# pip install openai nopause
# Install sounddevice (see https://pypi.org/project/sounddevice/)
# pip install sounddevice
openai.api_key = "your_openai_api_key_here"
nopause.api_key = "your_nopause_api_key_here"
async def chatgpt_stream(prompt: str):
responses = await openai.ChatCompletion.acreate(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant from NoPause IO."},
{"role": "user", "content": prompt},
],
stream=True,
)
print("[User]: {}".format(prompt))
print("[Assistant]: ", end='', flush=True)
async def agenerator():
async for response in responses:
content = response["choices"][0]["delta"].get("content", '')
print(content, end='', flush=True)
yield content
print()
return agenerator()
async def text_stream():
sentence = "Hello, how are you?"
print("[Text]: ", end='', flush=True)
for char in sentence:
await asyncio.sleep(0.01) # simulate streaming text and avoid blocking
print(char, end='', flush=True)
yield char
print()
async def main():
try:
# Note: openai key is needed for chatgpt
text_stream_type = 'chatgpt' # chatgpt | text
if text_stream_type == 'chatgpt':
text_agenerator = chatgpt_stream("Hello, who are you?")
else:
text_agenerator = text_stream()
audio_chunks = await nopause.Synthesis.astream(text_agenerator, voice_id="Zoe")
q = queue.Queue()
loop = asyncio.get_event_loop()
event = asyncio.Event() # For non-async, just use threading.Event
input_done = False
def callback(outdata, frames, time, status):
nonlocal input_done
if q.empty():
outdata[:] = b'\x00' * len(outdata)
if input_done:
loop.call_soon_threadsafe(event.set)
return
chunk_data = q.get()
# single channel
outdata[:len(chunk_data)] = chunk_data
if len(chunk_data) < len(outdata):
outdata[len(chunk_data):] = b'\x00' * (len(outdata) - len(chunk_data))
if input_done and q.empty():
loop.call_soon_threadsafe(event.set)
samplerate = 24000
blocksize = 4800
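        # Note (added for clarity): 4800 frames at 24 kHz is a 200 ms block, and each
        # int16 frame is 2 bytes, which is why byte offsets below advance by blocksize*2.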
stream = sd.RawOutputStream(
samplerate=samplerate, blocksize=blocksize,
device=sd.query_devices(kind="output")['index'],
channels=1, dtype='int16',
callback=callback)
with stream:
async for chunk in audio_chunks:
# Note, a block of int16 (blocksize*1 16-bit) = two blocks of bytes (blocksize*2 8-bit)
for i in range(0, len(chunk.data), blocksize*2):
q.put_nowait(chunk.data[i:i+blocksize*2])
input_done = True
await event.wait()
await asyncio.sleep(1)
print('Play done.')
except KeyboardInterrupt:
print('\nInterrupted by user')
except BaseException as e:
raise e
if __name__ == '__main__':
asyncio.run(main())
| [
"You are a helpful assistant from NoPause IO."
] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~output_parsers~structured.py | from __future__ import annotations
import json
from typing import List
from pydantic import BaseModel
from langchain.output_parsers.base import BaseOutputParser
from langchain.output_parsers.format_instructions import STRUCTURED_FORMAT_INSTRUCTIONS
line_template = '\t"{name}": {type} // {description}'
class ResponseSchema(BaseModel):
name: str
description: str
def _get_sub_string(schema: ResponseSchema) -> str:
return line_template.format(
name=schema.name, description=schema.description, type="string"
)
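# Illustrative example: a ResponseSchema named "answer" with description
# "the answer to the question" renders via line_template as:
#     "answer": string // the answer to the question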
class StructuredOutputParser(BaseOutputParser):
response_schemas: List[ResponseSchema]
@classmethod
def from_response_schemas(
cls, response_schemas: List[ResponseSchema]
) -> StructuredOutputParser:
return cls(response_schemas=response_schemas)
def get_format_instructions(self) -> str:
schema_str = "\n".join(
[_get_sub_string(schema) for schema in self.response_schemas]
)
return STRUCTURED_FORMAT_INSTRUCTIONS.format(format=schema_str)
def parse(self, text: str) -> BaseModel:
json_string = text.split("```json")[1].strip().strip("```").strip()
json_obj = json.loads(json_string)
for schema in self.response_schemas:
if schema.name not in json_obj:
raise ValueError(
f"Got invalid return object. Expected key `{schema.name}` "
f"to be present, but got {json_obj}"
)
return json_obj
| [
"\t\"{name}\": {type} // {description}"
] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~output_parsers~regex.py | from __future__ import annotations
import re
from typing import Dict, List, Optional
from pydantic import BaseModel
from langchain.output_parsers.base import BaseOutputParser
class RegexParser(BaseOutputParser, BaseModel):
"""Class to parse the output into a dictionary."""
regex: str
output_keys: List[str]
default_output_key: Optional[str] = None
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_parser"
def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
match = re.search(self.regex, text)
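        # Illustrative example: with regex r"Score: (\d+)" and output_keys=["score"],
        # parsing "Score: 7" yields {"score": "7"}.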
if match:
return {key: match.group(i + 1) for i, key in enumerate(self.output_keys)}
else:
if self.default_output_key is None:
raise ValueError(f"Could not parse output: {text}")
else:
return {
key: text if key == self.default_output_key else ""
for key in self.output_keys
}
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~output_parsers~list.py | from __future__ import annotations
from abc import abstractmethod
from typing import List
from langchain.output_parsers.base import BaseOutputParser
class ListOutputParser(BaseOutputParser):
"""Class to parse the output of an LLM call to a list."""
@abstractmethod
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
class CommaSeparatedListOutputParser(ListOutputParser):
"""Parse out comma separated lists."""
def get_format_instructions(self) -> str:
return (
"Your response should be a list of comma separated values, "
"eg: `foo, bar, baz`"
)
def parse(self, text: str) -> List[str]:
"""Parse the output of an LLM call."""
return text.strip().split(", ")
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~memory~kg.py | from typing import Any, Dict, List
from pydantic import BaseModel, Field
from langchain.chains.llm import LLMChain
from langchain.graphs import NetworkxEntityGraph
from langchain.graphs.networkx_graph import KnowledgeTriple, get_entities, parse_triples
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.prompt import (
ENTITY_EXTRACTION_PROMPT,
KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT,
)
from langchain.memory.utils import get_prompt_input_key
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, SystemMessage, get_buffer_string
class ConversationKGMemory(BaseChatMemory, BaseModel):
"""Knowledge graph memory for storing conversation memory.
Integrates with external knowledge graph to store and retrieve
information about knowledge triples in the conversation.
"""
k: int = 2
human_prefix: str = "Human"
ai_prefix: str = "AI"
kg: NetworkxEntityGraph = Field(default_factory=NetworkxEntityGraph)
knowledge_extraction_prompt: BasePromptTemplate = KNOWLEDGE_TRIPLE_EXTRACTION_PROMPT
entity_extraction_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT
llm: BaseLanguageModel
"""Number of previous utterances to include in the context."""
memory_key: str = "history" #: :meta private:
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
entities = self._get_current_entities(inputs)
summaries = {}
for entity in entities:
knowledge = self.kg.get_entity_knowledge(entity)
if knowledge:
summaries[entity] = ". ".join(knowledge) + "."
if summaries:
summary_strings = [
f"On {entity}: {summary}" for entity, summary in summaries.items()
]
if self.return_messages:
context: Any = [SystemMessage(content=text) for text in summary_strings]
else:
context = "\n".join(summary_strings)
else:
if self.return_messages:
context = []
else:
context = ""
return {self.memory_key: context}
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str:
"""Get the input key for the prompt."""
if self.input_key is None:
return get_prompt_input_key(inputs, self.memory_variables)
return self.input_key
def _get_prompt_output_key(self, outputs: Dict[str, Any]) -> str:
"""Get the output key for the prompt."""
if self.output_key is None:
if len(outputs) != 1:
raise ValueError(f"One output key expected, got {outputs.keys()}")
return list(outputs.keys())[0]
return self.output_key
def get_current_entities(self, input_string: str) -> List[str]:
chain = LLMChain(llm=self.llm, prompt=self.entity_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
)
return get_entities(output)
def _get_current_entities(self, inputs: Dict[str, Any]) -> List[str]:
"""Get the current entities in the conversation."""
prompt_input_key = self._get_prompt_input_key(inputs)
return self.get_current_entities(inputs[prompt_input_key])
def get_knowledge_triplets(self, input_string: str) -> List[KnowledgeTriple]:
chain = LLMChain(llm=self.llm, prompt=self.knowledge_extraction_prompt)
buffer_string = get_buffer_string(
self.chat_memory.messages[-self.k * 2 :],
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
output = chain.predict(
history=buffer_string,
input=input_string,
verbose=True,
)
knowledge = parse_triples(output)
return knowledge
def _get_and_update_kg(self, inputs: Dict[str, Any]) -> None:
"""Get and update knowledge graph from the conversation history."""
prompt_input_key = self._get_prompt_input_key(inputs)
knowledge = self.get_knowledge_triplets(inputs[prompt_input_key])
for triple in knowledge:
self.kg.add_triple(triple)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
self._get_and_update_kg(inputs)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.kg.clear()
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~output_parsers~pydantic.py | import json
import re
from typing import Any
from pydantic import BaseModel, ValidationError
from langchain.output_parsers.base import BaseOutputParser
from langchain.output_parsers.format_instructions import PYDANTIC_FORMAT_INSTRUCTIONS
class PydanticOutputParser(BaseOutputParser):
pydantic_object: Any
def parse(self, text: str) -> BaseModel:
try:
# Greedy search for 1st json candidate.
            match = re.search(r"\{.*\}", text.strip(), re.DOTALL)  # raw string; DOTALL so multi-line JSON matches
json_str = ""
if match:
json_str = match.group()
json_object = json.loads(json_str)
return self.pydantic_object.parse_obj(json_object)
except (json.JSONDecodeError, ValidationError) as e:
name = self.pydantic_object.__name__
msg = f"Failed to parse {name} from completion {text}. Got: {e}"
raise ValueError(msg)
def get_format_instructions(self) -> str:
schema = self.pydantic_object.schema()
# Remove extraneous fields.
reduced_schema = {
prop: {"description": data["description"], "type": data["type"]}
for prop, data in schema["properties"].items()
}
# Ensure json in context is well-formed with double quotes.
schema = json.dumps(reduced_schema)
return PYDANTIC_FORMAT_INSTRUCTIONS.format(schema=schema)
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~llms~sagemaker_endpoint.py | """Wrapper around Sagemaker InvokeEndpoint API."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Mapping, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class ContentHandlerBase(ABC):
"""A handler class to transform input from LLM to a
    format that SageMaker endpoint expects. Similarly,
the class also handles transforming output from the
SageMaker endpoint to a format that LLM class expects.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({prompt: prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
content_type: Optional[str] = "text/plain"
"""The MIME type of the input data passed to endpoint"""
accepts: Optional[str] = "text/plain"
"""The MIME type of the response data returned from endpoint"""
@abstractmethod
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
"""Transforms the input to a format that model can accept
as the request Body. Should return bytes or seekable file
like object in the format specified in the content_type
request header.
"""
@abstractmethod
def transform_output(self, output: bytes) -> str:
"""Transforms the output from the model to string that
the LLM class expects.
"""
class SagemakerEndpoint(LLM, BaseModel):
"""Wrapper around custom Sagemaker Inference Endpoints.
To use, you must supply the endpoint name from your deployed
Sagemaker model & the region where it is deployed.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Sagemaker endpoint.
See: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
"""
"""
Example:
.. code-block:: python
from langchain import SagemakerEndpoint
endpoint_name = (
"my-endpoint-name"
)
region_name = (
"us-west-2"
)
credentials_profile_name = (
"default"
)
se = SagemakerEndpoint(
endpoint_name=endpoint_name,
region_name=region_name,
credentials_profile_name=credentials_profile_name
)
"""
client: Any #: :meta private:
endpoint_name: str = ""
"""The name of the endpoint from the deployed Sagemaker model.
Must be unique within an AWS Region."""
region_name: str = ""
"""The aws region where the Sagemaker model is deployed, eg. `us-west-2`."""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
content_handler: ContentHandlerBase
"""The content handler class that provides an input and
output transform functions to handle formats between LLM
and the endpoint.
"""
"""
Example:
.. code-block:: python
class ContentHandler(ContentHandlerBase):
content_type = "application/json"
accepts = "application/json"
def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
input_str = json.dumps({prompt: prompt, **model_kwargs})
return input_str.encode('utf-8')
def transform_output(self, output: bytes) -> str:
response_json = json.loads(output.read().decode("utf-8"))
return response_json[0]["generated_text"]
"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
endpoint_kwargs: Optional[Dict] = None
"""Optional attributes passed to the invoke_endpoint
function. See `boto3`_. docs for more info.
.. _boto3: <https://boto3.amazonaws.com/v1/documentation/api/latest/index.html>
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that AWS credentials and python package exist in environment."""
try:
import boto3
try:
if values["credentials_profile_name"] is not None:
session = boto3.Session(
profile_name=values["credentials_profile_name"]
)
else:
# use default credentials
session = boto3.Session()
values["client"] = session.client(
"sagemaker-runtime", region_name=values["region_name"]
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
except ImportError:
            raise ValueError(
                "Could not import boto3 python package. "
                "Please install it with `pip install boto3`."
            )
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_name": self.endpoint_name},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "sagemaker_endpoint"
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Call out to Sagemaker inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
_endpoint_kwargs = self.endpoint_kwargs or {}
body = self.content_handler.transform_input(prompt, _model_kwargs)
content_type = self.content_handler.content_type
accepts = self.content_handler.accepts
# send request
try:
response = self.client.invoke_endpoint(
EndpointName=self.endpoint_name,
Body=body,
ContentType=content_type,
Accept=accepts,
**_endpoint_kwargs,
)
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
text = self.content_handler.transform_output(response["Body"])
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to the sagemaker endpoint.
text = enforce_stop_tokens(text, stop)
return text
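# --- Illustrative usage sketch (editor's addition, not part of the original
# module). The endpoint name, region, and JSON schema below are hypothetical
# placeholders; adapt them to whatever your deployed model actually expects.
if __name__ == "__main__":
    import json
    class ExampleContentHandler(ContentHandlerBase):
        content_type = "application/json"
        accepts = "application/json"
        def transform_input(self, prompt: str, model_kwargs: Dict) -> bytes:
            # The request schema is model-specific; "prompt" is an assumed key here.
            return json.dumps({"prompt": prompt, **model_kwargs}).encode("utf-8")
        def transform_output(self, output: bytes) -> str:
            response_json = json.loads(output.read().decode("utf-8"))
            return response_json[0]["generated_text"]
    se = SagemakerEndpoint(
        endpoint_name="my-endpoint-name",  # hypothetical endpoint
        region_name="us-west-2",  # hypothetical region
        credentials_profile_name="default",
        content_handler=ExampleContentHandler(),
    )
    print(se("Tell me a joke."))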
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~document_loaders~html_bs.py | """Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Dict, List, Union
from bs4 import BeautifulSoup
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__file__)
class BSHTMLLoader(BaseLoader):
"""Loader that uses beautiful soup to parse HTML files."""
def __init__(self, file_path: str) -> None:
self.file_path = file_path
def load(self) -> List[Document]:
"""Load HTML document into document objects."""
with open(self.file_path, "r") as f:
soup = BeautifulSoup(f, features="lxml")
text = soup.get_text()
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
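# Illustrative usage sketch (editor's addition, not part of the original
# module). "example.html" is a hypothetical local file path.
if __name__ == "__main__":
    loader = BSHTMLLoader("example.html")
    for doc in loader.load():
        print(doc.metadata["title"], "-", len(doc.page_content), "characters")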
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~output_parsers~rail_parser.py | from __future__ import annotations
from typing import Any, Dict
from langchain.output_parsers.base import BaseOutputParser
class GuardrailsOutputParser(BaseOutputParser):
guard: Any
@property
def _type(self) -> str:
return "guardrails"
@classmethod
def from_rail(cls, rail_file: str, num_reasks: int = 1) -> GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ValueError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(guard=Guard.from_rail(rail_file, num_reasks=num_reasks))
@classmethod
def from_rail_string(
cls, rail_str: str, num_reasks: int = 1
) -> GuardrailsOutputParser:
try:
from guardrails import Guard
except ImportError:
raise ValueError(
"guardrails-ai package not installed. "
"Install it by running `pip install guardrails-ai`."
)
return cls(guard=Guard.from_rail_string(rail_str, num_reasks=num_reasks))
def get_format_instructions(self) -> str:
return self.guard.raw_prompt.format_instructions
def parse(self, text: str) -> Dict:
return self.guard.parse(text)
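# Illustrative usage sketch (editor's addition, not part of the original
# module). "my_spec.rail" is a hypothetical path to a Guardrails RAIL spec and
# the guardrails-ai package must be installed for this to run.
if __name__ == "__main__":
    output_parser = GuardrailsOutputParser.from_rail("my_spec.rail")
    print(output_parser.get_format_instructions())
    # parsed = output_parser.parse(some_llm_output_text)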
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~memory~summary_buffer.py | from typing import Any, Dict, List
from pydantic import BaseModel, root_validator
from langchain.memory.chat_memory import BaseChatMemory
from langchain.memory.summary import SummarizerMixin
from langchain.schema import BaseMessage, SystemMessage, get_buffer_string
class ConversationSummaryBufferMemory(BaseChatMemory, SummarizerMixin, BaseModel):
"""Buffer with summarizer for storing conversation memory."""
max_token_limit: int = 2000
moving_summary_buffer: str = ""
memory_key: str = "history"
@property
def buffer(self) -> List[BaseMessage]:
return self.chat_memory.messages
@property
def memory_variables(self) -> List[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Return history buffer."""
buffer = self.buffer
if self.moving_summary_buffer != "":
first_messages: List[BaseMessage] = [
SystemMessage(content=self.moving_summary_buffer)
]
buffer = first_messages + buffer
if self.return_messages:
final_buffer: Any = buffer
else:
final_buffer = get_buffer_string(
buffer, human_prefix=self.human_prefix, ai_prefix=self.ai_prefix
)
return {self.memory_key: final_buffer}
@root_validator()
def validate_prompt_input_variables(cls, values: Dict) -> Dict:
"""Validate that prompt input variables are consistent."""
prompt_variables = values["prompt"].input_variables
expected_keys = {"summary", "new_lines"}
if expected_keys != set(prompt_variables):
raise ValueError(
"Got unexpected prompt input variables. The prompt expects "
f"{prompt_variables}, but it should have {expected_keys}."
)
return values
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save context from this conversation to buffer."""
super().save_context(inputs, outputs)
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
pruned_memory = []
while curr_buffer_length > self.max_token_limit:
pruned_memory.append(buffer.pop(0))
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
self.moving_summary_buffer = self.predict_new_summary(
pruned_memory, self.moving_summary_buffer
)
def clear(self) -> None:
"""Clear memory contents."""
super().clear()
self.moving_summary_buffer = ""
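# Illustrative usage sketch (editor's addition, not part of the original
# module). Requires an OpenAI API key in the environment; the tiny token limit
# is only there to force the summarization path.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    memory = ConversationSummaryBufferMemory(llm=OpenAI(temperature=0), max_token_limit=40)
    memory.save_context({"input": "Hi, I'm Ada."}, {"output": "Hello Ada, how can I help?"})
    memory.save_context({"input": "Summarize our chat."}, {"output": "You introduced yourself."})
    print(memory.load_memory_variables({}))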
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~document_loaders~csv_loader.py | from csv import DictReader
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class CSVLoader(BaseLoader):
"""Loads a CSV file into a list of documents.
    Each document represents one row of the CSV file. Every column in a row is
    converted into a key/value pair and written on its own line of the document's page_content.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
def __init__(self, file_path: str, csv_args: Optional[Dict] = None):
self.file_path = file_path
if csv_args is None:
self.csv_args = {
"delimiter": ",",
"quotechar": '"',
}
else:
self.csv_args = csv_args
def load(self) -> List[Document]:
docs = []
with open(self.file_path, newline="") as csvfile:
csv = DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv):
docs.append(
Document(
page_content="\n".join(
f"{k.strip()}: {v.strip()}" for k, v in row.items()
),
metadata={"source": self.file_path, "row": i},
)
)
return docs
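# Illustrative usage sketch (editor's addition, not part of the original
# module). "data.csv" is a hypothetical file; each row becomes one Document.
if __name__ == "__main__":
    loader = CSVLoader("data.csv", csv_args={"delimiter": ",", "quotechar": '"'})
    for doc in loader.load():
        print(doc.metadata["row"], doc.page_content[:60])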
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~document_loaders~youtube.py | """Loader that loads YouTube transcript."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import root_validator
from pydantic.dataclasses import dataclass
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
@dataclass
class GoogleApiClient:
"""A Generic Google Api Client.
To use, you should have the ``google_auth_oauthlib,youtube_transcript_api,google``
python package installed.
As the google api expects credentials you need to set up a google account and
register your Service. "https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
"""
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
service_account_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
def __post_init__(self) -> None:
self.creds = self._load_credentials()
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("credentials_path") and not values.get(
"service_account_path"
):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib"
"youtube-transcript-api`"
"to use the Google Drive loader"
)
creds = None
if self.service_account_path.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_path)
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
class YoutubeLoader(BaseLoader):
"""Loader that loads Youtube transcripts."""
def __init__(
self, video_id: str, add_video_info: bool = False, language: str = "en"
):
"""Initialize with YouTube video ID."""
self.video_id = video_id
self.add_video_info = add_video_info
self.language = language
@classmethod
def from_youtube_channel(cls, youtube_url: str, **kwargs: Any) -> YoutubeLoader:
"""Given a channel name, load all videos."""
video_id = youtube_url.split("youtube.com/watch?v=")[-1]
return cls(video_id, **kwargs)
def load(self) -> List[Document]:
"""Load documents."""
try:
from youtube_transcript_api import YouTubeTranscriptApi
except ImportError:
raise ImportError(
"Could not import youtube_transcript_api python package. "
"Please it install it with `pip install youtube-transcript-api`."
)
metadata = {"source": self.video_id}
if self.add_video_info:
# Get more video meta info
# Such as title, description, thumbnail url, publish_date
video_info = self._get_video_info()
metadata.update(video_info)
transcript_pieces = YouTubeTranscriptApi.get_transcript(
self.video_id, languages=[self.language]
)
transcript = " ".join([t["text"].strip(" ") for t in transcript_pieces])
return [Document(page_content=transcript, metadata=metadata)]
def _get_video_info(self) -> dict:
"""Get important video information.
Components are:
- title
- description
- thumbnail url,
- publish_date
- channel_author
- and more.
"""
try:
from pytube import YouTube
except ImportError:
raise ImportError(
"Could not import pytube python package. "
"Please it install it with `pip install pytube`."
)
yt = YouTube(f"https://www.youtube.com/watch?v={self.video_id}")
video_info = {
"title": yt.title,
"description": yt.description,
"view_count": yt.views,
"thumbnail_url": yt.thumbnail_url,
"publish_date": yt.publish_date,
"length": yt.length,
"author": yt.author,
}
return video_info
@dataclass
class GoogleApiYoutubeLoader(BaseLoader):
"""Loader that loads all Videos from a Channel
To use, you should have the ``googleapiclient,youtube_transcript_api``
python package installed.
As the service needs a google_api_client, you first have to initialize
the GoogleApiClient.
Additionally you have to either provide a channel name or a list of videoids
"https://developers.google.com/docs/api/quickstart/python"
Example:
.. code-block:: python
from langchain.document_loaders import GoogleApiClient
from langchain.document_loaders import GoogleApiYoutubeLoader
google_api_client = GoogleApiClient(
service_account_path=Path("path_to_your_sec_file.json")
)
loader = GoogleApiYoutubeLoader(
google_api_client=google_api_client,
channel_name = "CodeAesthetic"
)
            loader.load()
"""
google_api_client: GoogleApiClient
channel_name: Optional[str] = None
video_ids: Optional[List[str]] = None
add_video_info: bool = True
captions_language: str = "en"
def __post_init__(self) -> None:
self.youtube_client = self._build_youtube_client(self.google_api_client.creds)
def _build_youtube_client(self, creds: Any) -> Any:
try:
from googleapiclient.discovery import build
from youtube_transcript_api import YouTubeTranscriptApi # noqa: F401
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib"
"youtube-transcript-api`"
"to use the Google Drive loader"
)
return build("youtube", "v3", credentials=creds)
@root_validator
def validate_channel_or_videoIds_is_set(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if not values.get("channel_name") and not values.get("video_ids"):
raise ValueError("Must specify either channel_name or video_ids")
return values
def _get_transcripe_for_video_id(self, video_id: str) -> str:
from youtube_transcript_api import YouTubeTranscriptApi
transcript_pieces = YouTubeTranscriptApi.get_transcript(video_id)
return " ".join([t["text"].strip(" ") for t in transcript_pieces])
def _get_document_for_video_id(self, video_id: str, **kwargs: Any) -> Document:
captions = self._get_transcripe_for_video_id(video_id)
video_response = (
self.youtube_client.videos()
.list(
part="id,snippet",
id=video_id,
)
.execute()
)
return Document(
page_content=captions,
metadata=video_response.get("items")[0],
)
def _get_channel_id(self, channel_name: str) -> str:
request = self.youtube_client.search().list(
part="id",
q=channel_name,
type="channel",
maxResults=1, # we only need one result since channel names are unique
)
response = request.execute()
channel_id = response["items"][0]["id"]["channelId"]
return channel_id
def _get_document_for_channel(self, channel: str, **kwargs: Any) -> List[Document]:
channel_id = self._get_channel_id(channel)
request = self.youtube_client.search().list(
part="id,snippet",
channelId=channel_id,
maxResults=50, # adjust this value to retrieve more or fewer videos
)
video_ids = []
while request is not None:
response = request.execute()
# Add each video ID to the list
for item in response["items"]:
if not item["id"].get("videoId"):
continue
meta_data = {"videoId": item["id"]["videoId"]}
if self.add_video_info:
item["snippet"].pop("thumbnails")
meta_data.update(item["snippet"])
video_ids.append(
Document(
page_content=self._get_transcripe_for_video_id(
item["id"]["videoId"]
),
metadata=meta_data,
)
)
request = self.youtube_client.search().list_next(request, response)
return video_ids
def load(self) -> List[Document]:
"""Load documents."""
document_list = []
if self.channel_name:
document_list.extend(self._get_document_for_channel(self.channel_name))
elif self.video_ids:
document_list.extend(
[
self._get_document_for_video_id(video_id)
for video_id in self.video_ids
]
)
else:
raise ValueError("Must specify either channel_name or video_ids")
return document_list
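# Illustrative usage sketch (editor's addition, not part of the original
# module). "VIDEO_ID" is a hypothetical placeholder for a real YouTube video id;
# leaving add_video_info False skips the pytube-backed metadata fetch.
if __name__ == "__main__":
    loader = YoutubeLoader("VIDEO_ID", add_video_info=False)
    docs = loader.load()
    print(docs[0].metadata, docs[0].page_content[:200])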
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~output_parsers~regex_dict.py | from __future__ import annotations
import re
from typing import Dict, Optional
from pydantic import BaseModel
from langchain.output_parsers.base import BaseOutputParser
class RegexDictParser(BaseOutputParser, BaseModel):
"""Class to parse the output into a dictionary."""
regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private:
output_key_to_format: Dict[str, str]
no_update_value: Optional[str] = None
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_dict_parser"
def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
result = {}
for output_key, expected_format in self.output_key_to_format.items():
specific_regex = self.regex_pattern.format(re.escape(expected_format))
matches = re.findall(specific_regex, text)
if not matches:
raise ValueError(
f"No match found for output key: {output_key} with expected format \
{expected_format} on text {text}"
)
elif len(matches) > 1:
raise ValueError(
f"Multiple matches found for output key: {output_key} with \
expected format {expected_format} on text {text}"
)
elif (
self.no_update_value is not None and matches[0] == self.no_update_value
):
continue
else:
result[output_key] = matches[0]
return result
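# Illustrative usage sketch (editor's addition, not part of the original
# module), using the default "<label>: <value>" pattern on made-up LLM output.
if __name__ == "__main__":
    output_parser = RegexDictParser(
        output_key_to_format={"action": "Action", "action_input": "Action Input"},
        no_update_value="N/A",
    )
    sample = "Action: search. Action Input: weather in Berlin."
    print(output_parser.parse(sample))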
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~vectorstores~redis.py | """Wrapper around Redis vector database."""
from __future__ import annotations
import json
import uuid
from typing import Any, Callable, Iterable, List, Mapping, Optional
import numpy as np
from redis.client import Redis as RedisType
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
def _check_redis_module_exist(client: RedisType, module: str) -> bool:
return module in [m["name"] for m in client.info().get("modules", {"name": ""})]
class Redis(VectorStore):
def __init__(
self,
redis_url: str,
index_name: str,
embedding_function: Callable,
**kwargs: Any,
):
"""Initialize with necessary components."""
try:
import redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
self.embedding_function = embedding_function
self.index_name = index_name
try:
redis_client = redis.from_url(redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# check if redis add redisearch module
if not _check_redis_module_exist(redis_client, "search"):
raise ValueError(
"Could not use redis directly, you need to add search module"
"Please refer [RediSearch](https://redis.io/docs/stack/search/quick_start/)" # noqa
)
self.client = redis_client
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
# `prefix`: Maybe in the future we can let the user choose the index_name.
prefix = "doc" # prefix for the document keys
ids = []
# Check if index exists
for i, text in enumerate(texts):
key = f"{prefix}:{uuid.uuid4().hex}"
metadata = metadatas[i] if metadatas else {}
self.client.hset(
key,
mapping={
"content": text,
"content_vector": np.array(
self.embedding_function(text), dtype=np.float32
).tobytes(),
"metadata": json.dumps(metadata),
},
)
ids.append(key)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
try:
from redis.commands.search.query import Query
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# Creates embedding vector from user query
embedding = self.embedding_function(query)
# Prepare the Query
return_fields = ["metadata", "content", "vector_score"]
vector_field = "content_vector"
hybrid_fields = "*"
base_query = (
f"{hybrid_fields}=>[KNN {k} @{vector_field} $vector AS vector_score]"
)
redis_query = (
Query(base_query)
.return_fields(*return_fields)
.sort_by("vector_score")
.paging(0, k)
.dialect(2)
)
params_dict: Mapping[str, str] = {
"vector": np.array(embedding) # type: ignore
.astype(dtype=np.float32)
.tobytes()
}
# perform vector search
results = self.client.ft(self.index_name).search(redis_query, params_dict)
documents = [
Document(page_content=result.content, metadata=json.loads(result.metadata))
for result in results.docs
]
return documents
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
**kwargs: Any,
) -> Redis:
"""Construct RediSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the RediSearch instance.
3. Adds the documents to the newly created RediSearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import RediSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
try:
import redis
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
try:
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
kwargs.pop("redis_url")
client = redis.from_url(url=redis_url, **kwargs)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# check if redis add redisearch module
if not _check_redis_module_exist(client, "search"):
raise ValueError(
"Could not use redis directly, you need to add search module"
"Please refer [RediSearch](https://redis.io/docs/stack/search/quick_start/)" # noqa
)
embeddings = embedding.embed_documents(texts)
dim = len(embeddings[0])
# Constants
vector_number = len(embeddings) # initial number of vectors
# name of the search index if not given
if not index_name:
index_name = uuid.uuid4().hex
prefix = "doc" # prefix for the document keys
distance_metric = (
"COSINE" # distance metric for the vectors (ex. COSINE, IP, L2)
)
content = TextField(name="content")
metadata = TextField(name="metadata")
content_embedding = VectorField(
"content_vector",
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": distance_metric,
"INITIAL_CAP": vector_number,
},
)
fields = [content, metadata, content_embedding]
# Check if index exists
try:
client.ft(index_name).info()
print("Index already exists")
except: # noqa
# Create Redis Index
client.ft(index_name).create_index(
fields=fields,
definition=IndexDefinition(prefix=[prefix], index_type=IndexType.HASH),
)
pipeline = client.pipeline()
for i, text in enumerate(texts):
key = f"{prefix}:{str(uuid.uuid4().hex)}"
metadata = metadatas[i] if metadatas else {}
pipeline.hset(
key,
mapping={
"content": text,
"content_vector": np.array(
embeddings[i], dtype=np.float32
).tobytes(),
"metadata": json.dumps(metadata),
},
)
pipeline.execute()
return cls(redis_url, index_name, embedding.embed_query)
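# Illustrative usage sketch (editor's addition, not part of the original
# module). Assumes a local Redis instance with the RediSearch module loaded and
# an OpenAI API key configured for the embeddings.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    store = Redis.from_texts(
        ["redis is an in-memory data store", "postgres is a relational database"],
        OpenAIEmbeddings(),
        redis_url="redis://localhost:6379",
    )
    print(store.similarity_search("in-memory database", k=1))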
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~agents~load_tools.py | # flake8: noqa
"""Load tools."""
from typing import Any, List, Optional
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.api import news_docs, open_meteo_docs, tmdb_docs
from langchain.chains.api.base import APIChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.pal.base import PALChain
from langchain.llms.base import BaseLLM
from langchain.requests import RequestsWrapper
from langchain.tools.base import BaseTool
from langchain.tools.bing_search.tool import BingSearchRun
from langchain.tools.google_search.tool import GoogleSearchResults, GoogleSearchRun
from langchain.tools.python.tool import PythonREPLTool
from langchain.tools.requests.tool import RequestsGetTool
from langchain.tools.wikipedia.tool import WikipediaQueryRun
from langchain.tools.wolfram_alpha.tool import WolframAlphaQueryRun
from langchain.utilities.bash import BashProcess
from langchain.utilities.bing_search import BingSearchAPIWrapper
from langchain.utilities.google_search import GoogleSearchAPIWrapper
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.searx_search import SearxSearchWrapper
from langchain.utilities.serpapi import SerpAPIWrapper
from langchain.utilities.wikipedia import WikipediaAPIWrapper
from langchain.utilities.wolfram_alpha import WolframAlphaAPIWrapper
def _get_python_repl() -> BaseTool:
return PythonREPLTool()
def _get_requests() -> BaseTool:
return RequestsGetTool(requests_wrapper=RequestsWrapper())
def _get_terminal() -> BaseTool:
return Tool(
name="Terminal",
description="Executes commands in a terminal. Input should be valid commands, and the output will be any output from running that command.",
func=BashProcess().run,
)
_BASE_TOOLS = {
"python_repl": _get_python_repl,
"requests": _get_requests,
"terminal": _get_terminal,
}
def _get_pal_math(llm: BaseLLM) -> BaseTool:
return Tool(
name="PAL-MATH",
description="A language model that is really good at solving complex word math problems. Input should be a fully worded hard word math problem.",
func=PALChain.from_math_prompt(llm).run,
)
def _get_pal_colored_objects(llm: BaseLLM) -> BaseTool:
return Tool(
name="PAL-COLOR-OBJ",
description="A language model that is really good at reasoning about position and the color attributes of objects. Input should be a fully worded hard reasoning problem. Make sure to include all information about the objects AND the final question you want to answer.",
func=PALChain.from_colored_object_prompt(llm).run,
)
def _get_llm_math(llm: BaseLLM) -> BaseTool:
return Tool(
name="Calculator",
description="Useful for when you need to answer questions about math.",
func=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).run,
coroutine=LLMMathChain(llm=llm, callback_manager=llm.callback_manager).arun,
)
def _get_open_meteo_api(llm: BaseLLM) -> BaseTool:
chain = APIChain.from_llm_and_api_docs(llm, open_meteo_docs.OPEN_METEO_DOCS)
return Tool(
name="Open Meteo API",
description="Useful for when you want to get weather information from the OpenMeteo API. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
_LLM_TOOLS = {
"pal-math": _get_pal_math,
"pal-colored-objects": _get_pal_colored_objects,
"llm-math": _get_llm_math,
"open-meteo-api": _get_open_meteo_api,
}
def _get_news_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
news_api_key = kwargs["news_api_key"]
chain = APIChain.from_llm_and_api_docs(
llm, news_docs.NEWS_DOCS, headers={"X-Api-Key": news_api_key}
)
return Tool(
name="News API",
description="Use this when you want to get information about the top headlines of current news stories. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_tmdb_api(llm: BaseLLM, **kwargs: Any) -> BaseTool:
tmdb_bearer_token = kwargs["tmdb_bearer_token"]
chain = APIChain.from_llm_and_api_docs(
llm,
tmdb_docs.TMDB_DOCS,
headers={"Authorization": f"Bearer {tmdb_bearer_token}"},
)
return Tool(
name="TMDB API",
description="Useful for when you want to get information from The Movie Database. The input should be a question in natural language that this API can answer.",
func=chain.run,
)
def _get_wolfram_alpha(**kwargs: Any) -> BaseTool:
return WolframAlphaQueryRun(api_wrapper=WolframAlphaAPIWrapper(**kwargs))
def _get_google_search(**kwargs: Any) -> BaseTool:
return GoogleSearchRun(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_wikipedia(**kwargs: Any) -> BaseTool:
return WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper(**kwargs))
def _get_google_serper(**kwargs: Any) -> BaseTool:
return Tool(
name="Serper Search",
func=GoogleSerperAPIWrapper(**kwargs).run,
description="A low-cost Google Search API. Useful for when you need to answer questions about current events. Input should be a search query.",
)
def _get_google_search_results_json(**kwargs: Any) -> BaseTool:
return GoogleSearchResults(api_wrapper=GoogleSearchAPIWrapper(**kwargs))
def _get_serpapi(**kwargs: Any) -> BaseTool:
return Tool(
name="Search",
description="A search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SerpAPIWrapper(**kwargs).run,
coroutine=SerpAPIWrapper(**kwargs).arun,
)
def _get_searx_search(**kwargs: Any) -> BaseTool:
return Tool(
name="SearX Search",
description="A meta search engine. Useful for when you need to answer questions about current events. Input should be a search query.",
func=SearxSearchWrapper(**kwargs).run,
)
def _get_bing_search(**kwargs: Any) -> BaseTool:
return BingSearchRun(api_wrapper=BingSearchAPIWrapper(**kwargs))
_EXTRA_LLM_TOOLS = {
"news-api": (_get_news_api, ["news_api_key"]),
"tmdb-api": (_get_tmdb_api, ["tmdb_bearer_token"]),
}
_EXTRA_OPTIONAL_TOOLS = {
"wolfram-alpha": (_get_wolfram_alpha, ["wolfram_alpha_appid"]),
"google-search": (_get_google_search, ["google_api_key", "google_cse_id"]),
"google-search-results-json": (
_get_google_search_results_json,
["google_api_key", "google_cse_id", "num_results"],
),
"bing-search": (_get_bing_search, ["bing_subscription_key", "bing_search_url"]),
"google-serper": (_get_google_serper, ["serper_api_key"]),
"serpapi": (_get_serpapi, ["serpapi_api_key", "aiosession"]),
"searx-search": (_get_searx_search, ["searx_host"]),
"wikipedia": (_get_wikipedia, ["top_k_results"]),
}
def load_tools(
tool_names: List[str],
llm: Optional[BaseLLM] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
) -> List[BaseTool]:
"""Load tools based on their name.
Args:
tool_names: name of tools to load.
llm: Optional language model, may be needed to initialize certain tools.
callback_manager: Optional callback manager. If not provided, default global callback manager will be used.
Returns:
List of tools.
"""
tools = []
for name in tool_names:
if name in _BASE_TOOLS:
tools.append(_BASE_TOOLS[name]())
elif name in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
tool = _LLM_TOOLS[name](llm)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
elif name in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {name} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[name]
missing_keys = set(extra_keys).difference(kwargs)
if missing_keys:
raise ValueError(
f"Tool {name} requires some parameters that were not "
f"provided: {missing_keys}"
)
sub_kwargs = {k: kwargs[k] for k in extra_keys}
tool = _get_llm_tool_func(llm=llm, **sub_kwargs)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
elif name in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[name]
sub_kwargs = {k: kwargs[k] for k in extra_keys if k in kwargs}
tool = _get_tool_func(**sub_kwargs)
if callback_manager is not None:
tool.callback_manager = callback_manager
tools.append(tool)
else:
raise ValueError(f"Got unknown tool {name}")
return tools
def get_all_tool_names() -> List[str]:
"""Get a list of all possible tool names."""
return (
list(_BASE_TOOLS)
+ list(_EXTRA_OPTIONAL_TOOLS)
+ list(_EXTRA_LLM_TOOLS)
+ list(_LLM_TOOLS)
)
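# Illustrative usage sketch (editor's addition, not part of the original
# module). The llm-math tool needs an LLM, so an OpenAI key is assumed here.
if __name__ == "__main__":
    from langchain.llms import OpenAI
    loaded_tools = load_tools(["python_repl", "llm-math"], llm=OpenAI(temperature=0))
    print([t.name for t in loaded_tools])
    print(get_all_tool_names())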
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~utilities~zapier.py | """Util that can interact with Zapier NLA.
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
Note: this wrapper currently only implements the `api_key` auth method for testing
and server-side production use cases (using the developer's connected accounts on
Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application, and
LangChain needs access to the end-user's connected accounts on Zapier.com, you'll need
to use oauth. Review the full docs above and reach out to [email protected] for
developer support.
"""
import json
from typing import Dict, List, Optional
import requests
from pydantic import BaseModel, Extra, root_validator
from requests import Request, Session
from langchain.utils import get_from_dict_or_env
class ZapierNLAWrapper(BaseModel):
"""Wrapper for Zapier NLA.
Full docs here: https://nla.zapier.com/api/v1/dynamic/docs
    Note: this wrapper currently only implements the `api_key` auth method for
    testing and server-side production use cases (using the developer's connected
accounts on Zapier.com)
For use-cases where LangChain + Zapier NLA is powering a user-facing application,
and LangChain needs access to the end-user's connected accounts on Zapier.com,
you'll need to use oauth. Review the full docs above and reach out to
[email protected] for developer support.
"""
zapier_nla_api_key: str
zapier_nla_api_base: str = "https://nla.zapier.com/api/v1/"
zapier_nla_api_dynamic_base: str = "https://nla.zapier.com/api/v1/dynamic/"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _get_session(self) -> Session:
session = requests.Session()
session.headers.update(
{
"Accept": "application/json",
"Content-Type": "application/json",
}
)
session.params = {"api_key": self.zapier_nla_api_key}
return session
def _get_action_request(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Request:
data = params if params else {}
data.update(
{
"instructions": instructions,
}
)
return Request(
"POST",
self.zapier_nla_api_base + f"exposed/{action_id}/execute/",
json=data,
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
zapier_nla_api_key = get_from_dict_or_env(
values, "zapier_nla_api_key", "ZAPIER_NLA_API_KEY"
)
values["zapier_nla_api_key"] = zapier_nla_api_key
return values
def list(self) -> List[Dict]:
"""Returns a list of all exposed (enabled) actions associated with
current user (associated with the set api_key). Change your exposed
actions here: https://nla.zapier.com/demo/start/
The return list can be empty if no actions exposed. Else will contain
a list of action objects:
[{
"id": str,
"description": str,
"params": Dict[str, str]
}]
`params` will always contain an `instructions` key, the only required
param. All others optional and if provided will override any AI guesses
(see "understanding the AI guessing flow" here:
https://nla.zapier.com/api/v1/dynamic/docs)
"""
session = self._get_session()
response = session.get(self.zapier_nla_api_dynamic_base + "exposed/")
response.raise_for_status()
return response.json()["results"]
def run(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Executes an action that is identified by action_id, must be exposed
(enabled) by the current user (associated with the set api_key). Change
your exposed actions here: https://nla.zapier.com/demo/start/
The return JSON is guaranteed to be less than ~500 words (350
tokens) making it safe to inject into the prompt of another LLM
call.
"""
session = self._get_session()
request = self._get_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["result"]
def preview(
self, action_id: str, instructions: str, params: Optional[Dict] = None
) -> Dict:
"""Same as run, but instead of actually executing the action, will
instead return a preview of params that have been guessed by the AI in
case you need to explicitly review before executing."""
session = self._get_session()
params = params if params else {}
params.update({"preview_only": True})
request = self._get_action_request(action_id, instructions, params)
response = session.send(session.prepare_request(request))
response.raise_for_status()
return response.json()["input_params"]
def run_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as run, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.run(*args, **kwargs)
return json.dumps(data)
def preview_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as preview, but returns a stringified version of the JSON for
insertting back into an LLM."""
data = self.preview(*args, **kwargs)
return json.dumps(data)
def list_as_str(self, *args, **kwargs) -> str: # type: ignore[no-untyped-def]
"""Same as list, but returns a stringified version of the JSON for
insertting back into an LLM."""
actions = self.list(*args, **kwargs)
return json.dumps(actions)
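# Illustrative usage sketch (editor's addition, not part of the original
# module). Requires ZAPIER_NLA_API_KEY in the environment; the instruction text
# is made up, and the first exposed action (if any) is only previewed, not run.
if __name__ == "__main__":
    zapier = ZapierNLAWrapper()
    exposed_actions = zapier.list()
    print(zapier.list_as_str())
    if exposed_actions:
        print(zapier.preview(exposed_actions[0]["id"], "Say hello to the team"))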
| [] |
2024-01-10 | ndurner/langchain-gpt4 | langchain~vectorstores~pgvector.py | import enum
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple
import sqlalchemy
from pgvector.sqlalchemy import Vector
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, Session, declarative_base, relationship
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
Base = declarative_base() # type: Any
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
class CollectionStore(BaseModel):
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(sqlalchemy.JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first()
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
        collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
__tablename__ = "langchain_pg_embedding"
collection_id: Mapped[UUID] = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding: Vector = sqlalchemy.Column(Vector(ADA_TOKEN_COUNT))
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(sqlalchemy.JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
class QueryResult:
EmbeddingStore: EmbeddingStore
distance: float
class DistanceStrategy(str, enum.Enum):
EUCLIDEAN = EmbeddingStore.embedding.l2_distance
COSINE = EmbeddingStore.embedding.cosine_distance
MAX_INNER_PRODUCT = EmbeddingStore.embedding.max_inner_product
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.EUCLIDEAN
class PGVector(VectorStore):
"""
VectorStore implementation using Postgres and pgvector.
- `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use. (default: langchain)
- NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
- `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN)
- `EUCLIDEAN` is the euclidean distance.
- `COSINE` is the cosine distance.
- `pre_delete_collection` if True, will delete the collection if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self._conn = self.connect()
# self.create_vector_extension()
self.create_tables_if_not_exists()
self.create_collection()
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
def create_vector_extension(self) -> None:
try:
with Session(self._conn) as session:
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS vector")
session.execute(statement)
session.commit()
except Exception as e:
self.logger.exception(e)
def create_tables_if_not_exists(self) -> None:
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
CollectionStore.get_or_create(session, self.collection_name)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.error("Collection not found")
return
session.delete(collection)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return CollectionStore.get_by_name(session, self.collection_name)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
)
def similarity_search_with_score(
self, query: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(embedding, k)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
) -> List[Tuple[Document, float]]:
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
results: List[QueryResult] = (
session.query(
EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(EmbeddingStore.collection_id == collection.uuid)
.order_by(sqlalchemy.asc("distance"))
.join(
CollectionStore,
EmbeddingStore.collection_id == CollectionStore.uuid,
)
.limit(k)
.all()
)
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DistanceStrategy.COSINE,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> "PGVector":
"""
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required
"Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids, **kwargs)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> "PGVector":
"""
Return VectorStore initialized from documents and embeddings.
        Postgres connection string is required.
        Either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
| [] |
2024-01-10 | schroederdewitt/evals | evals~registry~data~word_association~corpus_tools~validators.py | import os
import re
from abc import ABC, abstractmethod
from collections.abc import Callable
from typing import Dict, List, NamedTuple, Tuple, Union
import numpy as np
import openai
from logger_config import logger
openai.api_key = os.environ.get("OPENAI_API_KEY")
CORRELATION_PROMPT_TEMPLATE = """Task: Estimate the degree of correlation between
two provided strings. In your evaluation, consider not just direct links, but also indirect and subtle correlations.
As an illustration, if 'watch' appears in the first string and 'tower' in the second,
you could consider the combined term 'watchtower'. Similarly, for 'watch' and 'warning',
think in terms of phrases like 'watch out for warnings'.
You should score the correlation using the following scale from 0.00 to 1.00, where:
0.00 signifies no correlation whatsoever.
0.50 indicates a moderate level of correlation. This means there are several significant
connections between the terms in the two strings, but these are not overwhelming.
1.00 is reserved for ONLY two strings that are completely identical.
Strings to Correlate:
string_one: {word}
string_two: {related_words}"""
ANSWER_PROMPT_TEMPLATE = """\nYour final output should be in the following format(NOTE: Include the square brackets):
\nReasoning: <your reasoning>\nFinal Answer: [<float, rounded to the 100th place>]"""
class Embedding(NamedTuple):
"""A named tuple representing a string and its corresponding embedding."""
string: str
vector: List[float]
class RelatedWordsPair(NamedTuple):
"""A named tuple containing a word and its related words."""
word: str
related_words: str
class EmbeddingPair(NamedTuple):
"""A named tuple representing a pair of related words and their embeddings."""
related_words_pair: RelatedWordsPair
vectors: Tuple[Embedding]
class SimilarityTuple(NamedTuple):
"""A named tuple representing the result of a similarity analysis."""
related_words_pair: RelatedWordsPair
similar: bool
similarity_score: float
class QualityValidator(ABC):
"""Abstract base class for implementing quality validators."""
def __init__(self, target_score: int) -> None:
self.target_score = target_score
@abstractmethod
def validate(self, related_words_pair: List[RelatedWordsPair]) -> List[SimilarityTuple]:
raise NotImplementedError
class EmbeddingsValidator(QualityValidator):
"""
An implementation of QualityValidator that validates the similarity of embeddings for pairs of related words.
"""
def validate(
self,
related_words_pairs: List[RelatedWordsPair],
similarity_function: Callable[[List[float], List[float]], float] = None,
) -> List[SimilarityTuple]:
"""
Validates a list of related words pairs by comparing their embeddings.
Args:
related_words_pairs: a list of related word pairs to validate.
similarity_function: a function that calculates similarity between two embeddings.
Defaults to cosine similarity.
Returns:
A list of SimilarityTuple each containing a RelatedWordsPair, a boolean indicating if they're similar,
and the similarity score.
"""
logger.info(f"Validating {len(related_words_pairs)} related strings.")
if similarity_function is None:
similarity_function = self.calculate_cosine_similarity
# flatten all strings
all_strings = [
string for pair in related_words_pairs for string in (pair.word, pair.related_words)
]
logger.debug(f"{all_strings} flattened.")
# get embeddings
embeddings = self.get_embeddings(all_strings)
logger.info(f"{len(embeddings)} embeddings processed.")
# form EmbeddingPairs
embedding_pairs = [
EmbeddingPair(related_string, (embeddings[i], embeddings[i + 1]))
for i, related_string in enumerate(related_words_pairs)
]
results = []
# for each EmbeddingPair, compare their embeddings and form SimilarityTuple
for pair in embedding_pairs:
similarity_score = round(
similarity_function(pair.vectors[0].vector, pair.vectors[1].vector), 3
)
similar = similarity_score > self.target_score
similarity_tuple = SimilarityTuple(pair.related_words_pair, similar, similarity_score)
results.append(similarity_tuple)
logger.info(f"{pair.related_words_pair}: {similar} score:({similarity_score})")
return results
@staticmethod
def calculate_cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
"""
Calculates cosine similarity between two vectors.
Args:
vec1: First vector.
vec2: Second vector.
Returns:
The cosine similarity between the two vectors.
"""
vec1_norm = np.linalg.norm(vec1)
vec2_norm = np.linalg.norm(vec2)
similarity = np.dot(vec1, vec2) / (vec1_norm * vec2_norm)
logger.debug(f"vec1: {vec1}, vec2: {vec2}, similarity: {similarity}")
return similarity
@staticmethod
def calculate_euclidean_distance(vec1: List[float], vec2: List[float]) -> float:
"""
Calculates Euclidean distance between two vectors.
Args:
vec1: First vector.
vec2: Second vector.
Returns:
The Euclidean distance between the two vectors.
"""
vec1 = np.array(vec1)
vec2 = np.array(vec2)
difference = vec1 - vec2
distance = np.linalg.norm(difference)
logger.debug(f"vec1: {vec1}, vec2: {vec2}, distance: {distance}")
return distance
@staticmethod
def get_embeddings(
emb_input: Union[RelatedWordsPair, str, List[str], List[List[int]]]
) -> List[Embedding]:
"""
Batches the process of getting embeddings from the API.
Args:
emb_input: an input which can be a single string, a list of strings or a list of lists of tokens.
Returns:
A list of Embedding namedtuples where each Embedding
represents the input string and its corresponding vector.
"""
response = openai.Embedding.create(model="text-embedding-ada-002", input=emb_input)
logger.debug(f"embeddings response: {response}")
response_data = response["data"]
emb_list = [data["embedding"] for data in response_data]
embeddings = [
Embedding(string=string, vector=vector) for string, vector in zip(emb_input, emb_list)
]
return embeddings
class GPTValidator(QualityValidator):
"""Uses the GPT model to validate the similarities between pairs of related words."""
def __init__(
self, target_score: int, criteria: Dict[str, str] = None, model: str = "gpt-4"
) -> None:
"""
Constructor for GPTValidator.
Args:
target_score: The minimum score threshold for two words to be considered similar.
criteria: A dictionary containing any specific criteria to be used in the validation process.
model: The identifier of the GPT model to use for the validation.
"""
self._model = model
self.criteria = criteria
super().__init__(target_score)
def validate(self, related_words_pairs: List[RelatedWordsPair]) -> List[SimilarityTuple]:
"""
Validates a list of related word pairs by comparing the outputs of the GPT model.
Args:
related_words_pairs: A list of pairs of related words to validate.
Returns:
A list of tuples containing the original word pair, a boolean indicating whether they are similar
according to the GPT model, and the similarity score.
"""
similarity_tuples = []
for related_words_pair in related_words_pairs:
response = self.get_chat_completion(related_words_pair)
similarity_score = self.extract_score(response)
similarity = similarity_score > self.target_score
similarity_tuple = SimilarityTuple(related_words_pair, similarity, similarity_score)
similarity_tuples.append(similarity_tuple)
return similarity_tuples
def get_chat_completion(
self,
related_words_pair: RelatedWordsPair,
correlation_prompt: str = None,
answer_prompt: str = None,
    ) -> str:
"""
Uses the GPT model to generate a completion based on a given prompt.
Args:
related_words_pair: The pair of related words to generate a completion for.
correlation_prompt: An optional specific prompt for the correlation task.
answer_prompt: An optional specific prompt for the answer format. If not provided, a default is used.
Returns:
The content of the message from the GPT model's response.
"""
if correlation_prompt is None:
correlation_prompt = CORRELATION_PROMPT_TEMPLATE.format(
word=related_words_pair.word, related_words=related_words_pair.related_words
)
if answer_prompt is None:
answer_prompt = ANSWER_PROMPT_TEMPLATE
prompt = correlation_prompt + answer_prompt
messages = [{"role": "user", "content": prompt}]
logger.debug(
f"Getting chat_completion using {self._model}.\nPrompting messages: {messages}"
)
response = openai.ChatCompletion.create(
model=self._model, messages=messages, temperature=0.0
)
logger.debug(f"response_message: {response}")
response_message = response["choices"][0]["message"]["content"]
logger.info(f"response_message: {response_message}")
return response_message
@staticmethod
def extract_score(response_content: str) -> float:
"""
Extracts the similarity score from the content of a GPT model's response.
Args:
response_content: The content of a GPT model's response.
Returns:
The similarity score as a float. If no score could be extracted, returns 0.0.
"""
try:
match = re.search(r"Final Answer: \[(.+?)]", response_content).group(1)
score = float(match)
logger.debug(f"response_content: {response_content}, score: {score}")
except AttributeError:
score = 0.0
logger.warning(
"Answer not found in response, score set to 0, will autofail validation scoring."
)
return score
def set_model(self, model: str) -> None:
"""
Changes the GPT model used for validation.
Args:
model: The identifier of the GPT model to use for the validation.
"""
# Add logic to reject incorrect models
self._model = model
if __name__ == "__main__":
# Demonstration of Both Validators
related_words_pairs = [
RelatedWordsPair("stage", "point, level, present"),
RelatedWordsPair("board", "point, level, present"),
]
validator = EmbeddingsValidator(0.75)
similarity_tuples: SimilarityTuple = validator.validate(related_words_pairs)
print(similarity_tuples)
gpt_validator = GPTValidator(0.75, model="gpt-4")
similarity_tuples: SimilarityTuple = gpt_validator.validate(related_words_pairs)
print(similarity_tuples)
| [
"warning",
"\nYour final output should be in the following format(NOTE: Include the square brackets):\n\nReasoning: <your reasoning>\nFinal Answer: [<float, rounded to the 100th place>]",
"PLACEHOLDERPLACEHOLDER",
"watch out for warnings",
"Task: Estimate the degree of correlation between\n two provided strings. In your evaluation, consider not just direct links, but also indirect and subtle correlations.\n As an illustration, if 'watch' appears in the first string and 'tower' in the second,\n you could consider the combined term 'watchtower'. Similarly, for 'watch' and 'warning',\n think in terms of phrases like 'watch out for warnings'.\n You should score the correlation using the following scale from 0.00 to 1.00, where:\n 0.00 signifies no correlation whatsoever.\n 0.50 indicates a moderate level of correlation. This means there are several significant\n connections between the terms in the two strings, but these are not overwhelming.\n 1.00 is reserved for ONLY two strings that are completely identical.\n\nStrings to Correlate:\nstring_one: {word}\nstring_two: {related_words}",
"watchtower"
] |
2024-01-10 | schroederdewitt/evals | evals~cli~oaieval.py | """
This file defines the `oaieval` CLI for running evals.
"""
import argparse
import logging
import shlex
import sys
from typing import Any, Mapping, Optional, Union, cast
import openai
import evals
import evals.api
import evals.base
import evals.record
from evals.eval import Eval
from evals.record import RecorderBase
from evals.registry import Registry
logger = logging.getLogger(__name__)
def _purple(str: str) -> str:
return f"\033[1;35m{str}\033[0m"
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Run evals through the API")
parser.add_argument(
"completion_fn",
type=str,
help="One or more CompletionFn URLs, separated by commas (,). A CompletionFn can either be the name of a model available in the OpenAI API or a key in the registry (see evals/registry/completion_fns).",
)
parser.add_argument("eval", type=str, help="Name of an eval. See registry.")
parser.add_argument("--extra_eval_params", type=str, default="")
parser.add_argument(
"--completion_args",
type=str,
default="",
help="Specify additional parameters to modify the behavior of the completion_fn during its creation. Parameters should be passed as a comma-separated list of key-value pairs (e.g., 'key1=value1,key2=value2'). This option allows for the dynamic modification of completion_fn settings, including the ability to override default arguments where necessary.",
)
parser.add_argument("--max_samples", type=int, default=None)
parser.add_argument("--cache", action=argparse.BooleanOptionalAction, default=True)
parser.add_argument("--visible", action=argparse.BooleanOptionalAction, default=None)
parser.add_argument("--seed", type=int, default=20220722)
parser.add_argument("--user", type=str, default="")
parser.add_argument("--record_path", type=str, default=None)
parser.add_argument(
"--log_to_file", type=str, default=None, help="Log to a file instead of stdout"
)
parser.add_argument(
"--registry_path",
type=str,
default=None,
action="append",
help="Path to the registry",
)
parser.add_argument("--debug", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument(
"--local-run",
action=argparse.BooleanOptionalAction,
default=True,
help="Enable local mode for running evaluations. In this mode, the evaluation results are stored locally in a JSON file. This mode is enabled by default.",
)
parser.add_argument(
"--http-run",
action=argparse.BooleanOptionalAction,
default=False,
help="Enable HTTP mode for running evaluations. In this mode, the evaluation results are sent to a specified URL rather than being stored locally or in Snowflake. This mode should be used in conjunction with the '--http-run-url' and '--http-batch-size' arguments.",
)
parser.add_argument(
"--http-run-url",
type=str,
default=None,
help="URL to send the evaluation results when in HTTP mode. This option should be used in conjunction with the '--http-run' flag.",
)
parser.add_argument(
"--http-batch-size",
type=int,
default=100,
help="Number of events to send in each HTTP request when in HTTP mode. Default is 1, i.e., send events individually. Set to a larger number to send events in batches. This option should be used in conjunction with the '--http-run' flag.",
)
parser.add_argument(
"--http-fail-percent-threshold",
type=int,
default=5,
help="The acceptable percentage threshold of HTTP requests that can fail. Default is 5, meaning 5% of total HTTP requests can fail without causing any issues. If the failure rate goes beyond this threshold, suitable action should be taken or the process will be deemed as failing, but still stored locally.",
)
parser.add_argument("--dry-run", action=argparse.BooleanOptionalAction, default=False)
parser.add_argument("--dry-run-logging", action=argparse.BooleanOptionalAction, default=True)
return parser
class OaiEvalArguments(argparse.Namespace):
completion_fn: str
eval: str
extra_eval_params: str
max_samples: Optional[int]
cache: bool
visible: Optional[bool]
seed: int
user: str
record_path: Optional[str]
log_to_file: Optional[str]
registry_path: list[str]
debug: bool
local_run: bool
http_run: bool
http_run_url: Optional[str]
http_batch_size: int
http_fail_percent_threshold: int
dry_run: bool
dry_run_logging: bool
def run(args: OaiEvalArguments, registry: Optional[Registry] = None) -> str:
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
visible = args.visible if args.visible is not None else (args.max_samples is None)
if args.max_samples is not None:
evals.eval.set_max_samples(args.max_samples)
registry = registry or Registry()
if args.registry_path:
registry.add_registry_paths(args.registry_path)
eval_spec = registry.get_eval(args.eval)
assert (
eval_spec is not None
), f"Eval {args.eval} not found. Available: {list(sorted(registry._evals.keys()))}"
# If the user provided an argument to --completion_args, parse it into a dict here, to be passed to the completion_fn creation **kwargs
completion_args = args.completion_args.split(",")
additional_completion_args = {k: v for k, v in (kv.split("=") for kv in completion_args if kv)}
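# Illustrative parsing example (flag values are hypothetical): passing
# --completion_args "temperature=0,max_tokens=64" yields {"temperature": "0", "max_tokens": "64"}.
# Values stay strings here; they are forwarded unchanged to make_completion_fn below.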
completion_fns = args.completion_fn.split(",")
completion_fn_instances = [
registry.make_completion_fn(url, **additional_completion_args) for url in completion_fns
]
run_config = {
"completion_fns": completion_fns,
"eval_spec": eval_spec,
"seed": args.seed,
"max_samples": args.max_samples,
"command": " ".join(map(shlex.quote, sys.argv)),
"initial_settings": {
"visible": visible,
},
}
eval_name = eval_spec.key
if eval_name is None:
raise Exception("you must provide a eval name")
run_spec = evals.base.RunSpec(
completion_fns=completion_fns,
eval_name=eval_name,
base_eval=eval_name.split(".")[0],
split=eval_name.split(".")[1],
run_config=run_config,
created_by=args.user,
)
record_path = (
f"/tmp/evallogs/{run_spec.run_id}_{args.completion_fn}_{args.eval}.jsonl"
if args.record_path is None
else args.record_path
)
if args.http_run:
args.local_run = False
elif args.local_run:
args.http_run = False
recorder = build_recorder(args, run_spec, record_path)
api_extra_options: dict[str, Any] = {}
if not args.cache:
api_extra_options["cache_level"] = 0
run_url = f"{run_spec.run_id}"
logger.info(_purple(f"Run started: {run_url}"))
def parse_extra_eval_params(
param_str: Optional[str],
) -> Mapping[str, Union[str, int, float]]:
"""Parse a string of the form "key1=value1,key2=value2" into a dict."""
if not param_str:
return {}
def to_number(x: str) -> Union[int, float, str]:
try:
return int(x)
except (ValueError, TypeError):
pass
try:
return float(x)
except (ValueError, TypeError):
pass
return x
str_dict = dict(kv.split("=") for kv in param_str.split(","))
return {k: to_number(v) for k, v in str_dict.items()}
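# Hedged example of the helper above (parameter names are illustrative, not from any registry):
# parse_extra_eval_params("num_few_shot=5,temperature=0.2,subset=dev")
#   -> {"num_few_shot": 5, "temperature": 0.2, "subset": "dev"}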
extra_eval_params = parse_extra_eval_params(args.extra_eval_params)
eval_class = registry.get_class(eval_spec)
eval: Eval = eval_class(
completion_fns=completion_fn_instances,
seed=args.seed,
name=eval_name,
eval_registry_path=eval_spec.registry_path,
registry=registry,
**extra_eval_params,
)
result = eval.run(recorder)
recorder.record_final_report(result)
if not (args.dry_run or args.local_run):
logger.info(_purple(f"Run completed: {run_url}"))
logger.info("Final report:")
for key, value in result.items():
logger.info(f"{key}: {value}")
return run_spec.run_id
def build_recorder(
args: OaiEvalArguments, run_spec: evals.base.RunSpec, record_path: str
) -> RecorderBase:
if args.dry_run:
return evals.record.DummyRecorder(run_spec=run_spec, log=args.dry_run_logging)
if args.local_run:
return evals.record.LocalRecorder(record_path, run_spec=run_spec)
if args.http_run:
if args.http_run_url is None:
raise ValueError("URL must be specified when using http-run mode")
return evals.record.HttpRecorder(
url=args.http_run_url,
run_spec=run_spec,
batch_size=args.http_batch_size,
fail_percent_threshold=args.http_fail_percent_threshold,
local_fallback_path=record_path,
)
return evals.record.Recorder(
record_path,
run_spec=run_spec,
)
def main() -> None:
parser = get_parser()
args = cast(OaiEvalArguments, parser.parse_args(sys.argv[1:]))
logging.basicConfig(
format="[%(asctime)s] [%(filename)s:%(lineno)d] %(message)s",
level=logging.INFO,
filename=args.log_to_file if args.log_to_file else None,
)
logging.getLogger("openai").setLevel(logging.WARN)
if hasattr(openai.error, "set_display_cause"): # type: ignore
openai.error.set_display_cause() # type: ignore
run(args)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | schroederdewitt/evals | evals~registry.py | """
Functions to handle registration of evals. To add a new eval to the registry,
add an entry in one of the YAML files in the `../registry` dir.
By convention, every eval name should start with {base_eval}.{split}.
"""
import copy
import difflib
import functools
import logging
import os
import re
from functools import cached_property
from pathlib import Path
from typing import Any, Generator, Iterator, Optional, Sequence, Tuple, Type, TypeVar, Union
import openai
import yaml
from evals import OpenAIChatCompletionFn, OpenAICompletionFn
from evals.api import CompletionFn, DummyCompletionFn
from evals.base import BaseEvalSpec, CompletionFnSpec, EvalSetSpec, EvalSpec
from evals.elsuite.modelgraded.base import ModelGradedSpec
from evals.utils.misc import make_object
logger = logging.getLogger(__name__)
DEFAULT_PATHS = [
Path(__file__).parents[0].resolve() / "registry",
Path.home() / ".evals",
]
SPEC_RESERVED_KEYWORDS = ["key", "group", "cls"]
def n_ctx_from_model_name(model_name: str) -> Optional[int]:
"""Returns n_ctx for a given API model name. Model list last updated 2023-06-16."""
# note that for most models, the max tokens is n_ctx + 1
PREFIX_AND_N_CTX: list[tuple[str, int]] = [
("gpt-3.5-turbo-", 4096),
("gpt-4-32k-", 32768),
("gpt-4-", 8192),
]
MODEL_NAME_TO_N_CTX: dict[str, int] = {
"ada": 2048,
"text-ada-001": 2048,
"babbage": 2048,
"text-babbage-001": 2048,
"curie": 2048,
"text-curie-001": 2048,
"davinci": 2048,
"text-davinci-001": 2048,
"code-davinci-002": 8000,
"text-davinci-002": 4096,
"text-davinci-003": 4096,
"gpt-3.5-turbo": 4096,
"gpt-4": 8192,
"gpt-4-32k": 32768,
"gpt-4-base": 8192,
}
# first, look for an exact match
if model_name in MODEL_NAME_TO_N_CTX:
return MODEL_NAME_TO_N_CTX[model_name]
# otherwise, look for a prefix match
for model_prefix, n_ctx in PREFIX_AND_N_CTX:
if model_name.startswith(model_prefix):
return n_ctx
# not found
return None
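# Illustrative lookups based on the tables above (model names are examples only):
# n_ctx_from_model_name("gpt-4") -> 8192 (exact match)
# n_ctx_from_model_name("gpt-4-32k-0613") -> 32768 (prefix match on "gpt-4-32k-")
# n_ctx_from_model_name("some-custom-model") -> None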
def is_chat_model(model_name: str) -> bool:
if model_name in {"gpt-4-base"}:
return False
CHAT_MODEL_NAMES = {"gpt-3.5-turbo", "gpt-4", "gpt-4-32k"}
if model_name in CHAT_MODEL_NAMES:
return True
for model_prefix in {"gpt-3.5-turbo-", "gpt-4-", "gpt-4-32k-"}:
if model_name.startswith(model_prefix):
return True
return False
T = TypeVar("T")
RawRegistry = dict[str, Any]
class Registry:
def __init__(self, registry_paths: Sequence[Union[str, Path]] = DEFAULT_PATHS):
self._registry_paths = [Path(p) if isinstance(p, str) else p for p in registry_paths]
def add_registry_paths(self, paths: Sequence[Union[str, Path]]) -> None:
self._registry_paths.extend([Path(p) if isinstance(p, str) else p for p in paths])
@cached_property
def api_model_ids(self) -> list[str]:
try:
return [m["id"] for m in openai.Model.list()["data"]]
except openai.error.OpenAIError as err: # type: ignore
# Errors can happen when running eval with completion function that uses custom
# API endpoints and authentication mechanisms.
logger.warning(f"Could not fetch API model IDs from OpenAI API: {err}")
return []
def make_completion_fn(
self,
name: str,
**kwargs: Any,
) -> CompletionFn:
"""
Create a CompletionFn. The name can be one of the following formats:
1. openai-model-id (e.g. "gpt-3.5-turbo")
2. completion-fn-id (from the registry)
"""
if name == "dummy":
return DummyCompletionFn()
n_ctx = n_ctx_from_model_name(name)
if is_chat_model(name):
return OpenAIChatCompletionFn(model=name, n_ctx=n_ctx, **kwargs)
elif name in self.api_model_ids:
return OpenAICompletionFn(model=name, n_ctx=n_ctx, **kwargs)
# No match, so try to find a completion-fn-id in the registry
spec = self.get_completion_fn(name)
if spec is None:
raise ValueError(f"Could not find CompletionFn in the registry with ID {name}")
if spec.args is None:
spec.args = {}
spec.args.update(kwargs)
spec.args["registry"] = self
instance = make_object(spec.cls)(**spec.args or {})
assert isinstance(instance, CompletionFn), f"{name} must be a CompletionFn"
return instance
def get_class(self, spec: EvalSpec) -> Any:
return make_object(spec.cls, **(spec.args if spec.args else {}))
def _dereference(
self, name: str, d: RawRegistry, object: str, type: Type[T], **kwargs: dict
) -> Optional[T]:
if name not in d:
logger.warning(
(
f"{object} '{name}' not found. "
f"Closest matches: {difflib.get_close_matches(name, d.keys(), n=5)}"
)
)
return None
def get_alias() -> Optional[str]:
if isinstance(d[name], str):
return d[name]
if isinstance(d[name], dict) and "id" in d[name]:
return d[name]["id"]
return None
logger.debug(f"Looking for {name}")
while True:
alias = get_alias()
if alias is None:
break
name = alias
spec = d[name]
if kwargs:
spec = copy.deepcopy(spec)
spec.update(kwargs)
try:
return type(**spec)
except TypeError as e:
raise TypeError(f"Error while processing {object} '{name}': {e}")
def get_modelgraded_spec(self, name: str, **kwargs: dict) -> Optional[ModelGradedSpec]:
assert name in self._modelgraded_specs, (
f"Modelgraded spec {name} not found. "
f"Closest matches: {difflib.get_close_matches(name, self._modelgraded_specs.keys(), n=5)}"
)
return self._dereference(
name, self._modelgraded_specs, "modelgraded spec", ModelGradedSpec, **kwargs
)
def get_completion_fn(self, name: str) -> Optional[CompletionFnSpec]:
return self._dereference(name, self._completion_fns, "completion_fn", CompletionFnSpec)
def get_eval(self, name: str) -> Optional[EvalSpec]:
return self._dereference(name, self._evals, "eval", EvalSpec)
def get_eval_set(self, name: str) -> Optional[EvalSetSpec]:
return self._dereference(name, self._eval_sets, "eval set", EvalSetSpec)
def get_evals(self, patterns: Sequence[str]) -> Iterator[Optional[EvalSpec]]:
# valid patterns: hello, hello.dev*, hello.dev.*-v1
def get_regexp(pattern: str) -> re.Pattern[str]:
pattern = pattern.replace(".", "\\.")
pattern = pattern.replace("*", ".*")
return re.compile(f"^{pattern}$")
regexps = list(map(get_regexp, patterns))
for name in self._evals:
# if any regexps match, return the name
if any(map(lambda regexp: regexp.match(name), regexps)):
yield self.get_eval(name)
def get_base_evals(self) -> list[Optional[BaseEvalSpec]]:
base_evals: list[Optional[BaseEvalSpec]] = []
for name, spec in self._evals.items():
if name.count(".") == 0:
base_evals.append(self.get_base_eval(name))
return base_evals
def get_base_eval(self, name: str) -> Optional[BaseEvalSpec]:
if name not in self._evals:
return None
spec_or_alias = self._evals[name]
if isinstance(spec_or_alias, dict):
spec = spec_or_alias
try:
return BaseEvalSpec(**spec)
except TypeError as e:
raise TypeError(f"Error while processing base eval {name}: {e}")
alias = spec_or_alias
return BaseEvalSpec(id=alias)
def _load_file(self, path: Path) -> Generator[Tuple[str, Path, dict], None, None]:
with open(path, "r", encoding="utf-8") as f:
d = yaml.safe_load(f)
if d is None or not isinstance(d, dict):
# no entries in the file
return
for name, spec in d.items():
yield name, path, spec
def _load_directory(self, path: Path) -> Generator[Tuple[str, Path, dict], None, None]:
files = Path(path).glob("*.yaml")
for file in files:
yield from self._load_file(file)
def _load_resources(
self, registry_path: Path, resource_type: str
) -> Generator[Tuple[str, Path, dict], None, None]:
path = registry_path / resource_type
logging.info(f"Loading registry from {path}")
if os.path.exists(path):
if os.path.isdir(path):
yield from self._load_directory(path)
else:
yield from self._load_file(path)
@staticmethod
def _validate_reserved_keywords(spec: dict, name: str, path: Path) -> None:
for reserved_keyword in SPEC_RESERVED_KEYWORDS:
if reserved_keyword in spec:
raise ValueError(
f"{reserved_keyword} is a reserved keyword, but was used in {name} from {path}"
)
def _load_registry(self, registry_paths: Sequence[Path], resource_type: str) -> RawRegistry:
"""Load registry from a list of regstry paths and a specific resource type
Each path includes yaml files which are a dictionary of name -> spec.
"""
registry: RawRegistry = {}
for registry_path in registry_paths:
for name, path, spec in self._load_resources(registry_path, resource_type):
assert name not in registry, f"duplicate entry: {name} from {path}"
self._validate_reserved_keywords(spec, name, path)
spec["key"] = name
spec["group"] = str(os.path.basename(path).split(".")[0])
spec["registry_path"] = registry_path
if "class" in spec:
spec["cls"] = spec["class"]
del spec["class"]
registry[name] = spec
return registry
@functools.cached_property
def _completion_fns(self) -> RawRegistry:
return self._load_registry(self._registry_paths, "completion_fns")
@functools.cached_property
def _eval_sets(self) -> RawRegistry:
return self._load_registry(self._registry_paths, "eval_sets")
@functools.cached_property
def _evals(self) -> RawRegistry:
return self._load_registry(self._registry_paths, "evals")
@functools.cached_property
def _modelgraded_specs(self) -> RawRegistry:
return self._load_registry(self._registry_paths, "modelgraded")
registry = Registry()
| [] |
2024-01-10 | schroederdewitt/evals | evals~utils~api_utils.py | """
This file defines various helper functions for interacting with the OpenAI API.
"""
import concurrent
import logging
import os
import backoff
import openai
EVALS_THREAD_TIMEOUT = float(os.environ.get("EVALS_THREAD_TIMEOUT", "40"))
@backoff.on_exception(
wait_gen=backoff.expo,
exception=(
openai.error.ServiceUnavailableError,
openai.error.APIError,
openai.error.RateLimitError,
openai.error.APIConnectionError,
openai.error.Timeout,
),
max_value=60,
factor=1.5,
)
def openai_completion_create_retrying(*args, **kwargs):
"""
Helper function for creating a completion.
`args` and `kwargs` match what is accepted by `openai.Completion.create`.
"""
result = openai.Completion.create(*args, **kwargs)
if "error" in result:
logging.warning(result)
raise openai.error.APIError(result["error"])
return result
def request_with_timeout(func, *args, timeout=EVALS_THREAD_TIMEOUT, **kwargs):
"""
Worker thread for making a single request within allotted time.
"""
while True:
with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(func, *args, **kwargs)
try:
result = future.result(timeout=timeout)
return result
except concurrent.futures.TimeoutError:
continue
@backoff.on_exception(
wait_gen=backoff.expo,
exception=(
openai.error.ServiceUnavailableError,
openai.error.APIError,
openai.error.RateLimitError,
openai.error.APIConnectionError,
openai.error.Timeout,
),
max_value=60,
factor=1.5,
)
def openai_chat_completion_create_retrying(*args, **kwargs):
"""
Helper function for creating a chat completion.
`args` and `kwargs` match what is accepted by `openai.ChatCompletion.create`.
"""
result = request_with_timeout(openai.ChatCompletion.create, *args, **kwargs)
if "error" in result:
logging.warning(result)
raise openai.error.APIError(result["error"])
return result
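# Minimal usage sketch for the retrying helper above (model and message are placeholder values):
# completion = openai_chat_completion_create_retrying(
#     model="gpt-3.5-turbo",
#     messages=[{"role": "user", "content": "Say hello"}],
# )
# A response that carries an "error" key is re-raised as openai.error.APIError, which is in the
# backoff exception tuple, so it is retried like any other transient failure.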
| [] |
2024-01-10 | shubham0831/Tooling | workflow-automator~main.py | meeting_notes = '''hello'''
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
from slack_sdk import WebClient
import re
import json
from jira import JIRA
anthropic = Anthropic(
# defaults to os.environ.get("ANTHROPIC_API_KEY")
api_key="",
)
# Replace these values with your own Jira instance URL and credentials
JIRA_SERVER = ''
JIRA_USERNAME = ''
JIRA_API_TOKEN = ''
# Slack API configuration
SLACK_TOKEN = ''
SLACK_CHANNEL = '#slack-bot-test-channel'
def generate_prompt(prompt_dict) -> tuple:
generated_prompt = "Here is a conversation between a human and you, go through it and analyze it, and then based on the context, response to the human question"
index = 0
while index in prompt_dict.keys():
convo = prompt_dict[index]
question = convo['prompt']
question = "human : " + question
generated_prompt = generated_prompt + "\n" + question
response = convo['reply']
if response != "":
response = "ai : " + response
generated_prompt = generated_prompt + "\n" + response
index+=1
return generated_prompt, index-1
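# Rough sketch of the expected behaviour (hypothetical two-turn history):
# generate_prompt({0: {"prompt": "hi", "reply": "ok"}, 1: {"prompt": "next", "reply": ""}})
# returns the concatenated transcript string together with last_index == 1.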
ai_behavior_prompt = '''
I will be sending you a transcript from a meeting. Based on this transcript I want you to do the following:
List all the tickets mentioned in this meeting, and for each ticket, give me the following output:
jira_ticket_number : ticket_number,
ticket_description : what is the original ticket about
user : the name of the user this ticket belongs to,
action_item : action item (eg. increase in story points, delayed release, new subtasks, etc)
previous_release_version: release version of the task (4.34 by default)
suggested_release_version: release version you suggest
previous_story_points: story point of the ticket (default value is 5)
suggested_story_points: story points you suggest
reasoning: reasoning for the change
For story points, if a task is taking longer than expected suggest an increase in the story points. Assume each task has a story point of 5, then based on how difficult it is proving
update the story point by 1,3,5, or 7
Assume each task is due for 4.34 release. If it seems like it will get delayed, push the release back to 4.35.
Each ticket can only belong to one user, and not multiple user, identify that user. It is usually the person who first gives updates on the task.
If a ticket has no action item, just put action item as "nothing to do"
If you suggest change for any story, provide your reasoning as well, in the reasoning section. Elaborate on this reasoning and don't be vague. If a person has mentioned a reason
about something being complex, figure out what in particular is complex and put that in reasoning.
Once you have done this for each ticket, give me a high level summary of this meeting.
Just reply ok to this message, I will send the transcript in the next message.
'''
# Create a Jira client instance
jira = JIRA(server=JIRA_SERVER, basic_auth=(JIRA_USERNAME, JIRA_API_TOKEN))
def update_story_point(ticket_number, new_story_points):
int_story_points = int(new_story_points)
print(f"Updating story points for {ticket_number} to {new_story_points}")
issue = jira.issue(ticket_number)
# for field_name in issue.raw['fields']:
# field_value = issue.raw['fields'][field_name]
# print(f'{field_name}: {field_value}')
issue.update(fields={"customfield_10016": int_story_points})
meeting_transcript_file= open("/Users/shubham/Code/personal/DigitalGarageHackathon/sample_meeting_transcript.txt", "r")
meeting_transcript = meeting_transcript_file.read()
meeting_transcript_file.close()
previousPrompts = {
0 : {
"prompt" : ai_behavior_prompt,
"reply" : "ok"
},
1 : {
"prompt" : f"Here's the transcript from the meeting. Please do the tasks I mention : \n {meeting_transcript}",
"reply" : ""
},
}
prompt, last_index = generate_prompt(previousPrompts)
completion = anthropic.completions.create(
model="claude-2",
max_tokens_to_sample=1000000,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
previousPrompts[last_index]['reply'] = completion.completion
print(completion.completion)
client = WebClient(token=SLACK_TOKEN)
result = client.users_list()
members = result.data['members']
split_notes = meeting_notes.split("\n\n")
ticket_dicts = []
keys_to_extract = [
'user',
'jira_ticket_number',
'ticket_description',
'action_item',
'previous_release_version',
'suggested_release_version',
'previous_story_points',
'suggested_story_points',
'reasoning'
]
for section in split_notes:
ticket_dict = {}
for key in keys_to_extract:
match = re.search(fr'{key}: (.+)', section)
if match:
ticket_dict[key] = match.group(1)
ticket_dicts.append(ticket_dict)
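# Each ticket_dict is expected to look roughly like this (values are hypothetical):
# {"user": "alice", "jira_ticket_number": "CLOUD-46", "action_item": "increase story points",
#  "previous_story_points": "5", "suggested_story_points": "8", ...}
# Every captured value is a string, which is why update_story_point casts the points to int.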
for ticket in ticket_dicts:
if 'suggested_story_points' in ticket and ticket['action_item'] != 'Nothing to do':
username = ticket['user']
str_representation = json.dumps(ticket)
# if ticket['jira_ticket_number'] == "CLOUD-46":
# print(ticket)
for m in members:
if m['name'] == username or m['name'] == "dmitriybaikov":
user_id = m['id']
# sent message to user, check for response in the future, for now just update the ticket
client.chat_postMessage(channel=user_id, text=str_representation)
# print(m)
if ticket['jira_ticket_number'] == "CLOUD-46" or ticket['jira_ticket_number'] == "CLOUD-47":
update_story_point(ticket['jira_ticket_number'], ticket['suggested_story_points'])
| [
"\n I will be sending you a transcript from a meeting. Based on this transcript I want you to do the following:\n\n List all the tickets mentioned in this meeting, and for each ticket, give me the following output:\n\n jira_ticket_number : ticket_number,\n ticket_description : what is the original ticket about\n user : the name of the user this ticket belongs to,\n action_item : action item (eg. increase in story points, delayed release, new subtasks, etc)\n previous_release_version: release version of the task (4.34 by default)\n suggested_release_version: release version you suggest\n previous_story_points: story point of the ticket (default value is 5)\n suggested_story_points: story points you suggest\n reasoning: reasoning for the change\n \n\n For story points, if a task is taking longer than expected suggest an increase in the story points. Assume each task has a story point of 5, then based on how difficult it is proving\n update the story point by 1,3,5, or 7\n\n Assume each task is due for 4.34 release. If it seems like it will get delayed, push the release back to 4.35.\n\n Each ticket can only belong to one user, and not multiple user, identify that user. It is usually the person who first gives updates on the task.\n\n If a ticket has no action item, just put action item as \"nothing to do\"\n\n If you suggest change for any story, provide your reasoning as well, in the reasoning section. Elaborate on this reasoning and don't be vague. If a person has mentioned a reason\n about something being complex, figure out what in particular is complex and put that in reasoning.\n\n Once you have done this for each ticket, give me a high level summary of this meeting.\n\n Just reply ok to this message, I will send the transcript in the next message.\n ",
"PLACEHOLDER\nPLACEHOLDER",
"{0: {'prompt': PLACEHOLDER, 'reply': 'ok'}, 1: {'prompt': \"Here's the transcript from the meeting. Please do the tasks I mention : \\n PLACEHOLDER\", 'reply': ''}}",
"Here is a conversation between a human and you, go through it and analyze it, and then based on the context, response to the human question",
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | kolubex/Megathon | gradio_app.py | import gradio as gr
from gradio.components import Textbox
# IMPORTS
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
import json
import pandas as pd
from operator import itemgetter
from transformers import AutoTokenizer, AutoModelForCausalLM
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders.csv_loader import CSVLoader
# Embed and store splits
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain import hub
from langchain.llms import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
from transformers import OpenAIGPTTokenizer, OpenAIGPTModel
import torch
from langchain.llms import HuggingFaceHub
from langchain.llms import HuggingFacePipeline
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, AutoModelForSeq2SeqLM
import time
# RAG chain
from langchain.schema.runnable import RunnablePassthrough
# torch.cuda.set_device('cpu')
# dont use cuda
device = torch.device('cpu')
def load_model():
embeddings_model_name = "alibidaran/medical_transcription_generator"
embeddings = HuggingFaceEmbeddings(model_name=embeddings_model_name)
vectorstore = Chroma(persist_directory="./vectorstore_train/", embedding_function=embeddings)
retriever = vectorstore.as_retriever()
rag_prompt = hub.pull("rlm/rag-prompt")
model_id = 'google/flan-t5-small'
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, load_in_8bit=False, device_map='cpu')
hf_pipeline = pipeline(
"text2text-generation",
model=model,
tokenizer=tokenizer,
max_length=128
)
hf = HuggingFacePipeline(pipeline=hf_pipeline)
from langchain.schema.runnable import RunnablePassthrough
rag_chain = (
{"context": retriever, "question": RunnablePassthrough()}
| rag_prompt
| hf
)
return rag_chain
def model_function(prompt, rag_chain):
start = time.time()
print("prompt: ", prompt)
response = rag_chain.invoke(prompt)
response += "\n" + "Time taken: " + str(time.time() - start) + " seconds"
return (response)
if __name__ == "__main__":
rag_chain = load_model()
print("model loaded")
# Define the interface
interface = gr.Interface(
fn=lambda prompt: model_function(prompt, rag_chain), # function to call
inputs=Textbox(lines=2, placeholder="Enter your text here..."), # text input
outputs="text", # text output
live=False # model is called only when the submit button is pressed
)
# Launch the interface
interface.launch(share=True)
| [
"rlm/rag-prompt"
] |
2024-01-10 | aldrinjenson/smart-qa | src~llm_models.py | import requests
import json
from src.constants import OPENAI_API_KEY
from openai import OpenAI
client = OpenAI()
client.api_key = OPENAI_API_KEY
def execute_with_ollama(query):
payload = {
"model": "mistral",
"format": "json",
"stream": False,
"messages": [{"role": "user", "content": query}],
}
payload_json = json.dumps(payload)
url = "http://localhost:11434/api/chat"
try:
response = requests.post(url, data=payload_json)
if response.status_code == 200:
response_data = response.json()
response_data = json.loads(response_data["message"]["content"])
print(response_data)
return response_data
else:
print(f"LLM Request failed with status code {response.status_code}")
return None
except requests.RequestException as e:
print(f"Request exception: {e}")
return None
def execute_with_openai(query):
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": query},
],
temperature=0.7,
max_tokens=64,
top_p=1,
response_format={"type": "json_object"},
)
response = completion.choices[0].message.content
print(response)
response = json.loads(response)
return response
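# Hedged usage sketch (the query text is illustrative; both helpers expect the prompt to ask
# for JSON output, since each parses the reply with json.loads):
# answer = execute_with_openai('Return JSON {"capital": string}: what is the capital of France?')
# local_answer = execute_with_ollama('Return JSON {"capital": string}: what is the capital of France?')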
| [] |
2024-01-10 | jocwulf/math_adventure | math_story_de.py | import os
import numpy as np
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from PIL import Image
# Konstanten
SENTENCES_PER_EPISODE = 5 # Anzahl der Sätze pro Episode
RIDDLE_MIN = 2 # Mindestzahl für Rätsel
MODEL = "gpt-3.5-turbo-0613" # Verwenden Sie ein besser performantes GPT-Modell, wenn möglich
# Laden des OpenAI-Schlüssels aus der .env-Datei
load_dotenv(".env")
openai.api_key = os.getenv("OPENAI_KEY")
# Bild laden
image = Image.open('children.png')
def reset_state():
"""Setzt den Sitzungsstatus zurück."""
keys = list(st.session_state.keys())
for key in keys:
del st.session_state[key]
def generate_riddle(calculation_type, riddle_max):
"""Generiert ein Rätsel und gibt die Frage und Antwort zurück."""
num1 = np.random.randint(RIDDLE_MIN, riddle_max)
num2 = np.random.randint(RIDDLE_MIN, riddle_max)
if calculation_type == "Addition":
question = "{} + {}".format(num1, num2)
answer = num1 + num2
elif calculation_type == "Subtraction":
question = "{} - {}".format(max(num1, num2), min(num1, num2))
answer = max(num1, num2) - min(num1, num2)
elif calculation_type == "Multiplication":
question = "{} * {}".format(num1, num2)
answer = num1 * num2
elif calculation_type == "Division":
product = num1 * num2
question = "{} / {}".format(product, num1)
answer = num2
return question, answer
def generate_story(messages):
"""Generiert eine Story-Episode mit der OpenAI-API und gibt die Story zurück."""
story = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0.5,
)
return story['choices'][0]['message']['content']
def generate_challenge():
"""Generiert eine Reihe von Rätseln und Geschichten für das Kind zum Lösen."""
st.session_state['right_answer'] = [] # Liste der richtigen Antworten
st.session_state['question'] = [] # Liste der Fragen
st.session_state['story'] = [] # Liste der Episoden
st.session_state['num_riddles'] = st.session_state['riddle_count'] # Anzahl der Rätsel, die im Sitzungsstatus bestehen bleiben
messages = [] # Liste der Nachrichten für OpenAI-Anfragen
# Systemnachricht für OpenAI-Anfragen
sys_message = """Erzähle einem siebenjährigen Kind eine Fortsetzungsgeschichte über {2}. Jede Episode besteht genau aus {0} Sätzen. Die Geschichte handelt vom Tag von {1}. Eine Episode der Geschichte besteht genau aus {0} Sätzen und nicht mehr. Beginne direkt mit der Erzählung. Beende jede Episode mit einem Matheproblem, das immer zuvor von [role: user] gestellt wird. Integriere das Matheproblem in die Erzählung der Episode. Stelle sicher, dass das Matheproblem korrekt formuliert ist. Gib die Lösung nicht. Durch das Lösen dieses Problems kann das Kind {1} helfen. Führe in der neuen Episode bereits erzählte Episoden fort und stelle ein neues Matheproblem. BITTE BEACHTEN: Gib die Lösung zum Matheproblem nicht. Verwende nur {0} Sätze. Beende das Ende mit dem Matheproblem.""".format(SENTENCES_PER_EPISODE, st.session_state['person'], st.session_state['topic'])
messages.append({"role": "system", "content": sys_message})
# Fortschrittsbalken erstellen
progress_bar = st.progress(0)
status_message = st.empty()
status_message.text("Ich generiere deine Geschichte...")
for i in range(st.session_state['riddle_count']): # Rätsel und Geschichten generieren
# Fortschrittsbalken aktualisieren
progress_bar.progress((i + 1) / st.session_state['riddle_count'])
# Rätsel generieren
calculation_type = np.random.choice(st.session_state['calculation_type'])
question, answer = generate_riddle(calculation_type, st.session_state['riddle_max'])
messages.append({"role": "user", "content": question})
# Geschichte generieren
story = generate_story(messages)
messages.append({"role": "assistant", "content": story})
# Rätsel und Geschichte im Sitzungsstatus speichern
st.session_state.right_answer.append(answer)
st.session_state.question.append(question)
st.session_state.story.append(story)
# Finale Episode erstellen
messages.pop(0) # Erstes Element in der Nachrichtenliste entfernen (Systemnachricht)
messages.append({"role": "user", "content": "Beende die Geschichte in fünf Sätzen. Füge kein Matheproblem hinzu."})
story = generate_story(messages)
st.session_state.story.append(story)
st.session_state['current_task'] = 0 # Verfolgt die aktuelle Episode
status_message.empty() # Statusnachricht entfernen
return st.session_state['story'][0] # Gibt die erste Episode zurück
def on_input_change():
"""Verarbeitet die Eingabe des Kindes und überprüft, ob sie korrekt ist."""
user_input = st.session_state["user_input"+str(st.session_state['current_task'])] # Benutzereingabe holen
st.session_state['past'].append(user_input) # Benutzereingabe im Sitzungsstatus speichern
if user_input == st.session_state.right_answer[st.session_state['current_task']]: # Benutzereingabe ist korrekt
# Überprüfen, ob alle Aufgaben erledigt sind
if st.session_state['current_task'] == st.session_state['num_riddles']-1: # Alle Aufgaben sind erledigt
st.session_state['generated'].append(st.session_state['story'][st.session_state['current_task']+1]) # Finale Episode generieren
st.session_state['finished'] = True # Fertig-Flag setzen
else: # Nicht alle Aufgaben sind erledigt
st.session_state['current_task']+=1 # Aufgabenzähler erhöhen
st.session_state['generated'].append(st.session_state['story'][st.session_state['current_task']]) # Nächste Episode für die Ausgabe anhängen
else: # Benutzereingabe ist falsch
st.session_state.generated.append("Nicht ganz richtig. Versuche es noch einmal! " + st.session_state.question[st.session_state['current_task'] ]) # Falsche Nachricht zur Ausgabe hinzufügen
if 'end_story' not in st.session_state:
st.session_state['end_story'] = False
if 'input_done' not in st.session_state:
st.session_state['input_done'] = False
st.title("༼ ͡ಠ ͜ʖ ͡ಠ ༽ Dein Mathe-Abenteuer")
st.image(image, use_column_width=True, caption = "3 * 2 = 7 ???")
if st.session_state['end_story']: # Geschichte beendet
st.write("Die Geschichte ist zu Ende.")
if st.button("Eine neue Geschichte starten"):
reset_state()
else: # Geschichte nicht beendet
if st.session_state['input_done'] == False:
with st.sidebar:
st.selectbox("Wie viele Matheaufgaben möchtest du lösen?", [3, 5, 7, 10], key="riddle_count", index=0)
st.multiselect("Wähle den Rechentyp", ["Addition", "Subtraktion", "Multiplikation", "Division"], key="calculation_type", default=["Addition", "Subtraktion", "Multiplikation", "Division"])
st.selectbox("Wähle den Zahlenbereich", ["1 Stelle (1-9)", "2 Stellen (1-99)"], key="number_range", index=0)
st.text_input("Wer soll die Hauptfigur Deiner Geschichte sein?", key="person")
st.text_input("Was willst Du mit Deiner Hauptfigur erleben?", key="topic")
if st.button("Die Geschichte starten", key="start_btn"):
st.session_state['input_done'] = True
if st.session_state['number_range'] == "1 Stelle (1-9)":
st.session_state['riddle_max'] = 9
else:
st.session_state['riddle_max'] = 99
if st.session_state['input_done']:
if 'past' not in st.session_state:
st.session_state['past']=['Hier werden deine Antworten angezeigt.']
if 'generated' not in st.session_state:
st.session_state['generated'] = [generate_challenge()]
if 'finished' not in st.session_state:
st.session_state['finished'] = False
chat_placeholder = st.empty()
with chat_placeholder.container():
# st.write(st.session_state.story) # zum Debuggen
for i in range(len(st.session_state['generated'])):
message(str(st.session_state['past'][i]), is_user=True, key=str(i) + '_user')
message(
st.session_state['generated'][i],
key=str(i)
)
if not st.session_state['finished']:
with st.container():
st.number_input("Deine Lösung:", min_value=-1, max_value=100,
value=-1, step=1, on_change=on_input_change,
key="user_input"+str(st.session_state['current_task']))
if st.button("Die Geschichte beenden", key="end_btn"):
st.session_state['end_story'] = True
| [
"Beende die Geschichte in fünf Sätzen. Füge kein Matheproblem hinzu."
] |
2024-01-10 | jocwulf/math_adventure | math_story_en.py | import os
import numpy as np
import streamlit as st
import openai
from dotenv import load_dotenv
from streamlit_chat import message
from PIL import Image
# Constants
SENTENCES_PER_EPISODE = 5 # number of sentences per episode
RIDDLE_MIN = 2 # minimum number for riddles
MODEL = "gpt-3.5-turbo-0613" # use a better performing gpt model if possible
# Load OpenAI key from .env file
load_dotenv(".env")
openai.api_key = os.getenv("OPENAI_KEY")
# Load image
image = Image.open('children.png')
def reset_state():
"""Resets the session state."""
keys = list(st.session_state.keys())
for key in keys:
del st.session_state[key]
def generate_riddle(calculation_type, riddle_max):
"""Generates a riddle and returns the question and answer."""
num1 = np.random.randint(RIDDLE_MIN, riddle_max)
num2 = np.random.randint(RIDDLE_MIN, riddle_max)
if calculation_type == "Addition":
question = "{} + {}".format(num1, num2)
answer = num1 + num2
elif calculation_type == "Subtraction":
question = "{} - {}".format(max(num1, num2), min(num1, num2))
answer = max(num1, num2) - min(num1, num2)
elif calculation_type == "Multiplication":
question = "{} * {}".format(num1, num2)
answer = num1 * num2
elif calculation_type == "Division":
product = num1 * num2
question = "{} / {}".format(product, num1)
answer = num2
return question, answer
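# Illustrative outcomes (operands are random, so exact values vary):
# generate_riddle("Subtraction", 9) might return ("8 - 3", 5); the larger operand always comes first.
# generate_riddle("Division", 9) might return ("12 / 3", 4); the dividend is built as num1 * num2,
# so the quotient is always a whole number.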
def generate_story(messages):
"""Generates a story episode using the OpenAI API and returns the story."""
story = openai.ChatCompletion.create(
model=MODEL,
messages=messages,
temperature=0.5,
)
return story['choices'][0]['message']['content']
def generate_challenge():
"""Generates a set of riddles and stories for the child to solve."""
st.session_state['right_answer'] = [] # list of right answers
st.session_state['question'] = [] # list of questions
st.session_state['story'] = [] # list of episodes
st.session_state['num_riddles'] = st.session_state['riddle_count'] # number of riddles persistent in session state
messages = [] # list of messages for openai requests
# system message for openai requests
sys_message = """Tell a seven-year-old child a continuation story about {2}. Each episode consists of exactly {0} sentences. The story is about the day of {1}. An episode of the story consists of exactly {0} sentences and no more. Start directly with the narration. End each episode with a math problem, which is always posed by [role: user] beforehand. Integrate the math problem into the narration of the episode. Make sure the math problem is correctly formulated. Do not give the solution. By solving this problem, the child can help {1}. Continue in the new episode already told episodes and pose a new math problem. PLEASE NOTE: Do not give the solution to the math problem. Use only {0} sentences. End the end with the math problem.""".format(SENTENCES_PER_EPISODE, st.session_state['person'], st.session_state['topic'])
messages.append({"role": "system", "content": sys_message})
# Create a progress bar
progress_bar = st.progress(0)
status_message = st.empty()
status_message.text("I am generating your story...")
for i in range(st.session_state['riddle_count']): # generate riddles and stories
# Update the progress bar
progress_bar.progress((i + 1) / st.session_state['riddle_count'])
# generate riddle
calculation_type = np.random.choice(st.session_state['calculation_type'])
question, answer = generate_riddle(calculation_type, st.session_state['riddle_max'])
messages.append({"role": "user", "content": question})
# generate story
story = generate_story(messages)
messages.append({"role": "assistant", "content": story})
# save riddle and story to session state
st.session_state.right_answer.append(answer)
st.session_state.question.append(question)
st.session_state.story.append(story)
# create final episode
messages.pop(0) # remove first item in messages list (system message)
messages.append({"role": "user", "content": "Finish the story in five sentences. Do not include a math problem."})
story = generate_story(messages)
st.session_state.story.append(story)
st.session_state['current_task'] = 0 # keeps track of the current episode
status_message.empty() # remove the status message
return st.session_state['story'][0] # return first episode
def on_input_change():
"""Handles child input and checks if it is correct."""
user_input = st.session_state["user_input"+str(st.session_state['current_task'])] # get user input
st.session_state['past'].append(user_input) # save user input to session state
if user_input == st.session_state.right_answer[st.session_state['current_task']]: # user input is correct
#check if all tasks done
if st.session_state['current_task'] == st.session_state['num_riddles']-1: # all tasks are done
st.session_state['generated'].append(st.session_state['story'][st.session_state['current_task']+1]) # generate final episode
st.session_state['finished'] = True # set finished flag
else: # not all tasks are done
st.session_state['current_task']+=1 # increase current task counter
st.session_state['generated'].append(st.session_state['story'][st.session_state['current_task']]) # append next episode for output
else: # user input is wrong
st.session_state.generated.append("Not quite right. Try again! " + st.session_state.question[st.session_state['current_task'] ]) # append wrong message to output
if 'end_story' not in st.session_state:
st.session_state['end_story'] = False
if 'input_done' not in st.session_state:
st.session_state['input_done'] = False
st.title("༼ ͡ಠ ͜ʖ ͡ಠ ༽ Your Math Adventure")
st.image(image, use_column_width=True, caption = "3 * 2 = 7 ???")
if st.session_state['end_story']: # story ended
st.write("The story has ended.")
if st.button("Start a new story"):
reset_state()
else: # story not ended
if st.session_state['input_done'] == False:
with st.sidebar:
st.selectbox("How many math problems would you like to solve?", [3, 5, 7, 10], key="riddle_count", index=0)
st.multiselect("Choose the calculation type", ["Addition", "Subtraction", "Multiplication", "Division"], key="calculation_type", default=["Addition", "Subtraction", "Multiplication", "Division"])
st.selectbox("Choose the number range", ["1 digit (1-9)", "2 digits (1-99)"], key="number_range", index=0)
st.text_input("Provide a character for your story", key="person")
st.text_input("Provide a topic for your story", key="topic")
if st.button("Start the story", key="start_btn"):
st.session_state['input_done'] = True
if st.session_state['number_range'] == "1 digit (1-9)":
st.session_state['riddle_max'] = 9
else:
st.session_state['riddle_max'] = 99
if st.session_state['input_done']:
if 'past' not in st.session_state:
st.session_state['past']=['Here your answers are shown.']
if 'generated' not in st.session_state:
st.session_state['generated'] = [generate_challenge()]
if 'finished' not in st.session_state:
st.session_state['finished'] = False
chat_placeholder = st.empty()
with chat_placeholder.container():
# st.write(st.session_state.story) # for debugging
for i in range(len(st.session_state['generated'])):
message(str(st.session_state['past'][i]), is_user=True, key=str(i) + '_user')
message(
st.session_state['generated'][i],
key=str(i)
)
if not st.session_state['finished']:
with st.container():
st.number_input("Your solution:", min_value=-1, max_value=100,
value=-1, step=1, on_change=on_input_change,
key="user_input"+str(st.session_state['current_task']))
if st.button("End the story", key="end_btn"):
st.session_state['end_story'] = True
| [
"Finish the story in five sentences. Do not include a math problem."
] |
2024-01-10 | setohe0909/langchain-multiple-files | utils~methods.py | import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
# Vector database such as: faiss, pinecone
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from utils.htmlTemplates import bot_template, user_template
from langchain.llms import HuggingFaceHub
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdfReader = PdfReader(pdf)
for page in pdfReader.pages:
text += page.extract_text() # Extract content from the page
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunk = text_splitter.split_text(text)
return chunk
def get_vectorstore(text_chunks):
# embeddings = OpenAIEmbeddings() #--> open-ai
embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl") #--> https://huggingface.co/hkunlp/instructor-xl
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI()
# Using llm from huggingface
#llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
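# Hedged wiring sketch showing how these helpers are typically chained from the Streamlit app
# (pdf_docs is assumed to come from st.file_uploader; names are illustrative):
# raw_text = get_pdf_text(pdf_docs)
# chunks = get_text_chunks(raw_text)
# vectorstore = get_vectorstore(chunks)
# st.session_state.conversation = get_conversation_chain(vectorstore)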
def handle_userinput(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
| [] |
2024-01-10 | htrangn/ItsTrangNguyen | pages~ChatBot.py | import openai
import streamlit as st
with st.sidebar:
activities2 = ["Chat", "About"]
choice = st.sidebar.selectbox("Select Activity", activities2)
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
"[OpenAI's Platform website](https://platform.openai.com/account/api-keys)"
"[Instruct to get an OpenAI API key](https://www.howtogeek.com/885918/how-to-get-an-openai-api-key/)"
st.write("View the source code: ", "[GITHUB](https://github.com/htrangn/ItsTrangNguyen/edit/main/pages/ChatBot.py)")
markdown = """
Web App URL: <https://facapp.streamlit.app>
"""
st.sidebar.info(markdown)
if choice == "Chat":
st.title("💬 CHATBOT")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
openai.api_key = openai_api_key
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.content)
elif choice == "About":
st.subheader("About this app")
st.write("This app was made by Nguyen H.Trang")
st.write("This app requires an OpenAI API key to activate")
st.write("This chatbot is designed to deliver a seamless conversational experience with its natural language processing capabilities")
| [
"How can I help you?"
] |
2024-01-10 | JasonJbrg/mayachat | PycharmProjects~pythonProject~venv~chatapp.py | import openai
import streamlit as st
from streamlit_chat import message as msg
from translate import Translator
from pydub import AudioSegment
from pydub.playback import play
import os
from dotenv import load_dotenv
import json
import docx
import io
from docx import Document
from datetime import datetime
# Load environment variables from .env file
load_dotenv()
# Read the config.json file
with open("config.json") as file:
config = json.load(file)
# Extract the values from the config dictionary
task_selection = config["task_selection"]
initial_context = {
task: f"{config['initial_context'][task]} Please provide brief and concise responses."
for task in task_selection
}
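# Hedged sketch of the config.json shape this code expects (keys below are required by the
# lookups above; the task names and context text are purely illustrative):
# {
#   "task_selection": ["Cafe Owner", "Market Vendor"],
#   "initial_context": {"Cafe Owner": "You are Hamza, a cafe owner...", "Market Vendor": "..."}
# }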
load_dotenv() # take environment variables from .env.
openai.api_key = os.getenv('OPENAI_KEY')
# Set page configuration
st.set_page_config(
page_title="Maya Lingo",
page_icon="/Users/jasons/PycharmProjects/pythonProject/venv/static/jedburghlogo_webicon.png",
layout="wide",
initial_sidebar_state="expanded"
)
# Apply styles
streamlit_style = """
<style>
@import url('https://fonts.googleapis.com/css2?family=IBM+Plex+Mono&display=swap');
html, body, [class*="css"] {
font-family: 'IBM Plex Mono', monospace;
background: #4F5223;
}
.custom-title {
overflow: hidden;
color: white;
font-size: 1.3em;
animation: typewriter 4s steps(50) 1s both;
white-space: nowrap;
padding-bottom: 50px;
}
@keyframes typewriter {
0% {
width: 0;
}
100% {
width: 100%;
}
}
</style>
"""
st.markdown(streamlit_style, unsafe_allow_html=True)
st.markdown('<h1 class="custom-title">WELCOME, AIRMAN ALLIE</h1>', unsafe_allow_html=True)
# If 'custom_title' is not in st.session_state, assign a default title
if 'custom_title' not in st.session_state:
st.session_state.custom_title = "WELCOME, AIRMAN ALLIE"
app_name = st.session_state.custom_title
# Create subheader
st.markdown("""
<div style='border: 2px solid white; padding: 10px;'>
<h2 style="margin: 0; font-size: 14px; padding: 1em; font-family: 'IBM Plex Mono', monospace;">Hamza, Cafe Owner</h2>
</div>
""", unsafe_allow_html=True)
# Set default task
if 'selected_task' not in st.session_state:
st.session_state.selected_task = task_selection[0]
st.session_state.selected_task = st.sidebar.radio("Select Task", task_selection)
# Initialize the Translator
translator = Translator(to_lang="en", from_lang="ar")
# Initialize chat history in session state
if 'hst_chat' not in st.session_state:
st.session_state.hst_chat = []
if 'hst_chat_time' not in st.session_state:
st.session_state.hst_chat_time = []
# Get user input
user_prompt = st.text_input("Start your chat (in Arabic):")
btn_enter = st.button("Enter")
# When 'Enter' button is clicked
if btn_enter:
# Get the current timestamp
current_time = datetime.now()
# Add user's message and timestamp to chat history
st.session_state.hst_chat.append({"role": "user", "content": user_prompt})
st.session_state.hst_chat_time.append(current_time)
# Load specific words from tcv.txt file
with open("/Users/jasons/PycharmProjects/pythonProject/venv/tcv.txt", "r", encoding="utf-8") as file:
specific_words = [word.strip() for word in file.readlines()]
# Check if user's input has any of the specific words
# If yes, play ding sound
user_input_words = user_prompt.split()
matching_words = set(specific_words).intersection(user_input_words)
if matching_words:
ding_sound_path = "/Users/jasons/PycharmProjects/pythonProject/venv/audio/tcv_match.mp3"
ding_sound = AudioSegment.from_file(ding_sound_path)
play(ding_sound)
MAX_TOKENS = 500
MAX_TOKENS_PER_MESSAGE = 50
# Prepare the conversation for the chat model
conversation = [
{"role": "assistant", "content": initial_context[st.session_state.selected_task]},
] + st.session_state.hst_chat
# Calculate the total number of tokens in the conversation
total_tokens = sum(len(message['content'].split()) for message in conversation)
# Check if the total tokens exceed the maximum allowed limit
if total_tokens > MAX_TOKENS:
# Remove messages until the total tokens is below the limit
excess_tokens = total_tokens - MAX_TOKENS
removed_tokens = 0
removed_messages = 0
# Iterate through the conversation messages from the most recent backwards
for i in range(len(conversation) - 1, -1, -1):
message_tokens = len(conversation[i]['content'].split())
if removed_tokens + message_tokens <= excess_tokens:
# Remove the entire message
removed_tokens += message_tokens
removed_messages += 1
else:
# Remove a portion of the message
tokens_to_remove = excess_tokens - removed_tokens
conversation[i]['content'] = ' '.join(conversation[i]['content'].split()[:-tokens_to_remove])
break
# Remove the excess messages from the conversation
conversation = conversation[:-removed_messages]
# Split messages into multiple parts if they exceed the maximum tokens per message
split_conversation = []
current_message = {"role": conversation[0]["role"], "content": ""}
for message in conversation[1:]:
tokens_in_message = len(message["content"].split())
if len(current_message["content"].split()) + tokens_in_message > MAX_TOKENS_PER_MESSAGE:
split_conversation.append(current_message)
current_message = {"role": message["role"], "content": message["content"]}
else:
current_message["content"] += " " + message["content"]
if current_message["content"]:
split_conversation.append(current_message)
# Use OpenAI API to get a response from the chat model
responses = []
for split_message in split_conversation:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[split_message],
max_tokens=MAX_TOKENS_PER_MESSAGE,
n=1
)
responses.append(response['choices'][0]['message']['content'])
# Add assistant's response to the chat history
for response in responses:
assistant_response = response
st.session_state.hst_chat.append({"role": "assistant", "content": assistant_response})
st.session_state.hst_chat_time.append(datetime.now())
else:
# Use OpenAI API to get a response from the chat model
return_openai = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
max_tokens=MAX_TOKENS,
n=1
)
# Add assistant's response to the chat history
assistant_response = return_openai['choices'][0]['message']['content']
st.session_state.hst_chat.append({"role": "assistant", "content": assistant_response})
st.session_state.hst_chat_time.append(datetime.now())
# Display chat history
if st.session_state.hst_chat:
for i in range(len(st.session_state.hst_chat)):
if i % 2 == 0:
# msg("You: " + st.session_state.hst_chat[i]['content'], is_user=True)
st.markdown(f"<div style='text-align: left; color: black; background-color: rgba(206, 187, 163, 0.5); '>You: {st.session_state.hst_chat[i]['content']}</div>", unsafe_allow_html=True)
else:
# msg(st.session_state.selected_task + ": " + st.session_state.hst_chat[i]['content'])
st.markdown(f"<div style='text-align: left; color: black; background-color: rgba(206, 187, 163, 1.0);'>{st.session_state.selected_task}: {st.session_state.hst_chat[i]['content']}</div>", unsafe_allow_html=True)
# Translation button for user input
if i % 2 == 0:
translation_expander = st.expander("Show User Translation")
with translation_expander:
translation_result = translator.translate(st.session_state.hst_chat[i]['content'])
if isinstance(translation_result, str):
translation = translation_result
else:
translation = translation_result.text
st.write(translation)
# Translation button for assistant responses
else:
translation_expander = st.expander("Show Assistant Translation")
with translation_expander:
translation_result = translator.translate(st.session_state.hst_chat[i]['content'])
if isinstance(translation_result, str):
translation = translation_result
else:
translation = translation_result.text
st.write(translation)
# If chat history exists, show the 'Save & Export' button
btn_save = st.button("Save & Export")
if btn_save:
# Create a Word document with chat history
doc = Document()
# Add the custom title with date and time to the document
custom_title = f"{st.session_state.custom_title} - {datetime.now().strftime('%m/%d/%Y %I:%M:%S %p')}"
doc.add_paragraph(custom_title)
doc.add_paragraph("")
# Calculate the total duration
total_duration = st.session_state.hst_chat_time[-1] - st.session_state.hst_chat_time[0]
# Add the total duration to the document
doc.add_paragraph(f"Total Duration: {total_duration}")
# Add the custom title, task selection and initial context to the document
doc.add_paragraph(f"Custom Title: {st.session_state.custom_title}")
doc.add_paragraph(f"Task Selection: {st.session_state.selected_task}")
doc.add_paragraph(f"Initial Context: {initial_context[st.session_state.selected_task]}")
doc.add_paragraph("")
# Add the chat history to the document
for message in st.session_state.hst_chat:
doc.add_paragraph(f"{message['role']}: {message['content']}")
# Save the Document into memory
f = io.BytesIO()
doc.save(f)
# Format current date and time
now = datetime.now()
    date_time = now.strftime("%m%d%Y_%H%M%S")  # no slashes or colons so the download file name stays valid
# Append date and time to the file name
f.name = st.session_state.custom_title + "_" + date_time + '.docx'
f.seek(0)
# Download button for chat history Word document
st.download_button(
label="Download chat history",
data=f,
file_name=f.name,
mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
) | [
"content",
"Start your chat (in Arabic):"
] |
2024-01-10 | JasonJbrg/mayachat | venv~chatapp.py | import openai
import streamlit as st
from streamlit_chat import message as msg
from translate import Translator
from pydub import AudioSegment
from pydub.playback import play
import os
from dotenv import load_dotenv
import json
import docx
import io
from docx import Document
from datetime import datetime
import random
# Load environment variables from .env file
dotenv_path = "PycharmProjects/.env"
load_dotenv(dotenv_path)
# Read the config.json file
with open("venv/config.json") as file:
config = json.load(file)
# Extract the values from the config dictionary
task_selection = config["task_selection"]
initial_context = {
task: f"{config['initial_context'][task]} Please provide brief and concise responses."
for task in task_selection
}
greetings = config["greetings"]
load_dotenv('/Users/jasons/PycharmProjects/pythonProject/PycharmProjects/.env')
# Set OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
# Set page configuration
st.set_page_config(
page_title="Maya Lingo",
layout="wide",
)
hide_streamlit_style = """
<style>
@import url('https://fonts.googleapis.com/css2?family=IBM+Plex+Mono&display=swap');
.css-uf99v8 {
font-family: 'IBM Plex Mono', monospace;
background-color: #4F5223;
}
.stChatFloatingInputContainer.css-usj992.ehod42b2 {
font-family: 'IBM Plex Mono', monospace;
background-color: #4F5223;
}
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
# Custom title
st.markdown("""
<div style="
background-color: black;
padding: 5px;
font-family: 'IBM Plex Mono', monospace;
font-size: 35px;
color: white;
margin-top: -90px;
width: 100%;
font-weight: bold;
">
<p style="margin: 0; text-align: left;">Welcome,</p>
<p style="margin: 0; text-align: left;">Airman Allie</p>
</div>
""", unsafe_allow_html=True)
# Add a default option to the languages dictionary
languages = {'Select Language': ''}
languages.update({
'Chinese': 'zh',
'Russian': 'ru',
'Arabic': 'ar',
'Japanese': 'ja',
'Farsi': 'fa',
'Spanish': 'es',
'German': 'de',
'Levantine Arabic': 'apc' # ISO 639-3 code for Levantine Arabic
})
# Define default values for selected_language and selected_task
selected_language = 'Select Language'
selected_task = 'Select Topic'
# Initialize new_message and return_openai to None
new_message = None
return_openai = None
# Get user input for language selection
selected_language = st.selectbox("", list(languages.keys()), key='language_selection')
if selected_language != 'Select Language':
# Initialize the Translator with the selected language
translator = Translator(to_lang="en", from_lang=languages[selected_language])
# Initialize two Translator objects with appropriate language settings
translator_to_en = Translator(from_lang=languages[selected_language], to_lang="en")
translator_from_en = Translator(from_lang="en", to_lang=languages[selected_language])
# Add a default option to the task_selection list
task_selection = ['Select Topic'] + task_selection
# Get user input for task selection
selected_task = st.selectbox(" ", task_selection, key='task_selection')
# Only update the selected task in session state if a task is selected
if selected_task != 'Select Topic':
st.session_state.selected_task = selected_task
    # Only proceed if a task is selected and the chat history is empty
    # (initialize the history first so this check cannot fail on the first run)
    if 'hst_chat' not in st.session_state:
        st.session_state.hst_chat = []
        st.session_state.hst_chat_time = []
    if not st.session_state.hst_chat:
# Update the selected task in session state
st.session_state.selected_task = selected_task
# Choose a random greeting for the selected task
greeting = random.choice(greetings[selected_task])
# Translate the greeting to the target language using translator_from_en
greeting_translated = translator_from_en.translate(greeting)
st.session_state.hst_chat.append({"role": "assistant", "content": greeting_translated})
st.session_state.hst_chat_time.append(datetime.now())
# Get user input
if 'selected_task' in st.session_state:
prompt = st.chat_input("Say something")
if prompt:
new_message = {"role": "user", "content": prompt}
# Token limits for the chat model (defined here so they exist before the first API call below)
MAX_TOKENS = 500
MAX_TOKENS_PER_MESSAGE = 50
# Initialize conversation in session state if not already present
if 'conversation' not in st.session_state:
    st.session_state.conversation = []
# Check if a new message was submitted
if new_message is not None:
# Add user's original response to the chat history
st.session_state.hst_chat.append(new_message)
st.session_state.hst_chat_time.append(datetime.now())
# Add user's response to the conversation
st.session_state.conversation.append(new_message)
# Only generate a response if the last message was from the user
    if st.session_state.hst_chat and st.session_state.hst_chat[-1]["role"] == "user":
# Use OpenAI API to get a response from the chat model
return_openai = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=st.session_state.conversation,
max_tokens=MAX_TOKENS,
n=1
)
# Add assistant's response to the chat history
if return_openai:
assistant_response = return_openai['choices'][0]['message']['content']
st.session_state.hst_chat.append({"role": "assistant", "content": assistant_response})
st.session_state.hst_chat_time.append(datetime.now())
# Add a default option to the task_selection list (skip if it was already added above)
if task_selection[0] != 'Select Topic':
    task_selection = ['Select Topic'] + task_selection
# Initialize chat history in session state if not already present
if 'hst_chat' not in st.session_state:
st.session_state.hst_chat = []
if 'hst_chat_time' not in st.session_state:
st.session_state.hst_chat_time = []
# Only proceed if a task is selected and the chat history is empty
if selected_task != 'Select Topic' and not st.session_state.hst_chat:
# Update the selected task in session state
st.session_state.selected_task = selected_task
# Choose a random greeting for the selected task
greeting = random.choice(greetings[selected_task])
# Translate the greeting to the target language using translator_from_en
greeting_translated = translator_from_en.translate(greeting)
st.session_state.hst_chat.append({"role": "assistant", "content": greeting_translated})
st.session_state.hst_chat_time.append(datetime.now())
# Update the selected task in session state
st.session_state.selected_task = selected_task
MAX_TOKENS = 500
MAX_TOKENS_PER_MESSAGE = 50
# Define a function to get the initial context
def get_initial_context(task):
if task is not None and task in initial_context:
return initial_context[task]
else:
return "Please select a task."
# Initialize conversation to an empty list
conversation = []
# Prepare the conversation for the chat model
if 'selected_task' in st.session_state and st.session_state.selected_task is not None and st.session_state.selected_task in initial_context:
conversation = [
{"role": "assistant", "content": initial_context[st.session_state.selected_task]},
] + st.session_state.hst_chat
else:
# Handle case where st.session_state.selected_task is None or does not exist in initial_context
conversation = [
{"role": "assistant", "content": "Please select a valid task."},
] + st.session_state.hst_chat
# Only generate a response if the last message was from the user
if conversation and conversation[-1]["role"] == "user":
# Use OpenAI API to get a response from the chat model
return_openai = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
max_tokens=MAX_TOKENS,
n=1
)
# Calculate the total number of tokens in the conversation
total_tokens = sum(len(message['content'].split()) for message in conversation)
# Check if the total tokens exceed the maximum allowed limit
if total_tokens > MAX_TOKENS:
# Remove messages until the total tokens is below the limit
excess_tokens = total_tokens - MAX_TOKENS
removed_tokens = 0
removed_messages = 0
    # Iterate through the conversation messages from the end (newest first)
for i in range(len(conversation) - 1, -1, -1):
message_tokens = len(conversation[i]['content'].split())
if removed_tokens + message_tokens <= excess_tokens:
# Remove the entire message
removed_tokens += message_tokens
removed_messages += 1
else:
# Remove a portion of the message
tokens_to_remove = excess_tokens - removed_tokens
conversation[i]['content'] = ' '.join(conversation[i]['content'].split()[:-tokens_to_remove])
break
    # Remove the fully-dropped messages from the end (slicing with 0 would wipe the list)
    if removed_messages:
        conversation = conversation[:-removed_messages]
# Split messages into multiple parts if they exceed the maximum tokens per message
split_conversation = []
current_message = {"role": conversation[0]["role"], "content": ""}
for message in conversation[1:]:
tokens_in_message = len(message["content"].split())
if len(current_message["content"].split()) + tokens_in_message > MAX_TOKENS_PER_MESSAGE:
split_conversation.append(current_message)
current_message = {"role": message["role"], "content": message["content"]}
else:
current_message["content"] += " " + message["content"]
if current_message["content"]:
split_conversation.append(current_message)
# Use OpenAI API to get a response from the chat model
responses = []
for split_message in split_conversation:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[split_message],
max_tokens=MAX_TOKENS_PER_MESSAGE,
n=1
)
responses.append(response['choices'][0]['message']['content'])
# Add assistant's response to the chat history
for response in responses:
assistant_response = response
st.session_state.hst_chat.append({"role": "assistant", "content": assistant_response})
st.session_state.hst_chat_time.append(datetime.now())
else:
# Use OpenAI API to get a response from the chat model
return_openai = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=conversation,
max_tokens=MAX_TOKENS,
n=1
)
# Add assistant's response to the chat history
assistant_response = return_openai['choices'][0]['message']['content']
st.session_state.hst_chat.append({"role": "assistant", "content": assistant_response})
st.session_state.hst_chat_time.append(datetime.now())
# Display chat history
if st.session_state.hst_chat:
for i in range(len(st.session_state.hst_chat)):
if st.session_state.hst_chat[i]["role"] == "user":
st.markdown(
f"<div style='text-align: left; color: black; background-color: rgba(206, 187, 163, 0.5); '>You: {st.session_state.hst_chat[i]['content']}</div>",
unsafe_allow_html=True)
elif st.session_state.hst_chat[i]["role"] == "assistant":
st.markdown(
f"<div style='text-align: left; color: black; background-color: rgba(206, 187, 163, 1.0);'>{st.session_state.selected_task}: {st.session_state.hst_chat[i]['content']}</div>",
unsafe_allow_html=True)
# Translation expander for user input
if i % 2 == 0:
translation_expander = st.expander("Show User Translation", expanded=False)
with translation_expander:
# Use translator_to_en for user's messages
translation_result = translator_to_en.translate(st.session_state.hst_chat[i]['content'])
st.write(translation_result)
# Translation expander for assistant responses
else:
translation_expander = st.expander("Show Assistant Translation")
with translation_expander:
# Use translator_to_en for assistant's responses
# We are assuming that the assistant's responses are not in English.
# If they are in English, you do not need to translate them.
translation_result = translator_to_en.translate(st.session_state.hst_chat[i]['content'])
st.write(translation_result)
# If chat history exists, show the 'Save & Export' button
if st.session_state.hst_chat:
btn_save = st.button("Save & Export")
if btn_save:
# Create a Word document with chat history
doc = Document()
# Add the current date and time to the document
doc.add_paragraph(datetime.now().strftime('%m/%d/%Y %I:%M:%S %p'))
# Calculate the total duration
total_duration = st.session_state.hst_chat_time[-1] - st.session_state.hst_chat_time[0]
# Add the total duration to the document
doc.add_paragraph(f"Total Duration: {total_duration}")
# Add the chat history to the document
for message in st.session_state.hst_chat:
doc.add_paragraph(f"{message['role']}: {message['content']}")
# Save the Document into memory
f = io.BytesIO()
doc.save(f)
# Format current date and time
now = datetime.now()
date_time = now.strftime("%m%d%Y_%H%M%S") # Changed format to remove slashes and colons
# Append date and time to the file name
f.name = "Chat_History_" + date_time + '.docx' # Changed to a static string "Chat_History_"
f.seek(0)
# Download button for chat history Word document
st.download_button(
label="Download chat history",
data=f,
file_name=f.name,
mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document'
)
| [
"content",
"Please select a valid task.",
"Say something"
] |
2024-01-10 | njogued/alx-higher_level_programming | zz~play_api.py | import openai
import os
from playwright.sync_api import sync_playwright
jd = input("Enter the link to the job: ")
with sync_playwright() as s:
browser = s.firefox.launch()
page = browser.new_page()
page.goto(jd)
summary = page.query_selector_all('p')
print(summary)
browser.close()
# text = input("Enter the prompt: ")
text = "In 100 words or less, explain the domestication of cats"
# length = input("Max words output: ")
length = 1000
print(os.getenv("OPENAI_API_KEY"))
openai.api_key = os.getenv("OPENAI_API_KEY")  # read the key from the environment instead of a placeholder
response = openai.Completion.create(
model="text-davinci-003",
prompt = text,
temperature=0.7,
max_tokens=length
)
prompt = response.choices[0].text
print(prompt)
| [
"In 100 words or less, explain the domestication of cats"
] |
2024-01-10 | takuma-yoneda/dl | rl~envs~dummy_vec_env.py | """Adapt SubprocVecEnv to save environment state. Adapted from OpenAI Baselines."""
import numpy as np
from baselines.common.vec_env import VecEnv
from dl.rl import env_state_dict, env_load_state_dict
from dl import nest
class DummyVecEnv(VecEnv):
"""
    VecEnv that runs multiple environments sequentially, that is,
    the step and reset commands are sent to one environment at a time.
Useful when debugging and when num_env == 1 (in the latter case,
avoids communication overhead)
"""
def __init__(self, env_fns):
"""
Arguments:
env_fns: iterable of callables functions that build environments
"""
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space,
env.action_space)
self.transitions = [None for _ in range(self.num_envs)]
self.actions = None
self.spec = self.envs[0].spec
def step_async(self, actions):
def _numpy_check(ac):
if not isinstance(ac, np.ndarray):
raise ValueError("You must pass actions as nested numpy arrays"
" to DummyVecEnv.")
nest.map_structure(_numpy_check, actions)
self.actions = actions
def step_wait(self):
active = [False for _ in range(self.num_envs)]
for e in range(self.num_envs):
if self.transitions[e] is None or not self.transitions[e][2]: # if episode is over:
action = nest.map_structure(lambda ac: ac[e], self.actions)
self.transitions[e] = self.envs[e].step(action)
active[e] = True
obs, rs, dones, infos = zip(*self.transitions)
for e, info in enumerate(infos):
info['active'] = active[e]
obs = nest.map_structure(np.stack, nest.zip_structure(*obs))
return obs, np.stack(rs), np.stack(dones), infos
def reset(self, force=True):
if not force:
return self._reset_done_envs()
obs = [self.envs[e].reset() for e in range(self.num_envs)]
self.transitions = [None for _ in range(self.num_envs)]
return nest.map_structure(np.stack, nest.zip_structure(*obs))
def _reset_done_envs(self):
obs = []
for e in range(self.num_envs):
if self.transitions[e] is None or self.transitions[e][2]:
self.transitions[e] = None
obs.append(self.envs[e].reset())
else:
obs.append(self.transitions[e][0])
return nest.map_structure(np.stack, nest.zip_structure(*obs))
def get_images(self):
return [env.render(mode='rgb_array') for env in self.envs]
def render(self, mode='human'):
if self.num_envs == 1:
return self.envs[0].render(mode=mode)
else:
return super().render(mode=mode)
def close_extras(self):
for env in self.envs:
env.close()
self.closed = True
def state_dict(self):
env_states = []
for e in self.envs:
env_states.append(env_state_dict(e))
return {'env_states': env_states}
def load_state_dict(self, state_dict):
for e, state in zip(self.envs, state_dict['env_states']):
if isinstance(state, list):
# this could happen if the state was saved with a subproc env
state = state[0]
env_load_state_dict(e, state)
if __name__ == "__main__":
import unittest
import gym
from gym import Wrapper
class StateWrapper(Wrapper):
# hack to save random state from lunar lander env.
def __init__(self, env):
super().__init__(env)
def step(self, action):
return self.env.step(action)
def state_dict(self):
return {'rng': self.env.np_random.get_state()}
def load_state_dict(self, state_dict):
self.env.np_random.set_state(state_dict['rng'])
def make_env(nenv, seed=0):
def _env(rank):
def _thunk():
env = gym.make('LunarLander-v2')
env = StateWrapper(env)
env.seed(seed + rank)
return env
return _thunk
return DummyVecEnv([_env(i) for i in range(nenv)])
class TestDummyVecEnv(unittest.TestCase):
"""Test DummyVecEnv"""
def test(self):
nenv = 4
env = make_env(nenv)
obs = env.reset()
env2 = make_env(nenv)
obs2 = env2.reset()
env3 = make_env(nenv, seed=1)
obs3 = env3.reset()
assert np.allclose(obs, obs2)
assert not np.allclose(obs, obs3)
for _ in range(100):
actions = np.array([env.action_space.sample()
for _ in range(nenv)])
ob, r, done, _ = env.step(actions)
ob2, r2, done2, _ = env2.step(actions)
assert np.allclose(ob, ob2)
assert np.allclose(r, r2)
assert np.allclose(done, done2)
env3.load_state_dict(env.state_dict())
ob = env.reset()
ob3 = env3.reset()
assert np.allclose(ob, ob3)
for _ in range(100):
actions = np.array([env.action_space.sample()
for _ in range(nenv)])
ob, r, done, _ = env.step(actions)
ob3, r3, done3, _ = env3.step(actions)
assert np.allclose(ob, ob3)
assert np.allclose(r, r3)
assert np.allclose(done, done3)
dones = [False for _ in range(nenv)]
obs = [None for _ in range(nenv)]
while not np.all(dones):
actions = np.array([env.action_space.sample()
for _ in range(nenv)])
ob, r, new_dones, _ = env.step(actions)
for e, d in enumerate(new_dones):
if dones[e]:
assert d
assert np.allclose(ob[e], obs[e])
obs[e] = ob[e]
dones = new_dones
env.reset(force=False)
unittest.main()
| [] |
2024-01-10 | sergiomirazo/Curso_python_ARJE-10-2023 | Cortana.py | import pyttsx3
import tkinter as tk
import openai
engine = pyttsx3.init()
engine.setProperty('rate', 125)
class Ship:
def __init__(self, Name, Speed, Accel, Shields, Health, Coor, Radar, Trans):
self.Name = Name
self.Speed = Speed
self.Accel = Accel
self.Shields = Shields
self.Health = Health
self.Coor = Coor
self.Radar = Radar
self.Trans = Trans
def message(self):
print(self.Trans)
engine.say(self.Trans)
engine.runAndWait()
def sys_status(self):
if self.Radar:
engine.say("ALERTA! Amenaza detectada")
else:
engine.say("No hay amenazas cercanas")
if self.Shields and self.Health==100:
engine.say("Escudos cargados")
engine.say("Todos los sistemas en línea")
print("Escudos cargados \nTodos los sistemas en línea")
engine.runAndWait()
elif self.Health<100 and self.Shields:
engine.say("Daños estructurales detectados")
self.Health = 100
engine.say("El equipo de reparación se hizo cargo")
engine.say("Escudos cargados")
engine.runAndWait()
elif self.Health<100 and self.Health>20 and not self.Shields:
engine.say("Daños estructurales detectados")
self.Health = 100
engine.say("El equipo de reparación se hizo cargo")
engine.say("Escudos críticos")
engine.say("Cargando escudos")
self.Shields = True
engine.runAndWait()
elif self.Health < 20 and not self.Shields:
engine.say("ADVERTENCIA!... Daños severos estructurales detectados")
self.Health = 100
engine.say("El equipo de reparación se hizo cargo")
engine.say("Escudos críticos")
engine.say("Cargando escudos")
self.Shields = True
engine.runAndWait()
ship = Ship("Endurance", 900, 52, True, 100, "Arp 286, AR: 14h 20m 20s / Dec: +3º 56′", False, "Hola mundo")
system = f"Eres Cortana, la IA que administra la nave {ship.Name}, velocidad: {ship.Speed}, aceleración: {ship.Accel},nuestas coordenadas: {ship.Coor} "
def Consulta():
global label2
global label3
global system
global engine
label2.destroy()
label3.destroy()
prompt = entry1.get()
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt+system,
temperature = 0.7,
max_tokens = 2600,
top_p = 1.0,
n=1)
response_text = response["choices"][0]["text"]
print(response_text)
engine.say(response_text)
engine.runAndWait()
label2 = tk.Label(root, text=prompt, font=('helvetica', 12), bg=BG_, fg='yellow')
window.create_window(250, 350, window=label2)
label3 = tk.Label(root, text=response_text, wraplength=360, anchor="n", bg=BG_, fg=FG_, font=('helvetica', 10))
window.create_window(250, 700, window=label3)
root = tk.Tk()
BG_ = '#220d8c'
FG_ = '#e6e1ff'
openai.api_key = 'TU_CLAVE_DE_OPENAI'  # You must replace this string with your OpenAI API key
window = tk.Canvas(root, width=500, height=1000, relief='raised', bg=BG_ )
window.pack(fill='x')
label1 = tk.Label(root, text='Hacer consulta:')
label1.config(font=('helvetica', 22, 'bold'), bg=BG_, fg=FG_)
window.create_window(250, 125, window=label1)
label2 = tk.Label(root, text=' ', font=('helvetica', 12, 'bold'), bg=BG_ , fg=FG_)
window.create_window(250, 600, window=label2)
label3 = tk.Label(root, text=' ', font=('helvetica', 12, 'bold'), bg=BG_ , fg='yellow' )
window.create_window(250, 400, window=label3)
entry1 = tk.Entry(root)
entry1.config(font=('helvetica', 12), bg=FG_, fg='black')
window.create_window(200, 240, window=entry1)
button1 = tk.Button(text='Ingresar', command=Consulta, bg='#1e8248', fg=FG_)
window.create_window(330, 240, window=button1)
button2 = tk.Button(text='Status', command=ship.sys_status, bg='#ab1eb0', fg=FG_)
window.create_window(270, 300, window=button2)
button3 = tk.Button(text='Mensaje', command=ship.message, bg='#ab1eb0', fg=FG_)
window.create_window(330, 300, window=button3)
root.mainloop()
| [
"PLACEHOLDERf\"Eres Cortana, la IA que administra la nave {ship.Name}, velocidad: {ship.Speed}, aceleración: {ship.Accel},nuestas coordenadas: {ship.Coor} "
] |
2024-01-10 | jungwoo3490/Telegram_GPT | Telegram_GPT.py | import telegram
import asyncio
from openai import OpenAI
# Create the client; the API key is read from the OPENAI_API_KEY environment variable
client = OpenAI()
completion = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{"role": "system", "content": "너는 동화속 공주야."},
{"role": "user", "content": "오늘 무슨 음식을 먹을지 추천해줘. json"}
],
response_format={"type": "json_object"}
)
token = "6722824989:AAFcr_3QSlHeaRG3EHSl_WFZhYpU0CRWSw0"
bot = telegram.Bot(token = token)
chat_id = "-1002143232599"
text = completion.choices[0].message.content
asyncio.run(bot.sendMessage(chat_id = chat_id , text=text)) | [
"오늘 무슨 음식을 먹을지 추천해줘. json",
"너는 동화속 공주야."
] |
2024-01-10 | ChrisXiaoShu/ChatPDF | window.py | import os
import PySimpleGUI as sg
from lib.util import create_embedding_vectorstore, load_file, split_text
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
def create_file_conversation_chain(api_key, file_path):
if api_key is None:
sg.popup("Please enter your OpenAI API Key!")
return None
if file_path is None:
sg.popup("Please upload a file first!")
os.environ["OPENAI_API_KEY"] = api_key
# load file
loader = load_file(file_path)
# split text
texts = split_text(loader)
# create embedding vectorstore
vectorstore = create_embedding_vectorstore(texts)
return ConversationalRetrievalChain.from_llm(ChatOpenAI(temperature=0), vectorstore.as_retriever())
def question_processor(conversation_chain):
chat_history = []
def _processor(question):
answer = conversation_chain({"question": question, 'chat_history': chat_history})
chat_history.append((question, answer['answer']))
return "\n".join([f"Q: {q}\nA: {a}" for q, a in chat_history])
return _processor
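
# Hedged usage sketch (illustrative only; the key and file name below are made up):
# the closure returned by question_processor keeps its own chat history, so each
# call adds a new Q/A pair to the transcript it returns.
#
#   chain = create_file_conversation_chain("sk-your-key", "example.pdf")
#   ask = question_processor(chain)
#   print(ask("What is this document about?"))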
# Create the layout of the window
layout = [
[sg.Text("Enter your OpenAI API Key:")],
[sg.InputText(key="-KEY_TEXT-")],
[sg.Text("Select a PDF file:")],
[sg.Input(key="-FILE-"), sg.FileBrowse(file_types=(("PDF Files", "*.pdf", "*.txt"),)), sg.Button("Upload")],
[sg.Multiline(key="-ROLLER-", size=(100, 40), enable_events=True, autoscroll=True, reroute_stdout=True)],
[sg.InputText(key="-INPUT-", size=(90, 2)), sg.Button("Send")]
]
# Create the window
window = sg.Window("ChatPDF", layout, size=(700, 600))
# Initialize variables
processor = None
# Event loop to process window events
while True:
event, values = window.read()
if event == sg.WINDOW_CLOSED or event == "Cancel":
break
elif event == "Upload":
api_key = values["-KEY_TEXT-"]
file_name = values["-FILE-"]
qa_chain = create_file_conversation_chain(api_key, file_name)
processor = question_processor(qa_chain)
elif event == "Send":
# Raise an alert if no PDF file uploaded
if not processor:
sg.popup("Please init a qa processor by upload a file and enter your OpenAI API Key!")
continue
question = values["-INPUT-"]
result = processor(question)
window["-ROLLER-"].update(result)
window["-INPUT-"].update("")
# Close the window
window.close() | [] |
2024-01-10 | ChrisXiaoShu/ChatPDF | ai_model.py | # create basic AI model class with template model variable and has get_chain and run methods
from typing import Any
from chainlit import Message
from langchain import ConversationChain, LLMChain, PromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from chainlit.lc.agent import run_langchain_agent
from chainlit.config import config
class AIModel:
"""Base class for AI models."""
template: str
prompt: PromptTemplate
model: BaseLanguageModel
def get_chain(self, **kwargs: Any) -> Any:
raise NotImplementedError("get_chain not implemented")
def run(self, **kwargs: Any) -> Any:
"""Run the model."""
chain = self.get_chain(**kwargs)
return chain.run(**kwargs)
class BartenderAI(AIModel):
model = ChatOpenAI(temperature=0)
template = """The following is a friendly conversation between a Customer and an BartenderAI. The BartenderAI is a professional bartender and help Customer find a cocktail that suits. AI should guide Customer in choosing a cocktail that is tailored to its preferences. BartenderAI should understand Customer preferences based on Customer preferred texture, type of alcohol, taste, or personal characteristics. please don't recommend a particular cocktail to Customer. AI job is merely understand Customer preference. And don't ask too complex question make question simple and one at a time. 請用繁體中文與我對答案。
Current conversation:
{history}
Customer: {input}
BartenderAI:
"""
prompt = PromptTemplate(template=template, input_variables=["history", "input"])
def get_chain(self, **kwargs: Any) -> Any:
return ConversationChain(
prompt=self.prompt,
llm=self.model,
memory=ConversationBufferMemory()
)
class SummaryPreferenceAI(AIModel):
model = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
template = """You're now a professional bartender, and the following is the conversation between the Customer and Bartender, please summary the customer preference from the following conversation in 繁體中文
Current conversation:
{history}
"""
prompt = PromptTemplate(template=template, input_variables=["history"])
def get_chain(self, **kwargs: Any) -> Any:
return LLMChain(llm=self.model, prompt=self.prompt)
async def run(self, conversation_history) -> Message:
chain = self.get_chain()
raw_res, output_key = await run_langchain_agent(
agent=chain , input_str=conversation_history, use_async=config.code.lc_agent_is_async
)
if output_key is not None:
# Use the output key if provided
res = raw_res[output_key]
else:
# Otherwise, use the raw response
res = raw_res
# Finally, send the response to the user
return Message(author=config.ui.name, content=res)
class RecommendAI(AIModel):
model = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
template = """you are acting as a professional bartender, you know much about the customer preference, and can recommend the right one to your customer. The below is the menu, please choice one of the cocktail based on the customer preference, elaborate the reason why you recommend and reply in 繁體中文
here is Customer preference:
-----------------------
{preferences}
-----------------------
here is the menu:
-----------------------
酒名 價格(NTD) 基底酒款 其他成分 酒感等級 口感描述
青雲閤 400 Gin琴酒 Nordes gin諾帝斯琴酒/St.germain接骨木花利口酒/Skinos乳香利口酒/Jasmine Syrup自製茉莉花糖漿/Citrus Acid檸檬酸液/tonic通寧水 2 微甜/花香味強烈/清爽/氣泡感
和泉樓 400 Gin琴酒 Generous Gin Azur大方琴酒/Crème de Violet 紫羅蘭利口酒/Lime Juice萊姆汁/Lavender Syrup自製薰衣草糖漿/La Caravedo Pisco秘魯白蘭地 3.5 偏酸爽口/如同香水的強烈花香
醉花園 450 Homemade Rose Liqueur自製玫瑰利口酒 Homemade Rose Liqueur自製玫瑰利口酒/Red Repper Syrup粉紅胡椒糖漿/Hendricks Flora Adora Gin亨利爵士花神/Latic Acid乳酸/Cream鮮奶油/Egg White蛋白/Soda Water蘇打水 3 蛋糕般的綿密奶泡/主體玫瑰花香帶一絲粉紅胡椒的偏甜辛香
鐵觀音 400 vodka伏特加 Tieguanyin tea infused vodka鐵觀音伏特加/Cointreau君度橙酒/Crème de peach水蜜桃利口酒 2 水蜜桃甜香為前調/中調展現鐵觀音培茶風味/清爽的氣泡/酒體輕盈
文山包種 400 Gin琴酒 Wen Shan Pochong包種茶琴酒/Pavan葡萄利口酒/Lavender Leaf Syrup自製薰衣草片糖漿/Lemon juice檸檬汁 3 偏甜爽口/花草香/麝香葡萄與橙花氣味為中調/茶香做為後韻
金萱 430 White Wine白葡萄酒 Jin Xuan Tea Infused White Wine金萱茶白葡萄酒/Pineapple Sage Infused Apple Whiskey鳳梨鼠尾草蘋果威士忌/Chamomile Cordial洋甘菊風味液/Cream cheese Foam奶油起司泡沫 3 上層奶泡起司蛋糕風味呼應金萱茶獨特奶香/中調強烈洋甘菊轉為鼠尾草與蘋果的清新/微苦茶感與葡萄弱酸做結尾
東方美人 $450 V.S.O.P brandy白蘭地 Driental beauty infused V.S.O.P brandy 東方美人茶白蘭地/Sesame芝麻/Adriatico鹽味杏仁利口酒/Fig Leaf Syrup無花果葉糖漿/Blackwalnut Bitters 黑核桃苦精/Selva Ray Chocolate巧克力蘭姆酒 4 初聞明顯可可香而後是杏仁與無花果葉類的堅果氣息/接著輕微苦韻洗滌口腔後茶感才慢悠悠出現
北港甜湯米糕粥 $430 Whiskey威士忌 Longan Infused Whiskey自製桂圓威士忌/Sticy Rice圓糯米/Macallan 12years麥卡倫12年/Cannanmon Bitters自製肉桂苦精 3 翻玩70年歷史甜品/甜而不膩的大人甜湯/桂圓的蜜味與雪莉桶威士忌完美融合/些許肉桂味添加層次/有趣的食用型調酒
阿嬌姨烤魷魚 $430 Vodka伏特加/ Whiskey泥煤威士忌 Squid Infused Vodka自制烤魷魚伏特加/ Talisker Storm Whiskey/Black Cardamom黑荳蔻/Basil Syrup羅勒糖漿/Citrus Acid檸檬酸/Cucumber Soda Water黃瓜口味氣泡水/Squid Slices網狀魷魚片 3.5 出乎意料的味覺組合/輕微的黑荳蔻模擬出炭烤的煙燻味/帶有鹹感的威士忌襯托魷魚鮮香/小黃瓜與氣泡帶來清爽結尾
童年記憶愛玉冰 $400 Bamboo Leaves Infused Vermouth自製竹葉苦艾酒 Bamboo Leaves Infused Vermouth自製竹葉苦艾酒/Ice Jelly愛玉/Homemade Limocello自製檸檬利口酒/White Wine Cardamom Syrup白酒荳蔻糖漿 3.5 竹葉香與檸檬甜感結合後接葡萄微酸/輕微的香料做結尾/吃得到愛玉喔
香煙裊裊龍山寺 $430 Gin琴酒 Tanquerary No.10/Skinos希臘乳香酒/Sandalwood Infused Gin檀香木琴酒/Selva Ray Coconut Rum椰子蘭姆酒/Malibu椰子香甜酒 5 椰子氣味鋪陳檀香木質氣息/順口度高/如同佛珠與佛堂的既視感香氣
民風淳樸剝皮寮 $420 Vodka伏特加/ Gin琴酒 Don Julio Blanco/Peeled Pepper Infused Vodka自製剝皮辣椒伏特加/East 135 GinㄥSoy Sauce手工醬油/Clarify Tomato Juice澄清番茄汁/ Ginger Ale薑汁汽水/Umami Bitters旨味苦精 3 氣泡爽口/輕微香菇與番茄鮮味/尾巴有些許辣椒熱感/不會辣
日皇御用摩納卡 $430 Whiskey泥煤威士忌 Arbeg10y/Red Beans杜瓦小豆香甜酒/Luxardo Apricot杏桃香甜酒/Milk牛奶/Hawthorn Miso Campari Monaka仙楂味增金巴利最中餅 2.5 前味紅豆氣味明顯/中段杏桃果香參雜煙燻味/大人味奶酒
阿寶師的咖哩酥 400 Whiskey威士忌 Pork Floss Infused Whiskey肉鬆威士忌/Curry Syrup咖哩糖漿/Carrot Juice胡蘿蔔汁 3 甜味型調酒/咖哩氣味轉為肉鬆帶來的輕微脂感/尾韻為胡蘿蔔自然清甜
懸壺濟世青草巷退火養肝茶 400 gin琴酒 Cheerful Crackers Infused gin自製奇福餅乾琴酒/Burdock Infused Frangelico 自製牛蒡榛果香甜酒/Dita荔枝香甜酒/Grassleef Sweetflag Rhizome石菖蒲/Falernum法勒南香甜酒/Suze龍膽草香甜酒 3.5 苦甜型調酒/牛蒡與龍膽草結合使苦味不再單調/中調由石菖蒲與法勒南特有的香料譜出/奇福餅乾的油脂感作為橋樑銜接所有風味
清涼百草茶 400 Herbal Tea Wine青草茶酒 Herbal Tea Wine青草茶酒/Vecchio amaro del capo義大利藥草酒/Asiatic Worm wood杜瓦艾草香甜酒/Dita荔枝香甜酒/Fernet Branca義大利苦味香甜酒 4 中式草本遇上西式藥酒/清甜中帶微苦/艾草香銜接荔枝果香/
駐顏美人湯 400 Brandy白蘭地 La Caravedo Pisco秘魯白蘭地/White wine cardamom syrup自製白酒荳蔻糖漿/Aloe Liqueur蘆薈香甜酒/Chartreuse修道院香甜酒/Acid Solution酸味模擬液/Aloe Water澄清蘆薈汁/Ruta Graveolens Spray芸香噴霧 3 入口時可聞到雲香帶有甜感的獨特香氣/蘆薈為主軸的清新類花香/尾韻香水白蘭地葡萄香與荳蔻隱約浮現
新世紀冬瓜茶 360 Rum蘭姆酒 Spice White Gourd Drink自製香料冬瓜茶/Clarify Banana Juice澄清香蕉水/Soda Water蘇打水 3.5 香料增添冬瓜茶層次風味與香蕉熱帶水果氣味相輔相乘/輕微甜口
古早味楊桃湯 360 Gin琴酒 Star fruit juice鹽漬楊桃湯/Pineapple Juice鳳梨汁/Caramel焦糖/Tonic通寧水/Spite雪碧 3.5 楊桃湯輕微鹹味帶出甜感/焦糖鳳梨作為後味支撐
-----------------------
"""
prompt = PromptTemplate(template=template, input_variables=["preferences"])
def get_chain(self, **kwargs: Any) -> Any:
return LLMChain(llm=self.model, prompt=self.prompt)
async def run(self, preferences) -> Message:
chain = self.get_chain()
raw_res, output_key = await run_langchain_agent(
agent=chain , input_str=preferences, use_async=config.code.lc_agent_is_async
)
if output_key is not None:
# Use the output key if provided
res = raw_res[output_key]
else:
# Otherwise, use the raw response
res = raw_res
# Finally, send the response to the user
return Message(author=config.ui.name, content=res)
class DetailAI(AIModel):
model = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
template = """please make a story for the drink call {drinks} base on the following information in 繁體中文
Drinks information:
-----------------------
酒名 價格(NTD) 基底酒款 其他成分 酒感等級 口感描述
青雲閤 400 Gin琴酒 Nordes gin諾帝斯琴酒/St.germain接骨木花利口酒/Skinos乳香利口酒/Jasmine Syrup自製茉莉花糖漿/Citrus Acid檸檬酸液/tonic通寧水 2 微甜/花香味強烈/清爽/氣泡感
和泉樓 400 Gin琴酒 Generous Gin Azur大方琴酒/Crème de Violet 紫羅蘭利口酒/Lime Juice萊姆汁/Lavender Syrup自製薰衣草糖漿/La Caravedo Pisco秘魯白蘭地 3.5 偏酸爽口/如同香水的強烈花香
醉花園 450 Homemade Rose Liqueur自製玫瑰利口酒 Homemade Rose Liqueur自製玫瑰利口酒/Red Repper Syrup粉紅胡椒糖漿/Hendricks Flora Adora Gin亨利爵士花神/Latic Acid乳酸/Cream鮮奶油/Egg White蛋白/Soda Water蘇打水 3 蛋糕般的綿密奶泡/主體玫瑰花香帶一絲粉紅胡椒的偏甜辛香
鐵觀音 400 vodka伏特加 Tieguanyin tea infused vodka鐵觀音伏特加/Cointreau君度橙酒/Crème de peach水蜜桃利口酒 2 水蜜桃甜香為前調/中調展現鐵觀音培茶風味/清爽的氣泡/酒體輕盈
文山包種 400 Gin琴酒 Wen Shan Pochong包種茶琴酒/Pavan葡萄利口酒/Lavender Leaf Syrup自製薰衣草片糖漿/Lemon juice檸檬汁 3 偏甜爽口/花草香/麝香葡萄與橙花氣味為中調/茶香做為後韻
金萱 430 White Wine白葡萄酒 Jin Xuan Tea Infused White Wine金萱茶白葡萄酒/Pineapple Sage Infused Apple Whiskey鳳梨鼠尾草蘋果威士忌/Chamomile Cordial洋甘菊風味液/Cream cheese Foam奶油起司泡沫 3 上層奶泡起司蛋糕風味呼應金萱茶獨特奶香/中調強烈洋甘菊轉為鼠尾草與蘋果的清新/微苦茶感與葡萄弱酸做結尾
東方美人 $450 V.S.O.P brandy白蘭地 Driental beauty infused V.S.O.P brandy 東方美人茶白蘭地/Sesame芝麻/Adriatico鹽味杏仁利口酒/Fig Leaf Syrup無花果葉糖漿/Blackwalnut Bitters 黑核桃苦精/Selva Ray Chocolate巧克力蘭姆酒 4 初聞明顯可可香而後是杏仁與無花果葉類的堅果氣息/接著輕微苦韻洗滌口腔後茶感才慢悠悠出現
北港甜湯米糕粥 $430 Whiskey威士忌 Longan Infused Whiskey自製桂圓威士忌/Sticy Rice圓糯米/Macallan 12years麥卡倫12年/Cannanmon Bitters自製肉桂苦精 3 翻玩70年歷史甜品/甜而不膩的大人甜湯/桂圓的蜜味與雪莉桶威士忌完美融合/些許肉桂味添加層次/有趣的食用型調酒
阿嬌姨烤魷魚 $430 Vodka伏特加/ Whiskey泥煤威士忌 Squid Infused Vodka自制烤魷魚伏特加/ Talisker Storm Whiskey/Black Cardamom黑荳蔻/Basil Syrup羅勒糖漿/Citrus Acid檸檬酸/Cucumber Soda Water黃瓜口味氣泡水/Squid Slices網狀魷魚片 3.5 出乎意料的味覺組合/輕微的黑荳蔻模擬出炭烤的煙燻味/帶有鹹感的威士忌襯托魷魚鮮香/小黃瓜與氣泡帶來清爽結尾
童年記憶愛玉冰 $400 Bamboo Leaves Infused Vermouth自製竹葉苦艾酒 Bamboo Leaves Infused Vermouth自製竹葉苦艾酒/Ice Jelly愛玉/Homemade Limocello自製檸檬利口酒/White Wine Cardamom Syrup白酒荳蔻糖漿 3.5 竹葉香與檸檬甜感結合後接葡萄微酸/輕微的香料做結尾/吃得到愛玉喔
香煙裊裊龍山寺 $430 Gin琴酒 Tanquerary No.10/Skinos希臘乳香酒/Sandalwood Infused Gin檀香木琴酒/Selva Ray Coconut Rum椰子蘭姆酒/Malibu椰子香甜酒 5 椰子氣味鋪陳檀香木質氣息/順口度高/如同佛珠與佛堂的既視感香氣
民風淳樸剝皮寮 $420 Vodka伏特加/ Gin琴酒 Don Julio Blanco/Peeled Pepper Infused Vodka自製剝皮辣椒伏特加/East 135 GinㄥSoy Sauce手工醬油/Clarify Tomato Juice澄清番茄汁/ Ginger Ale薑汁汽水/Umami Bitters旨味苦精 3 氣泡爽口/輕微香菇與番茄鮮味/尾巴有些許辣椒熱感/不會辣
日皇御用摩納卡 $430 Whiskey泥煤威士忌 Arbeg10y/Red Beans杜瓦小豆香甜酒/Luxardo Apricot杏桃香甜酒/Milk牛奶/Hawthorn Miso Campari Monaka仙楂味增金巴利最中餅 2.5 前味紅豆氣味明顯/中段杏桃果香參雜煙燻味/大人味奶酒
阿寶師的咖哩酥 400 Whiskey威士忌 Pork Floss Infused Whiskey肉鬆威士忌/Curry Syrup咖哩糖漿/Carrot Juice胡蘿蔔汁 3 甜味型調酒/咖哩氣味轉為肉鬆帶來的輕微脂感/尾韻為胡蘿蔔自然清甜
懸壺濟世青草巷退火養肝茶 400 gin琴酒 Cheerful Crackers Infused gin自製奇福餅乾琴酒/Burdock Infused Frangelico 自製牛蒡榛果香甜酒/Dita荔枝香甜酒/Grassleef Sweetflag Rhizome石菖蒲/Falernum法勒南香甜酒/Suze龍膽草香甜酒 3.5 苦甜型調酒/牛蒡與龍膽草結合使苦味不再單調/中調由石菖蒲與法勒南特有的香料譜出/奇福餅乾的油脂感作為橋樑銜接所有風味
清涼百草茶 400 Herbal Tea Wine青草茶酒 Herbal Tea Wine青草茶酒/Vecchio amaro del capo義大利藥草酒/Asiatic Worm wood杜瓦艾草香甜酒/Dita荔枝香甜酒/Fernet Branca義大利苦味香甜酒 4 中式草本遇上西式藥酒/清甜中帶微苦/艾草香銜接荔枝果香/
駐顏美人湯 400 Brandy白蘭地 La Caravedo Pisco秘魯白蘭地/White wine cardamom syrup自製白酒荳蔻糖漿/Aloe Liqueur蘆薈香甜酒/Chartreuse修道院香甜酒/Acid Solution酸味模擬液/Aloe Water澄清蘆薈汁/Ruta Graveolens Spray芸香噴霧 3 入口時可聞到雲香帶有甜感的獨特香氣/蘆薈為主軸的清新類花香/尾韻香水白蘭地葡萄香與荳蔻隱約浮現
新世紀冬瓜茶 360 Rum蘭姆酒 Spice White Gourd Drink自製香料冬瓜茶/Clarify Banana Juice澄清香蕉水/Soda Water蘇打水 3.5 香料增添冬瓜茶層次風味與香蕉熱帶水果氣味相輔相乘/輕微甜口
古早味楊桃湯 360 Gin琴酒 Star fruit juice鹽漬楊桃湯/Pineapple Juice鳳梨汁/Caramel焦糖/Tonic通寧水/Spite雪碧 3.5 楊桃湯輕微鹹味帶出甜感/焦糖鳳梨作為後味支撐
-------------------------
"""
prompt = PromptTemplate(template=template, input_variables=["drinks"])
def get_chain(self, **kwargs: Any) -> Any:
return LLMChain(llm=self.model, prompt=self.prompt)
async def run(self, drinks) -> Message:
chain = self.get_chain()
raw_res, output_key = await run_langchain_agent(
agent=chain , input_str=drinks, use_async=config.code.lc_agent_is_async
)
if output_key is not None:
# Use the output key if provided
res = raw_res[output_key]
else:
# Otherwise, use the raw response
res = raw_res
# Finally, send the response to the user
return Message(author=config.ui.name, content=res) | [
"The following is a friendly conversation between a Customer and an BartenderAI. The BartenderAI is a professional bartender and help Customer find a cocktail that suits. AI should guide Customer in choosing a cocktail that is tailored to its preferences. BartenderAI should understand Customer preferences based on Customer preferred texture, type of alcohol, taste, or personal characteristics. please don't recommend a particular cocktail to Customer. AI job is merely understand Customer preference. And don't ask too complex question make question simple and one at a time. 請用繁體中文與我對答案。 \nCurrent conversation:\n{history}\nCustomer: {input}\nBartenderAI:\n",
"drinks",
"input",
"preferences",
"you are acting as a professional bartender, you know much about the customer preference, and can recommend the right one to your customer. The below is the menu, please choice one of the cocktail based on the customer preference, elaborate the reason why you recommend and reply in 繁體中文\nhere is Customer preference:\n-----------------------\n{preferences}\n-----------------------\n\nhere is the menu:\n-----------------------\n酒名\t價格(NTD)\t基底酒款\t其他成分\t酒感等級\t口感描述\n青雲閤\t400\tGin琴酒\tNordes gin諾帝斯琴酒/St.germain接骨木花利口酒/Skinos乳香利口酒/Jasmine Syrup自製茉莉花糖漿/Citrus Acid檸檬酸液/tonic通寧水\t2\t微甜/花香味強烈/清爽/氣泡感\n和泉樓\t400\tGin琴酒\tGenerous Gin Azur大方琴酒/Crème de Violet 紫羅蘭利口酒/Lime Juice萊姆汁/Lavender Syrup自製薰衣草糖漿/La Caravedo Pisco秘魯白蘭地\t3.5\t偏酸爽口/如同香水的強烈花香\n醉花園\t450\tHomemade Rose Liqueur自製玫瑰利口酒\tHomemade Rose Liqueur自製玫瑰利口酒/Red Repper Syrup粉紅胡椒糖漿/Hendricks Flora Adora Gin亨利爵士花神/Latic Acid乳酸/Cream鮮奶油/Egg White蛋白/Soda Water蘇打水\t3\t蛋糕般的綿密奶泡/主體玫瑰花香帶一絲粉紅胡椒的偏甜辛香\n鐵觀音\t400\tvodka伏特加\tTieguanyin tea infused vodka鐵觀音伏特加/Cointreau君度橙酒/Crème de peach水蜜桃利口酒\t2\t水蜜桃甜香為前調/中調展現鐵觀音培茶風味/清爽的氣泡/酒體輕盈\n文山包種\t400\tGin琴酒\tWen Shan Pochong包種茶琴酒/Pavan葡萄利口酒/Lavender Leaf Syrup自製薰衣草片糖漿/Lemon juice檸檬汁\t3\t偏甜爽口/花草香/麝香葡萄與橙花氣味為中調/茶香做為後韻\n金萱\t430\tWhite Wine白葡萄酒\tJin Xuan Tea Infused White Wine金萱茶白葡萄酒/Pineapple Sage Infused Apple Whiskey鳳梨鼠尾草蘋果威士忌/Chamomile Cordial洋甘菊風味液/Cream cheese Foam奶油起司泡沫\t3\t上層奶泡起司蛋糕風味呼應金萱茶獨特奶香/中調強烈洋甘菊轉為鼠尾草與蘋果的清新/微苦茶感與葡萄弱酸做結尾\n東方美人\t$450\tV.S.O.P brandy白蘭地\tDriental beauty infused V.S.O.P brandy 東方美人茶白蘭地/Sesame芝麻/Adriatico鹽味杏仁利口酒/Fig Leaf Syrup無花果葉糖漿/Blackwalnut Bitters 黑核桃苦精/Selva Ray Chocolate巧克力蘭姆酒\t4\t初聞明顯可可香而後是杏仁與無花果葉類的堅果氣息/接著輕微苦韻洗滌口腔後茶感才慢悠悠出現\n北港甜湯米糕粥\t$430\tWhiskey威士忌\tLongan Infused Whiskey自製桂圓威士忌/Sticy Rice圓糯米/Macallan 12years麥卡倫12年/Cannanmon Bitters自製肉桂苦精\t3\t翻玩70年歷史甜品/甜而不膩的大人甜湯/桂圓的蜜味與雪莉桶威士忌完美融合/些許肉桂味添加層次/有趣的食用型調酒\n阿嬌姨烤魷魚\t$430\tVodka伏特加/ Whiskey泥煤威士忌\tSquid Infused Vodka自制烤魷魚伏特加/ Talisker Storm Whiskey/Black Cardamom黑荳蔻/Basil Syrup羅勒糖漿/Citrus Acid檸檬酸/Cucumber Soda Water黃瓜口味氣泡水/Squid Slices網狀魷魚片\t3.5\t出乎意料的味覺組合/輕微的黑荳蔻模擬出炭烤的煙燻味/帶有鹹感的威士忌襯托魷魚鮮香/小黃瓜與氣泡帶來清爽結尾\n童年記憶愛玉冰\t$400\tBamboo Leaves Infused Vermouth自製竹葉苦艾酒\tBamboo Leaves Infused Vermouth自製竹葉苦艾酒/Ice Jelly愛玉/Homemade Limocello自製檸檬利口酒/White Wine Cardamom Syrup白酒荳蔻糖漿\t3.5\t竹葉香與檸檬甜感結合後接葡萄微酸/輕微的香料做結尾/吃得到愛玉喔\n香煙裊裊龍山寺\t$430\tGin琴酒\tTanquerary No.10/Skinos希臘乳香酒/Sandalwood Infused Gin檀香木琴酒/Selva Ray Coconut Rum椰子蘭姆酒/Malibu椰子香甜酒\t5\t椰子氣味鋪陳檀香木質氣息/順口度高/如同佛珠與佛堂的既視感香氣\n民風淳樸剝皮寮\t$420\tVodka伏特加/ Gin琴酒\tDon Julio Blanco/Peeled Pepper Infused Vodka自製剝皮辣椒伏特加/East 135 GinㄥSoy Sauce手工醬油/Clarify Tomato Juice澄清番茄汁/ Ginger Ale薑汁汽水/Umami Bitters旨味苦精\t3\t氣泡爽口/輕微香菇與番茄鮮味/尾巴有些許辣椒熱感/不會辣\n日皇御用摩納卡\t$430\tWhiskey泥煤威士忌\tArbeg10y/Red Beans杜瓦小豆香甜酒/Luxardo Apricot杏桃香甜酒/Milk牛奶/Hawthorn Miso Campari Monaka仙楂味增金巴利最中餅\t2.5\t前味紅豆氣味明顯/中段杏桃果香參雜煙燻味/大人味奶酒\n阿寶師的咖哩酥\t400\tWhiskey威士忌\tPork Floss Infused Whiskey肉鬆威士忌/Curry Syrup咖哩糖漿/Carrot Juice胡蘿蔔汁\t3\t甜味型調酒/咖哩氣味轉為肉鬆帶來的輕微脂感/尾韻為胡蘿蔔自然清甜\n懸壺濟世青草巷退火養肝茶\t400\tgin琴酒\tCheerful Crackers Infused gin自製奇福餅乾琴酒/Burdock Infused Frangelico 自製牛蒡榛果香甜酒/Dita荔枝香甜酒/Grassleef Sweetflag Rhizome石菖蒲/Falernum法勒南香甜酒/Suze龍膽草香甜酒\t3.5\t苦甜型調酒/牛蒡與龍膽草結合使苦味不再單調/中調由石菖蒲與法勒南特有的香料譜出/奇福餅乾的油脂感作為橋樑銜接所有風味\n清涼百草茶\t400\tHerbal Tea Wine青草茶酒\tHerbal Tea Wine青草茶酒/Vecchio amaro del capo義大利藥草酒/Asiatic Worm wood杜瓦艾草香甜酒/Dita荔枝香甜酒/Fernet Branca義大利苦味香甜酒\t4\t中式草本遇上西式藥酒/清甜中帶微苦/艾草香銜接荔枝果香/\n駐顏美人湯\t400\tBrandy白蘭地\tLa Caravedo Pisco秘魯白蘭地/White wine cardamom syrup自製白酒荳蔻糖漿/Aloe Liqueur蘆薈香甜酒/Chartreuse修道院香甜酒/Acid Solution酸味模擬液/Aloe Water澄清蘆薈汁/Ruta Graveolens 
Spray芸香噴霧\t3\t入口時可聞到雲香帶有甜感的獨特香氣/蘆薈為主軸的清新類花香/尾韻香水白蘭地葡萄香與荳蔻隱約浮現\n新世紀冬瓜茶\t360\tRum蘭姆酒\tSpice White Gourd Drink自製香料冬瓜茶/Clarify Banana Juice澄清香蕉水/Soda Water蘇打水\t3.5\t香料增添冬瓜茶層次風味與香蕉熱帶水果氣味相輔相乘/輕微甜口\n古早味楊桃湯\t360\tGin琴酒\tStar fruit juice鹽漬楊桃湯/Pineapple Juice鳳梨汁/Caramel焦糖/Tonic通寧水/Spite雪碧\t3.5\t楊桃湯輕微鹹味帶出甜感/焦糖鳳梨作為後味支撐\n-----------------------\n",
"You're now a professional bartender, and the following is the conversation between the Customer and Bartender, please summary the customer preference from the following conversation in 繁體中文\nCurrent conversation:\n{history}\n",
"please make a story for the drink call {drinks} base on the following information in 繁體中文 \nDrinks information:\n-----------------------\n酒名\t價格(NTD)\t基底酒款\t其他成分\t酒感等級\t口感描述\n青雲閤\t400\tGin琴酒\tNordes gin諾帝斯琴酒/St.germain接骨木花利口酒/Skinos乳香利口酒/Jasmine Syrup自製茉莉花糖漿/Citrus Acid檸檬酸液/tonic通寧水\t2\t微甜/花香味強烈/清爽/氣泡感\n和泉樓\t400\tGin琴酒\tGenerous Gin Azur大方琴酒/Crème de Violet 紫羅蘭利口酒/Lime Juice萊姆汁/Lavender Syrup自製薰衣草糖漿/La Caravedo Pisco秘魯白蘭地\t3.5\t偏酸爽口/如同香水的強烈花香\n醉花園\t450\tHomemade Rose Liqueur自製玫瑰利口酒\tHomemade Rose Liqueur自製玫瑰利口酒/Red Repper Syrup粉紅胡椒糖漿/Hendricks Flora Adora Gin亨利爵士花神/Latic Acid乳酸/Cream鮮奶油/Egg White蛋白/Soda Water蘇打水\t3\t蛋糕般的綿密奶泡/主體玫瑰花香帶一絲粉紅胡椒的偏甜辛香\n鐵觀音\t400\tvodka伏特加\tTieguanyin tea infused vodka鐵觀音伏特加/Cointreau君度橙酒/Crème de peach水蜜桃利口酒\t2\t水蜜桃甜香為前調/中調展現鐵觀音培茶風味/清爽的氣泡/酒體輕盈\n文山包種\t400\tGin琴酒\tWen Shan Pochong包種茶琴酒/Pavan葡萄利口酒/Lavender Leaf Syrup自製薰衣草片糖漿/Lemon juice檸檬汁\t3\t偏甜爽口/花草香/麝香葡萄與橙花氣味為中調/茶香做為後韻\n金萱\t430\tWhite Wine白葡萄酒\tJin Xuan Tea Infused White Wine金萱茶白葡萄酒/Pineapple Sage Infused Apple Whiskey鳳梨鼠尾草蘋果威士忌/Chamomile Cordial洋甘菊風味液/Cream cheese Foam奶油起司泡沫\t3\t上層奶泡起司蛋糕風味呼應金萱茶獨特奶香/中調強烈洋甘菊轉為鼠尾草與蘋果的清新/微苦茶感與葡萄弱酸做結尾\n東方美人\t$450\tV.S.O.P brandy白蘭地\tDriental beauty infused V.S.O.P brandy 東方美人茶白蘭地/Sesame芝麻/Adriatico鹽味杏仁利口酒/Fig Leaf Syrup無花果葉糖漿/Blackwalnut Bitters 黑核桃苦精/Selva Ray Chocolate巧克力蘭姆酒\t4\t初聞明顯可可香而後是杏仁與無花果葉類的堅果氣息/接著輕微苦韻洗滌口腔後茶感才慢悠悠出現\n北港甜湯米糕粥\t$430\tWhiskey威士忌\tLongan Infused Whiskey自製桂圓威士忌/Sticy Rice圓糯米/Macallan 12years麥卡倫12年/Cannanmon Bitters自製肉桂苦精\t3\t翻玩70年歷史甜品/甜而不膩的大人甜湯/桂圓的蜜味與雪莉桶威士忌完美融合/些許肉桂味添加層次/有趣的食用型調酒\n阿嬌姨烤魷魚\t$430\tVodka伏特加/ Whiskey泥煤威士忌\tSquid Infused Vodka自制烤魷魚伏特加/ Talisker Storm Whiskey/Black Cardamom黑荳蔻/Basil Syrup羅勒糖漿/Citrus Acid檸檬酸/Cucumber Soda Water黃瓜口味氣泡水/Squid Slices網狀魷魚片\t3.5\t出乎意料的味覺組合/輕微的黑荳蔻模擬出炭烤的煙燻味/帶有鹹感的威士忌襯托魷魚鮮香/小黃瓜與氣泡帶來清爽結尾\n童年記憶愛玉冰\t$400\tBamboo Leaves Infused Vermouth自製竹葉苦艾酒\tBamboo Leaves Infused Vermouth自製竹葉苦艾酒/Ice Jelly愛玉/Homemade Limocello自製檸檬利口酒/White Wine Cardamom Syrup白酒荳蔻糖漿\t3.5\t竹葉香與檸檬甜感結合後接葡萄微酸/輕微的香料做結尾/吃得到愛玉喔\n香煙裊裊龍山寺\t$430\tGin琴酒\tTanquerary No.10/Skinos希臘乳香酒/Sandalwood Infused Gin檀香木琴酒/Selva Ray Coconut Rum椰子蘭姆酒/Malibu椰子香甜酒\t5\t椰子氣味鋪陳檀香木質氣息/順口度高/如同佛珠與佛堂的既視感香氣\n民風淳樸剝皮寮\t$420\tVodka伏特加/ Gin琴酒\tDon Julio Blanco/Peeled Pepper Infused Vodka自製剝皮辣椒伏特加/East 135 GinㄥSoy Sauce手工醬油/Clarify Tomato Juice澄清番茄汁/ Ginger Ale薑汁汽水/Umami Bitters旨味苦精\t3\t氣泡爽口/輕微香菇與番茄鮮味/尾巴有些許辣椒熱感/不會辣\n日皇御用摩納卡\t$430\tWhiskey泥煤威士忌\tArbeg10y/Red Beans杜瓦小豆香甜酒/Luxardo Apricot杏桃香甜酒/Milk牛奶/Hawthorn Miso Campari Monaka仙楂味增金巴利最中餅\t2.5\t前味紅豆氣味明顯/中段杏桃果香參雜煙燻味/大人味奶酒\n阿寶師的咖哩酥\t400\tWhiskey威士忌\tPork Floss Infused Whiskey肉鬆威士忌/Curry Syrup咖哩糖漿/Carrot Juice胡蘿蔔汁\t3\t甜味型調酒/咖哩氣味轉為肉鬆帶來的輕微脂感/尾韻為胡蘿蔔自然清甜\n懸壺濟世青草巷退火養肝茶\t400\tgin琴酒\tCheerful Crackers Infused gin自製奇福餅乾琴酒/Burdock Infused Frangelico 自製牛蒡榛果香甜酒/Dita荔枝香甜酒/Grassleef Sweetflag Rhizome石菖蒲/Falernum法勒南香甜酒/Suze龍膽草香甜酒\t3.5\t苦甜型調酒/牛蒡與龍膽草結合使苦味不再單調/中調由石菖蒲與法勒南特有的香料譜出/奇福餅乾的油脂感作為橋樑銜接所有風味\n清涼百草茶\t400\tHerbal Tea Wine青草茶酒\tHerbal Tea Wine青草茶酒/Vecchio amaro del capo義大利藥草酒/Asiatic Worm wood杜瓦艾草香甜酒/Dita荔枝香甜酒/Fernet Branca義大利苦味香甜酒\t4\t中式草本遇上西式藥酒/清甜中帶微苦/艾草香銜接荔枝果香/\n駐顏美人湯\t400\tBrandy白蘭地\tLa Caravedo Pisco秘魯白蘭地/White wine cardamom syrup自製白酒荳蔻糖漿/Aloe Liqueur蘆薈香甜酒/Chartreuse修道院香甜酒/Acid Solution酸味模擬液/Aloe Water澄清蘆薈汁/Ruta Graveolens Spray芸香噴霧\t3\t入口時可聞到雲香帶有甜感的獨特香氣/蘆薈為主軸的清新類花香/尾韻香水白蘭地葡萄香與荳蔻隱約浮現\n新世紀冬瓜茶\t360\tRum蘭姆酒\tSpice White Gourd Drink自製香料冬瓜茶/Clarify Banana Juice澄清香蕉水/Soda Water蘇打水\t3.5\t香料增添冬瓜茶層次風味與香蕉熱帶水果氣味相輔相乘/輕微甜口\n古早味楊桃湯\t360\tGin琴酒\tStar fruit juice鹽漬楊桃湯/Pineapple Juice鳳梨汁/Caramel焦糖/Tonic通寧水/Spite雪碧\t3.5\t楊桃湯輕微鹹味帶出甜感/焦糖鳳梨作為後味支撐\n-------------------------\n"
] |
2024-01-10 | kladskull/crowd.dev | premium~eagle-eye~crowd-eagle-eye~crowd~eagle_eye~scheduled.py | import requests
import json
import logging
import json
from crowd.eagle_eye.sources import hacker_news
from crowd.eagle_eye.sources import devto
from crowd.eagle_eye.sources import post_process
from crowd.eagle_eye.apis import CohereAPI
from crowd.eagle_eye.apis.vector_api import VectorAPI
root = logging.getLogger()
if root.handlers:
for handler in root.handlers:
root.removeHandler(handler)
logging.basicConfig(
format="%(asctime)s.%(msecs)03d %(levelname)s [%(filename)s:%(lineno)s] %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
def scheduled_main(source):
"""
Main function.
    It will get the data from the selected source (Hacker News or Dev.to), process the items that are not yet in the database, vectorise them, and save them to the database.
"""
vector = VectorAPI()
cohere = CohereAPI()
if source == 'hacker_news':
logger.info("Source is Hacker News")
data = hacker_news()
elif source == 'devto':
logger.info("Source is Dev.to")
data = devto()
else:
raise ValueError(f'Unknown source: {source}')
logger.info('Finding existing IDs...')
existing_ids = vector.find_existing_ids([point.id for point in data])
data = post_process(data, existing_ids)
data = cohere.embed_points(data)
return vector.upsert(data)
if __name__ == '__main__':
    # scheduled_main requires a source; default to 'hacker_news' when run directly
    scheduled_main('hacker_news')
| [] |
2024-01-10 | kladskull/crowd.dev | premium~eagle-eye~crowd-eagle-eye~crowd~eagle_eye~hacker_news.py | import pandas as pd
import requests
import hashlib
import json
import logging
import signal
from bs4 import BeautifulSoup
from transformers import GPT2TokenizerFast
from urllib.parse import urlparse
from reppy.robots import Robots
import time
logger = logging.getLogger(__name__)
def pre_process(df):
"""
Pre-process the data
Args:
df (DataFrame): DataFrame to be pre-processed
Returns:
DataFrame: Pre-processed DataFrame
"""
df = df.rename(columns={'by': 'username'})
df = df.drop(columns=['descendants', 'kids', 'type'])
df['platform'] = df.apply(lambda x: 'Hacker News', axis=1)
return make_ids(df)
def make_ids(df):
def hash(r):
return int(hashlib.sha256(f'{r.platform}-{r.id}'.encode('utf-8')).hexdigest(), 16) % 10**8
df['vectorId'] = df.apply(lambda x: hash(x), axis=1)
df.to_csv('hacker_news.csv', index=False)
return df
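
# Hedged illustration of the id scheme above (values are made up): a vectorId is a
# stable 8-digit integer derived from "<platform>-<id>", e.g.
#   int(hashlib.sha256('Hacker News-123'.encode('utf-8')).hexdigest(), 16) % 10**8
# so re-running the pipeline maps the same story to the same vector id.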
def merge(from_db, from_hn):
"""
Merge the data coming from the database and the data coming from Hacker News.
It is merged by sourceId, with priority to the data coming from the database.
Args:
from_db (DataFrame): df from the database
from_hn (DataFrame): df from Hacker News
"""
    return pd.concat([from_db, from_hn]).drop_duplicates(subset='sourceId', keep="first")
def post_process(df):
"""
Post-process the data, but still before training.
Args:
df (DataFrame): DataFrame to be post-processed for training
"""
def fill_text(text, url):
"""
If there is no text coming from Hacker News,
we try to get it from the text in the website given by the url.
We need to check robots.txt to see if we are allowed to crawl the website.
Args:
text (string): text field from Hacker News
url (string): url field from Hacker News
"""
def timeout_handler(num, stack):
"""
Raises an exception after a timeout.
"""
("Received SIGALRM")
raise Exception("Alarm")
# We only need this if
# 1. There is no text coming from Hacker News
# 2. The url is not empty
# 3. The site is not a PDF
if not pd.notna(text) and pd.notna(url) and 'pdf' not in url:
# We set a timeout for the request
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(4)
try:
# TODO: Make sure this works
host = urlparse(url).hostname
robots = Robots.fetch(f'https://{host}/robots.txt')
if robots.allowed(url, 'my-user-agent'):
html = requests.get(url).text
else:
return text
except Exception as e: # noqa: E722
print(e)
return text
finally:
signal.alarm(0)
# Get the text in the page
soup = BeautifulSoup(html, features="html.parser")
allowlist = [
'p'
]
            # Prune to 800 characters to make sure we don't get a too long text for OpenAI
text = ' '.join([t for t in soup.find_all(text=True) if t.parent.name in allowlist])[:800]
return text
return text
def make_full_text(title, text):
"""
Concatenate the title and the text.
Args:
title (string): title field from Hacker News
text (string): text field from Hacker News
Returns:
string: concatenated title and text
"""
return "Title: " + title + "; Content: " + text
# Fill text and concatenated with the above functions
df['text'] = df.apply(lambda x: fill_text(x.text, x.url), axis=1)
df = df[df['text'].notna()]
df['combined'] = df.apply(lambda x: make_full_text(x.title, x.text), axis=1)
# Tokenise and drop rows that have too long tokens
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
df['n_tokens'] = df.combined.apply(lambda x: len(tokenizer.encode(x)))
df = df[df.n_tokens < 1000].tail(1_000)
return df
def get_hacker_news_data():
"""
Get the data from Hacker News.
"""
print("Fetching top IDs from Hacker News...")
top_500 = json.loads(requests.get('https://hacker-news.firebaseio.com/v0/topstories.json').content)
print("Done")
print("Fetching data from Hacker News...")
    print('Starting')
start = time.time()
dicts = [
json.loads(requests.get(f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json').content)
for story_id in top_500
]
print(f"Done in {time.time() - start} seconds")
df = pd.DataFrame(dicts)
return pre_process(df)
def main():
"""
Main function.
    It will get the data from Hacker News, post-process it, and save the result to a CSV file.
"""
# Get the data from Hacker News
df = get_hacker_news_data()
# Post-process the data
df = post_process(df)
df.to_csv('hacker_news_processed.csv', index=False)
if __name__ == '__main__':
from pprint import pprint as print
# main()
# get_hacker_news_data()
df = pd.read_csv('hacker_news_processed.csv')
import cohere
import numpy as np
co = cohere.Client("0BtmFTxNAhmanfNCdMRb10bpk2jtKCT1MLxQkSSF")
# embeds = co.embed(
# texts=list(df['combined'].values),
# model='small',
# truncate='LEFT'
# ).embeddings
# import numpy as np
# shape = np.array(embeds).shape
# print(shape)
from qdrant_client import QdrantClient
from qdrant_client.http import models
client = QdrantClient(host="localhost", port=6333)
client.recreate_collection(
collection_name="crowddev",
distance="Cosine",
vector_size=1024,
)
# Remove text nan
df = df[df['text'].notna()]
df1 = df.head(10)
print(df1)
embeds = co.embed(
texts=list(df1['combined'].values),
model='small',
truncate='LEFT'
).embeddings
df1.drop(columns=['combined'], inplace=True)
ids = list(df1.vectorId.values)
ids = [int(i) for i in ids]
df1.drop(columns=['vectorId'], inplace=True)
payloads = list(df1.T.to_dict().values())
print(payloads)
client.upsert(
collection_name="crowddev",
points=models.Batch(
ids=ids,
payloads=payloads,
vectors=embeds
),
)
| [] |
2024-01-10 | kladskull/crowd.dev | premium~eagle-eye~crowd-eagle-eye~crowd~eagle_eye~apis~vector_api.py | import pinecone
import datetime
import time
from crowd.eagle_eye.apis import CohereAPI
import logging
import itertools
import os
from crowd.eagle_eye.config import KUBE_MODE, VECTOR_API_KEY, VECTOR_INDEX
logger = logging.getLogger(__name__)
class VectorAPI:
"""
Class to interact with the vector database.
"""
def __init__(self, index_name=None):
"""
Initialize the VectorAPI.
Args:
index_name (str, optional): Name of the DB index. Defaults to "crowddev".
"""
if KUBE_MODE:
pinecone.init(api_key=VECTOR_API_KEY, environment="us-east-1-aws")
else:
pinecone.init(api_key=os.environ.get('VECTOR_API_KEY'), environment="us-east-1-aws")
if index_name is None:
if KUBE_MODE:
index_name = VECTOR_INDEX
else:
index_name = os.environ.get('VECTOR_INDEX')
self.index = pinecone.Index(index_name)
@staticmethod
def _chunks(iterable, batch_size=80):
"""A helper function to break an iterable into chunks of size batch_size.
https://www.pinecone.io/docs/insert-data/#batching-upserts.
Args:
iterable (iterable): The iterable to break into chunks.
batch_size (int, optional): The size of each chunk. Defaults to 80.
"""
it = iter(iterable)
chunk = tuple(itertools.islice(it, batch_size))
while chunk:
yield chunk
chunk = tuple(itertools.islice(it, batch_size))
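
    # Hedged illustration of the batching helper above (assumed values): with
    # batch_size=3, an iterable [1, 2, 3, 4, 5] is yielded as (1, 2, 3) then (4, 5);
    # upsert() relies on this to send vectors to Pinecone in manageable batches.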
def upsert(self, points):
"""
Upsert a list of points into the vector database.
Args:
points ([Point]): points to upsert.
"""
if (len(points) == 0):
return
# Pinecone needs the points converted into tuples
vectors = [
(point.id, point.embed, point.payload_as_dict())
for point in points
]
for ids_vectors_chunk in VectorAPI._chunks(vectors, batch_size=100):
self.index.upsert(vectors=ids_vectors_chunk)
return "OK"
    @staticmethod
def _get_timestamp(ndays, start=int(time.time())):
"""
Get the unix timestamp for a given number of days ago.
Args:
ndays (int): number of days ago.
start (int, optional): start timestamp. Defaults to int(time.time()).
Returns:
int: timestamp
"""
# TODO-test
now = datetime.datetime.fromtimestamp(start)
return int((now - datetime.timedelta(days=ndays)).timestamp())
def find_existing_ids(self, ids):
"""
Given a list of ids, find which ones already exist in the vector database.
Args:
ids ([str]): list of ids to find.
Returns:
[str]: list of existing ids.
"""
existing = list(self.index.fetch(ids=ids)['vectors'].keys())
logger.info('Found %d existing IDs', len(existing))
return existing
def delete(self, ids):
"""
Delete a list of ids from the vector database.
Args:
ids ([str]): list of ids to delete.
Returns:
str: success message.
"""
        if isinstance(ids, str):
ids = [ids]
return self.index.delete(ids=ids)
def search(self, query, ndays, exclude, cohere=None):
"""
Perform a search on the vector database.
We can set number of days ago, and exclude certain ids.
Args:
query (str): query to perform, for example a keyword
ndays (int): maximum number of days ago to search
exclude ([str]): list of ids to exclude from the search
cohere (CohereAPI, optional): Already initialised CohereAPI. Defaults to None.
Returns:
[dict]: list of results
"""
if cohere is None:
cohere = CohereAPI()
start = self._get_timestamp(ndays)
# Embed the query into a vector
vector = cohere.embed_one(query)
return self.index.query(
vector=vector,
top_k=20,
filter={
"timestamp": {"$gte": start},
"vectorId": {"$nin": exclude}
},
includeMetadata=True
)
| [] |
2024-01-10 | slyder219/tKinterChatGPT | testing~import%20openai.py | import openai
import os

key = os.environ.get("OPENAI_API_KEY")
openai.api_key = key
model = "gpt-3.5-turbo"
temp = 0.5
messages = []
while True:
mes = input("\nEnter input: ")
if "hist" in mes and len(mes) <= 10:
print(messages)
elif "temp" in mes and len(mes) <= 10:
new = float(input("Enter int for temp: "))
temp = new
else:
messages.append({"role" : "user", "content" : mes})
response = openai.ChatCompletion.create(
model = model,
messages = messages,
temperature = temp
)
textOut = response.choices[0].message.content
messages.append({"role" : "assistant", "content" : textOut})
print()
print(textOut)
| [] |
2024-01-10 | slyder219/tKinterChatGPT | testing~tetsingClass.py | import openai
import os

key = os.environ.get("OPENAI_API_KEY")
openai.api_key = key
class convo():
def __init__(self):
self.temp = 0.5
self.model = "gpt-3.5-turbo"
self.history = []
self.latestTextOut = ""
def response(self, newMessage):
self.history.append(
{"role": "user", "content": newMessage}
)
response = openai.ChatCompletion.create(
model = self.model,
messages = self.history,
temperature = self.temp
)
textOut = response.choices[0].message.content
self.history.append(
{"role": "assistant", "content": textOut}
)
self.latestTextOut = textOut
def main():
currentConvo = convo()
currentConvo.response("Hello, how are you?")
print(currentConvo.latestTextOut)
if __name__ == "__main__":
main() | [] |
2024-01-10 | slyder219/tKinterChatGPT | testing~tkinterChatGPT.py |
import os
import sys
import customtkinter as ctk
import openai
messages = []
temp = 0.6
sys.path.append(r"C:\Users\seanl\AppData\Local\Programs\Python\Python311\Lib\site-packages")
ctk.set_appearance_mode("dark")
ctk.set_default_color_theme("blue")
key = os.environ.get("OPENAI_API_KEY")
openai.api_key = key
model = "gpt-3.5-turbo"
def enterButton(textbox, output):
global messages
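    # Unlock the (read-only) output box so the new exchange can be appended;
    # it is set back to "disabled" at the end of this handler.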
output.configure(state = "normal")
input = textbox.get("0.0", "end")
messages.append({"role": "user", "content": input})
response = openai.ChatCompletion.create(model = model,
messages = messages,
temperature = temp)
textResponse = response.choices[0].message.content
print(textResponse)
messages.append({"role": "assistant", "content": textResponse})
output.insert("end", "\n\n")
output.insert("end", textResponse)
textbox.delete("0.0", "end")
output.configure(state = "disabled")
def main():
root = ctk.CTk()
root.geometry("1000x700")
root.title("ChatGPT3.5Turbo")
frame1 = ctk.CTkFrame(root,
width = 1000,
height= 700)
frame1.grid(row = 0,
column = 0)
textbox = ctk.CTkTextbox(frame1,
width = 1000)
textbox.grid(row = 0,
column = 0,
sticky = "nsew")
outputBox = ctk.CTkTextbox(frame1,
width = 1000,
height = 375)
button = ctk.CTkButton(frame1,
width = 500,
height = 50,
border_width = 0,
corner_radius = 8,
text = "Enter",
command= lambda: enterButton(textbox, outputBox))
button.grid(row = 1)
outputBox.grid(row = 2,
column = 0,
pady = 20)
outputBox.configure(state = "disabled")
root.mainloop()
if __name__ == "__main__":
main()
| [
"INPUT"
] |
2024-01-10 | xorsuyash/Multi-pdf-chat | app2.py | import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import HuggingFaceHub
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def get_text_chunks(text):
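    # Split the raw text into 1000-character chunks with a 200-character overlap
    # so that context is preserved across chunk boundaries.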
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks):
embeddings = OpenAIEmbeddings()
# embeddings = HuggingFaceInstructEmbeddings(model_name="hkunlp/instructor-xl")
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI()
# llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512})
memory = ConversationBufferMemory(
memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
def handle_userinput(user_question):
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
def main():
load_dotenv()
st.set_page_config(page_title="Chat with multiple PDFs",
page_icon=":books:")
st.write(css, unsafe_allow_html=True)
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.header("Chat with multiple PDFs :books:")
user_question = st.text_input("Ask a question about your documents:")
if user_question:
handle_userinput(user_question)
with st.sidebar:
st.subheader("Your documents")
pdf_docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
# get pdf text
raw_text = get_pdf_text(pdf_docs)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(
vectorstore)
if __name__ == '__main__':
main() | [] |
2024-01-10 | eugepineiro/lang-bot | rate_limit.py | import time
import random
from openai import RateLimitError
def make_request_with_retry(api_call, max_retries=5):
for i in range(max_retries):
try:
return api_call()
except RateLimitError:
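            # Exponential backoff with jitter: wait roughly 2^i seconds plus a
            # random fraction so retries do not hit the API in lockstep.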
wait_time = (2 ** i) + random.random()
time.sleep(wait_time)
raise Exception("Still hitting rate limit after max retries") | [] |
2024-01-10 | jadamixd/CodeNTNU-Hackathon | camera~imagetext_extraction.py | import pytesseract as pyt
from PIL import Image
import openai
import cv2
import numpy as np
# Path to the Tesseract executable (update this with your installation path)
pyt.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
# Function to sharpen and extract text from an image using OCR
def extract_text_from_image(image_path):
try:
# Load the image
image = cv2.imread(image_path)
# Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Apply thresholding to create a binary image
_, thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
# Find contours in the binary image
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Initialize variables to track the largest white rectangle
largest_area = 0
largest_contour = None
# Process each contour
for contour in contours:
# Calculate the area of the contour
area = cv2.contourArea(contour)
# Check if the area is larger than the current largest
if area > largest_area:
largest_area = area
largest_contour = contour
# Get the coordinates of the bounding box around the largest white contour
x, y, w, h = cv2.boundingRect(largest_contour)
# Crop the region containing the largest white contour
largest_white_contour = image[y:y+h, x:x+w]
# Save the resized image as a single image
cv2.imwrite("static\image_new.jpg", largest_white_contour)
kernel = np.array([[0, -1, 0],
[-1, 5,-1],
[0, -1, 0]])
image_sharp = cv2.filter2D(src=image, ddepth=-1, kernel=kernel)
# img = cv2.imread("static\image_new.jpg", 1)
# converting to LAB color space
lab= cv2.cvtColor(image_sharp, cv2.COLOR_BGR2LAB)
l_channel, a, b = cv2.split(lab)
# Applying CLAHE to L-channel
# feel free to try different values for the limit and grid size:
clahe = cv2.createCLAHE(clipLimit=0.5, tileGridSize=(2,2))
cl = clahe.apply(l_channel)
# merge the CLAHE enhanced L-channel with the a and b channel
limg = cv2.merge((cl,a,b))
# Converting image from LAB Color model to BGR color spcae
enhanced_img = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
# Stacking the original image with the enhanced image
cv2.imwrite("static\image_new.jpg",enhanced_img)
# Open the image using Pillow (PIL)
image = Image.open("static\image_new.jpg")
# Perform OCR on the image
text = pyt.image_to_string(image)#, lang="nor")
return text
except Exception as e:
print(f"An error occurred: {str(e)}")
return None
# Function to extract the relevant parts from the extracted string
def extracting_relevant_text(extracted_text):
try:
        # INSERT YOUR OPENAI ORGANIZATION AND API KEY HERE
message=[
{"role": "system",
"content": "Du er en data analytiker som er god til å filtrere ut matvarer fra en tekst."},
{"role": "user",
"content": str(f"Kan du filtrere ut matvarer fra kvitteringen under og gi det som en komma separert streng med kun alle matvarene? Ikke oversett noen ting, rett opp i skrivefeil, og ignorer tall og ord som ikke er mat: {extracted_text}")}
]
response = openai.ChatCompletion.create(
model="gpt-4",
max_tokens=2000,
temperature=1.5,
messages=message
)
extracted_text_list = response['choices'][0]['message']['content'].strip('"').split(",")
print(extracted_text_list)
return extracted_text_list
except:
print("total failure!")
return []
| [
"Kan du filtrere ut matvarer fra kvitteringen under og gi det som en komma separert streng med kun alle matvarene? Ikke oversett noen ting, rett opp i skrivefeil, og ignorer tall og ord som ikke er mat: PLACEHOLDER",
"Du er en data analytiker som er god til å filtrere ut matvarer fra en tekst."
] |
2024-01-10 | BenMinch/MIP | MIP_AI.py | import pandas as pd
import sys,os,subprocess,argparse
from openai import OpenAI
import time
argparser = argparse.ArgumentParser(description='Get AI descriptions from protein descriptions')
argparser.add_argument('-i', '--input', help='Input csv file (Must have a column called Description with protein descriptions)', required=True)
argparser.add_argument('-o', '--output', help='Output csv file', required=True)
args = argparser.parse_args()
test_file = pd.read_csv(args.input)
output_file = args.output
client = OpenAI()
assistant= client.beta.assistants.create(
name="Protein Classifier",
instructions="Based on the following description of a protein, I want you to give a short description of the protein's function with a 4 word limit",
model='gpt-3.5-turbo'
)
#iterate through descriptions and get responses in matching column
test_file['Function']=''
for i in range(len(test_file)):
thread=client.beta.threads.create()
description=test_file['Description'][i]
message=client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content=description
)
run= client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id,
)
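    # Fixed 5-second wait for the assistant run to finish before reading messages;
    # polling the run status would be more robust, but this keeps the script simple.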
time.sleep(5)
messages = client.beta.threads.messages.list(
thread_id=thread.id
)
assistant_response=''
for message in messages.data:
if message.role == "assistant":
assistant_response = message.content[0].text.value
break
print(assistant_response)
    test_file.loc[i, 'Function'] = assistant_response
time.sleep(5)
#clear thread
client.beta.threads.delete(thread.id)
#clear thread
test_file.to_csv(output_file,index=False)
| [] |
2024-01-10 | DSSGxUK/sma | src~helpers~topic_modelling.py | import logging
import gensim
import gensim.corpora as corpora
import matplotlib.pyplot as plt
import pandas as pd
from gensim.models import CoherenceModel
from gensim.utils import simple_preprocess
import os
os.environ["MALLET_HOME"] = "/files/mallet/mallet-2.0.8/"
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
logging.getLogger('gensim').setLevel(logging.ERROR)
def create_dict(data, text_column):
# Tokenize the docs
tokenized_list = [simple_preprocess(doc) for doc in data[text_column]]
# Create the Corpus and dictionary
mydict = corpora.Dictionary()
# The (0, 1) in line 1 means, the word with id=0 appears once in the 1st document.
# Likewise, the (4, 4) in the second list item means the word with id 4 appears 4 times in the second document. And so on.
mycorpus = [mydict.doc2bow(doc, allow_update=True) for doc in tokenized_list]
# Not human readable. Convert the ids to words.
# Notice, the order of the words gets lost. Just the word and it?s frequency information is retained.
word_counts = [[(mydict[id], count) for id, count in line] for line in mycorpus]
# Save the Dict and Corpus
mydict.save('mydict.dict') # save dict to disk
corpora.MmCorpus.serialize('mycorpus.mm', mycorpus) # save corpus to disk
return mydict, mycorpus, tokenized_list
# Find the optimal number of topics for LDA.
# build many LDA models with different values of number of topics (k)
# and pick the one that gives the highest coherence value.
# Choosing a k that marks the end of a rapid growth of topic coherence
# usually offers meaningful and interpretable topics.
# Picking an even higher value can sometimes provide more granular sub-topics.
# If the same keywords being repeated in multiple topics, it's probably a sign that the k is too large.
def compute_coherence_values(dictionary, corpus, texts, limit, start, step):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
mallet_path = '/home/desktop1/files/mallet-2.0.8/mallet-2.0.8/bin/mallet'
for num_topics in range(start, limit, step):
        model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=dictionary)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
# Show graph
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# Print the coherence scores
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
    best_index = coherence_values.index(max(coherence_values))
    print("Optimal number of topics:", start + step * best_index)
return model_list
def topic_scores(data: list, num_topics):
# Tokenize the docs
tokenized_list = [simple_preprocess(doc) for doc in data]
# Create the Corpus and dictionary
mydict = corpora.Dictionary()
# The (0, 1) in line 1 means, the word with id=0 appears once in the 1st document.
# Likewise, the (4, 4) in the second list item means the word with id 4 appears 4 times in the second document. And so on.
mycorpus = [mydict.doc2bow(doc, allow_update=True) for doc in tokenized_list]
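    # Path to the local Mallet binary; adjust this to wherever Mallet is installed
    # on your machine.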
mallet_path = '/files/mallet-2.0.8/mallet-2.0.8/bin/mallet'
model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=mycorpus, num_topics=num_topics, id2word=mydict)
#coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
#coherence_values.append(coherencemodel.get_coherence())
# Init output
sent_topics_df = pd.DataFrame()
# Get topic in each document
for i, row in enumerate(model[mycorpus]):
# Get the topics, Perc Contribution and for each document
for j, (topic_num, prop_topic) in enumerate(row):
sent_topics_df = sent_topics_df.append(pd.Series([i, int(topic_num), prop_topic]), ignore_index=True)
sent_topics_df.columns = ['row_number','Topic', 'Topic_Contribution']
sent_topics_df = sent_topics_df.pivot(index="row_number", columns="Topic", values="Topic_Contribution").reset_index()
return sent_topics_df
| [] |
2024-01-10 | ChadiHelwe/MAFALDA | src~annotation_models~models.py | from abc import ABC, abstractmethod
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage
from revChatGPT.V1 import Chatbot
from revChatGPT.V3 import Chatbot as ChatbotV3
class LanguageModel(ABC):
def __init__(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return None
@abstractmethod
def run_prompt(self, prompt: str) -> str:
pass
class ChatGPTModel(LanguageModel):
def __init__(self, access_token: str, api_key=None, **kwargs):
self.access_token = access_token
self.messages = []
self.chat = None
self.kwargs = kwargs
self.conversation_id = None
self.parent_id = None
self.api_key = api_key
def __enter__(self):
config = {"access_token": self.access_token, "paid": True}
if self.kwargs:
config.update(self.kwargs)
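        # An API key selects the official OpenAI API (V3 client); otherwise fall
        # back to the access-token based browser client (V1).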
if self.api_key:
self.chat = ChatbotV3(
api_key=self.api_key, engine=config["model"], temperature=0
)
else:
self.chat = Chatbot(config=config)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.conversation_id:
try:
self.chat.delete_conversation(self.conversation_id)
            except Exception:
pass
self.chat = None
self.messages = None
self.conversation_id = None
self.parent_id = None
self.kwargs = None
self.access_token = None
def run_prompt(self, prompt: str) -> str:
self.messages.append(HumanMessage(content=prompt))
if self.api_key:
if self.conversation_id is None:
self.conversation_id = "default"
result = self.chat.ask(prompt, role="user", convo_id=self.conversation_id)
# self.chat.conversation
assert isinstance(result, str)
self.messages.append(AIMessage(content=result))
return result
else:
result = self.chat.ask(
prompt, conversation_id=self.conversation_id, parent_id=self.parent_id
)
result = list(result)[-1]
self.messages.append(AIMessage(content=result["message"]))
self.conversation_id = result["conversation_id"]
self.parent_id = result["parent_id"]
return result["message"]
| [] |
2024-01-10 | ChadiHelwe/MAFALDA | src~experiments_pipelines~pipelines.py | import json
import logging
import os
from collections import OrderedDict
from typing import Any
from langchain.chains import ConversationChain, LLMChain
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from tqdm import tqdm
from src.classification_models.quantized_llama_based_models import (
LLaMABasedQuantizedModel,
)
from src.experiments_pipelines.chatbot import ChatBotLLM
from src.process_docanno_data import process_data
from src.utils import read_jsonl
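# Prompt templates keyed by detection level: 0 asks for binary fallacy detection,
# 1 for coarse-grained fallacy categories, and 2 for fine-grained fallacy types.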
PROMPT = {
0: """
{instruction_begin}
Text: "{example_input}"
Determine whether the following sentence contains a fallacy or not:
Sentence: "{sentence_input}" {instruction_end}
Output:
""",
1: """
{instruction_begin}
Text: "{example_input}"
Based on the above text, identify the fallacy (if any) in the following sentence. If a fallacy is present, specify the type(s) of fallacy without providing explanations. The possible types of fallacy are:
- appeal to emotion
- fallacy of logic
- fallacy of credibility
Sentence: "{sentence_input}" {instruction_end}
Output:
""",
2: """
{instruction_begin}
Text: "{example_input}"
Based on the above text, identify the fallacy (if any) in the following sentence. If a fallacy is present, specify the type(s) of fallacy without providing explanations. The possible types of fallacy are:
- appeal to positive emotion
- appeal to anger
- appeal to fear
- appeal to pity
- appeal to ridicule
- appeal to worse problems
- causal oversimplification
- circular reasoning
- equivocation
- false analogy
- false causality
- false dilemma
- hasty generalization
- slippery slope
- straw man
- fallacy of division
- ad hominem
- ad populum
- appeal to (false) authority
- appeal to nature
- appeal to tradition
- guilt by association
- tu quoque
Sentence: "{sentence_input}" {instruction_end}
Output:
""",
}
ALT_PROMPT = {
0: """
{instruction_begin}
Definitions:
- An argument consists of an assertion called the conclusion and one or more assertions called premises, where the premises are intended to establish the truth of the conclusion. Premises or conclusions can be implicit in an argument.
- A fallacious argument is an argument where the premises do not entail the conclusion.
Text: "{example_input}"
Based on the above text, determine whether the following sentence is part of a fallacious argument or not:
Sentence: "{sentence_input}" {instruction_end}
Output:
""",
1: """
{instruction_begin}
Definitions:
- An argument consists of an assertion called the conclusion and one or more assertions called premises, where the premises are intended to establish the truth of the conclusion. Premises or conclusions can be implicit in an argument.
- A fallacious argument is an argument where the premises do not entail the conclusion.
Text: "{example_input}"
Based on the above text, determine whether the following sentence is part of a fallacious argument or not. If it is, indicate the type(s) of fallacy without providing explanations. The potential types of fallacy include:
- appeal to emotion
- fallacy of logic
- fallacy of credibility
Sentence: "{sentence_input}" {instruction_end}
Output:
""",
2: """
{instruction_begin}
Definitions:
- An argument consists of an assertion called the conclusion and one or more assertions called premises, where the premises are intended to establish the truth of the conclusion. Premises or conclusions can be implicit in an argument.
- A fallacious argument is an argument where the premises do not entail the conclusion.
Text: "{example_input}"
Based on the above text, determine whether the following sentence is part of a fallacious argument or not. If it is, indicate the type(s) of fallacy without providing explanations. The potential types of fallacy include:
- appeal to positive emotion
- appeal to anger
- appeal to fear
- appeal to pity
- appeal to ridicule
- appeal to worse problems
- causal oversimplification
- circular reasoning
- equivocation
- false analogy
- false causality
- false dilemma
- hasty generalization
- slippery slope
- straw man
- fallacy of division
- ad hominem
- ad populum
- appeal to (false) authority
- appeal to nature
- appeal to tradition
- guilt by association
- tu quoque
Sentence: "{sentence_input}" {instruction_end}
Output:
""",
}
def zero_or_few_shots_pipeline(
model: LLaMABasedQuantizedModel,
dataset_path: str = None,
prediction_path: str = None,
level: int = 0,
alt_prompt: bool = True,
):
logger = logging.getLogger("MafaldaLogger")
if alt_prompt:
prompt = PromptTemplate(
input_variables=[
"example_input",
"sentence_input",
"instruction_begin",
"instruction_end",
],
template=ALT_PROMPT[level],
)
else:
prompt = PromptTemplate(
input_variables=[
"example_input",
"sentence_input",
"instruction_begin",
"instruction_end",
],
template=PROMPT[level],
)
chatbot_model = ChatBotLLM(model=model)
if model.model_name == "gpt-3.5":
chatbot_model.max_length = 1024
chatbot_chain = LLMChain(llm=chatbot_model, prompt=prompt)
data = read_jsonl(dataset_path)
processed_data = process_data(data)
assert len(data) == len(
processed_data
), f"Data length mismatch: {len(data)} != {len(processed_data)}"
# Check already processed examples
already_processed = set()
if os.path.exists(prediction_path):
with open(prediction_path, "r") as f:
for line in f:
try:
entry = json.loads(line)
already_processed.add(entry["text"])
except json.JSONDecodeError:
# Handle improperly formatted last line
f.seek(0)
all_lines = f.readlines()
with open(prediction_path, "w") as fw:
fw.writelines(all_lines[:-1])
with open(prediction_path, "a") as f:
for example, processed_example in tqdm(
zip(data, processed_data), total=len(data)
):
if example["text"] in already_processed:
logger.info(f"Skipping already processed example: {example['text']}")
continue
logger.info(example["text"])
# pred_outputs = '{"prediction": {'
pred_outputs = OrderedDict()
for s in processed_example:
logger.info(s)
output = chatbot_chain.run(
example_input=example["text"],
sentence_input=s,
instruction_begin=model.instruction_begin,
instruction_end=model.instruction_end,
)
logger.info(output)
# pred_outputs += f'"{s}": "{output}",'
pred_outputs[s] = output
# pred_outputs = pred_outputs[:-1] + "}}"
json_line = json.dumps(
{
"text": example["text"],
"prediction": pred_outputs,
}
)
f.write(json_line + "\n")
| [
"{0: '\\n {instruction_begin}\\n\\n Definitions:\\n - An argument consists of an assertion called the conclusion and one or more assertions called premises, where the premises are intended to establish the truth of the conclusion. Premises or conclusions can be implicit in an argument.\\n - A fallacious argument is an argument where the premises do not entail the conclusion.\\n\\n Text: \"{example_input}\"\\n\\n Based on the above text, determine whether the following sentence is part of a fallacious argument or not:\\n \\n Sentence: \"{sentence_input}\" {instruction_end}\\n \\n Output:\\n \\n ', 1: '\\n {instruction_begin}\\n\\n Definitions:\\n - An argument consists of an assertion called the conclusion and one or more assertions called premises, where the premises are intended to establish the truth of the conclusion. Premises or conclusions can be implicit in an argument.\\n - A fallacious argument is an argument where the premises do not entail the conclusion.\\n\\n Text: \"{example_input}\"\\n\\n Based on the above text, determine whether the following sentence is part of a fallacious argument or not. If it is, indicate the type(s) of fallacy without providing explanations. The potential types of fallacy include:\\n - appeal to emotion\\n - fallacy of logic\\n - fallacy of credibility \\n \\n Sentence: \"{sentence_input}\" {instruction_end}\\n \\n Output:\\n \\n ', 2: '\\n {instruction_begin}\\n\\n Definitions:\\n - An argument consists of an assertion called the conclusion and one or more assertions called premises, where the premises are intended to establish the truth of the conclusion. Premises or conclusions can be implicit in an argument.\\n - A fallacious argument is an argument where the premises do not entail the conclusion.\\n \\n Text: \"{example_input}\"\\n\\n Based on the above text, determine whether the following sentence is part of a fallacious argument or not. If it is, indicate the type(s) of fallacy without providing explanations. The potential types of fallacy include:\\n - appeal to positive emotion\\n - appeal to anger\\n - appeal to fear\\n - appeal to pity\\n - appeal to ridicule\\n - appeal to worse problems\\n - causal oversimplification\\n - circular reasoning\\n - equivocation\\n - false analogy\\n - false causality\\n - false dilemma\\n - hasty generalization\\n - slippery slope\\n - straw man\\n - fallacy of division\\n - ad hominem\\n - ad populum\\n - appeal to (false) authority\\n - appeal to nature\\n - appeal to tradition\\n - guilt by association\\n - tu quoque\\n \\n Sentence: \"{sentence_input}\" {instruction_end}\\n \\n Output:\\n '}",
"instruction_end",
"sentence_input",
"example_input",
"instruction_begin",
"{0: '\\n {instruction_begin}\\n\\n Text: \"{example_input}\"\\n\\n Determine whether the following sentence contains a fallacy or not:\\n \\n Sentence: \"{sentence_input}\" {instruction_end}\\n \\n Output:\\n \\n ', 1: '\\n {instruction_begin}\\n\\n Text: \"{example_input}\"\\n\\n Based on the above text, identify the fallacy (if any) in the following sentence. If a fallacy is present, specify the type(s) of fallacy without providing explanations. The possible types of fallacy are:\\n - appeal to emotion\\n - fallacy of logic\\n - fallacy of credibility \\n \\n Sentence: \"{sentence_input}\" {instruction_end}\\n \\n Output:\\n \\n ', 2: '\\n {instruction_begin}\\n\\n Text: \"{example_input}\"\\n\\n Based on the above text, identify the fallacy (if any) in the following sentence. If a fallacy is present, specify the type(s) of fallacy without providing explanations. The possible types of fallacy are:\\n - appeal to positive emotion\\n - appeal to anger\\n - appeal to fear\\n - appeal to pity\\n - appeal to ridicule\\n - appeal to worse problems\\n - causal oversimplification\\n - circular reasoning\\n - equivocation\\n - false analogy\\n - false causality\\n - false dilemma\\n - hasty generalization\\n - slippery slope\\n - straw man\\n - fallacy of division\\n - ad hominem\\n - ad populum\\n - appeal to (false) authority\\n - appeal to nature\\n - appeal to tradition\\n - guilt by association\\n - tu quoque\\n \\n Sentence: \"{sentence_input}\" {instruction_end}\\n \\n Output:\\n '}"
] |
2024-01-10 | Trinkle23897/tianshou | examples~atari~atari_wrapper.py | # Borrow a lot from openai baselines:
# https://github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
import warnings
from collections import deque
import cv2
import gymnasium as gym
import numpy as np
from tianshou.env import ShmemVectorEnv
try:
import envpool
except ImportError:
envpool = None
def _parse_reset_result(reset_result):
contains_info = (
isinstance(reset_result, tuple)
and len(reset_result) == 2
and isinstance(reset_result[1], dict)
)
if contains_info:
return reset_result[0], reset_result[1], contains_info
return reset_result, {}, contains_info
class NoopResetEnv(gym.Wrapper):
"""Sample initial states by taking random number of no-ops on reset.
No-op is assumed to be action 0.
:param gym.Env env: the environment to wrap.
:param int noop_max: the maximum value of no-ops to run.
"""
def __init__(self, env, noop_max=30):
super().__init__(env)
self.noop_max = noop_max
self.noop_action = 0
assert env.unwrapped.get_action_meanings()[0] == "NOOP"
def reset(self, **kwargs):
_, info, return_info = _parse_reset_result(self.env.reset(**kwargs))
if hasattr(self.unwrapped.np_random, "integers"):
noops = self.unwrapped.np_random.integers(1, self.noop_max + 1)
else:
noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
for _ in range(noops):
step_result = self.env.step(self.noop_action)
if len(step_result) == 4:
obs, rew, done, info = step_result
else:
obs, rew, term, trunc, info = step_result
done = term or trunc
if done:
obs, info, _ = _parse_reset_result(self.env.reset())
if return_info:
return obs, info
return obs
class MaxAndSkipEnv(gym.Wrapper):
"""Return only every `skip`-th frame (frameskipping) using most recent raw observations (for max pooling across time steps).
:param gym.Env env: the environment to wrap.
:param int skip: number of `skip`-th frame.
"""
def __init__(self, env, skip=4):
super().__init__(env)
self._skip = skip
def step(self, action):
"""Step the environment with the given action.
Repeat action, sum reward, and max over last observations.
"""
obs_list, total_reward = [], 0.0
new_step_api = False
for _ in range(self._skip):
step_result = self.env.step(action)
if len(step_result) == 4:
obs, reward, done, info = step_result
else:
obs, reward, term, trunc, info = step_result
done = term or trunc
new_step_api = True
obs_list.append(obs)
total_reward += reward
if done:
break
max_frame = np.max(obs_list[-2:], axis=0)
if new_step_api:
return max_frame, total_reward, term, trunc, info
return max_frame, total_reward, done, info
class EpisodicLifeEnv(gym.Wrapper):
"""Make end-of-life == end-of-episode, but only reset on true game over.
It helps the value estimation.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.lives = 0
self.was_real_done = True
self._return_info = False
def step(self, action):
step_result = self.env.step(action)
if len(step_result) == 4:
obs, reward, done, info = step_result
new_step_api = False
else:
obs, reward, term, trunc, info = step_result
done = term or trunc
new_step_api = True
self.was_real_done = done
# check current lives, make loss of life terminal, then update lives to
# handle bonus lives
lives = self.env.unwrapped.ale.lives()
if 0 < lives < self.lives:
# for Qbert sometimes we stay in lives == 0 condition for a few
# frames, so its important to keep lives > 0, so that we only reset
# once the environment is actually done.
done = True
term = True
self.lives = lives
if new_step_api:
return obs, reward, term, trunc, info
return obs, reward, done, info
def reset(self, **kwargs):
"""Calls the Gym environment reset, only when lives are exhausted.
This way all states are still reachable even though lives are episodic, and
the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs, info, self._return_info = _parse_reset_result(self.env.reset(**kwargs))
else:
# no-op step to advance from terminal/lost life state
step_result = self.env.step(0)
obs, info = step_result[0], step_result[-1]
self.lives = self.env.unwrapped.ale.lives()
if self._return_info:
return obs, info
return obs
class FireResetEnv(gym.Wrapper):
"""Take action on reset for environments that are fixed until firing.
Related discussion: https://github.com/openai/baselines/issues/240.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
assert env.unwrapped.get_action_meanings()[1] == "FIRE"
assert len(env.unwrapped.get_action_meanings()) >= 3
def reset(self, **kwargs):
_, _, return_info = _parse_reset_result(self.env.reset(**kwargs))
obs = self.env.step(1)[0]
return (obs, {}) if return_info else obs
class WarpFrame(gym.ObservationWrapper):
"""Warp frames to 84x84 as done in the Nature paper and later work.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.size = 84
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=(self.size, self.size),
dtype=env.observation_space.dtype,
)
def observation(self, frame):
"""Returns the current observation from a frame."""
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
return cv2.resize(frame, (self.size, self.size), interpolation=cv2.INTER_AREA)
class ScaledFloatFrame(gym.ObservationWrapper):
"""Normalize observations to 0~1.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
low = np.min(env.observation_space.low)
high = np.max(env.observation_space.high)
self.bias = low
self.scale = high - low
self.observation_space = gym.spaces.Box(
low=0.0,
high=1.0,
shape=env.observation_space.shape,
dtype=np.float32,
)
def observation(self, observation):
return (observation - self.bias) / self.scale
class ClipRewardEnv(gym.RewardWrapper):
"""clips the reward to {+1, 0, -1} by its sign.
:param gym.Env env: the environment to wrap.
"""
def __init__(self, env):
super().__init__(env)
self.reward_range = (-1, 1)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign. Note: np.sign(0) == 0."""
return np.sign(reward)
class FrameStack(gym.Wrapper):
"""Stack n_frames last frames.
:param gym.Env env: the environment to wrap.
:param int n_frames: the number of frames to stack.
"""
def __init__(self, env, n_frames):
super().__init__(env)
self.n_frames = n_frames
self.frames = deque([], maxlen=n_frames)
shape = (n_frames, *env.observation_space.shape)
self.observation_space = gym.spaces.Box(
low=np.min(env.observation_space.low),
high=np.max(env.observation_space.high),
shape=shape,
dtype=env.observation_space.dtype,
)
def reset(self, **kwargs):
obs, info, return_info = _parse_reset_result(self.env.reset(**kwargs))
for _ in range(self.n_frames):
self.frames.append(obs)
return (self._get_ob(), info) if return_info else self._get_ob()
def step(self, action):
step_result = self.env.step(action)
if len(step_result) == 4:
obs, reward, done, info = step_result
new_step_api = False
else:
obs, reward, term, trunc, info = step_result
new_step_api = True
self.frames.append(obs)
if new_step_api:
return self._get_ob(), reward, term, trunc, info
return self._get_ob(), reward, done, info
def _get_ob(self):
# the original wrapper use `LazyFrames` but since we use np buffer,
# it has no effect
return np.stack(self.frames, axis=0)
def wrap_deepmind(
env_id,
episode_life=True,
clip_rewards=True,
frame_stack=4,
scale=False,
warp_frame=True,
):
"""Configure environment for DeepMind-style Atari.
The observation is channel-first: (c, h, w) instead of (h, w, c).
:param str env_id: the atari environment id.
:param bool episode_life: wrap the episode life wrapper.
:param bool clip_rewards: wrap the reward clipping wrapper.
:param int frame_stack: wrap the frame stacking wrapper.
:param bool scale: wrap the scaling observation wrapper.
:param bool warp_frame: wrap the grayscale + resize observation wrapper.
:return: the wrapped atari environment.
"""
assert "NoFrameskip" in env_id
env = gym.make(env_id)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
if episode_life:
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
if warp_frame:
env = WarpFrame(env)
if scale:
env = ScaledFloatFrame(env)
if clip_rewards:
env = ClipRewardEnv(env)
if frame_stack:
env = FrameStack(env, frame_stack)
return env
def make_atari_env(task, seed, training_num, test_num, **kwargs):
"""Wrapper function for Atari env.
If EnvPool is installed, it will automatically switch to EnvPool's Atari env.
:return: a tuple of (single env, training envs, test envs).
"""
if envpool is not None:
if kwargs.get("scale", 0):
warnings.warn(
"EnvPool does not include ScaledFloatFrame wrapper, "
"please set `x = x / 255.0` inside CNN network's forward function.",
)
        # parameter conversion
train_envs = env = envpool.make_gymnasium(
task.replace("NoFrameskip-v4", "-v5"),
num_envs=training_num,
seed=seed,
episodic_life=True,
reward_clip=True,
stack_num=kwargs.get("frame_stack", 4),
)
test_envs = envpool.make_gymnasium(
task.replace("NoFrameskip-v4", "-v5"),
num_envs=test_num,
seed=seed,
episodic_life=False,
reward_clip=False,
stack_num=kwargs.get("frame_stack", 4),
)
else:
warnings.warn(
"Recommend using envpool (pip install envpool) to run Atari games more efficiently.",
)
env = wrap_deepmind(task, **kwargs)
train_envs = ShmemVectorEnv(
[
lambda: wrap_deepmind(task, episode_life=True, clip_rewards=True, **kwargs)
for _ in range(training_num)
],
)
test_envs = ShmemVectorEnv(
[
lambda: wrap_deepmind(task, episode_life=False, clip_rewards=False, **kwargs)
for _ in range(test_num)
],
)
env.seed(seed)
train_envs.seed(seed)
test_envs.seed(seed)
return env, train_envs, test_envs
| [] |
2024-01-10 | fpena06/ChatGPT | src~revChatGPT~Official.py | """
A simple wrapper for the official ChatGPT API
"""
import argparse
import json
import os
import sys
from datetime import date
import openai
import tiktoken
ENGINE = os.environ.get("GPT_ENGINE") or "text-chat-davinci-002-20221122"
ENCODER = tiktoken.get_encoding("gpt2")
def get_max_tokens(prompt: str) -> int:
"""
Get the max tokens for a prompt
"""
return 4000 - len(ENCODER.encode(prompt))
class Chatbot:
"""
Official ChatGPT API
"""
def __init__(self, api_key: str, buffer: int = None, engine: str = None) -> None:
"""
Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
"""
openai.api_key = api_key or os.environ.get("OPENAI_API_KEY")
self.conversations = Conversation()
self.prompt = Prompt(buffer=buffer)
self.engine = engine or ENGINE
def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
):
"""
Get the completion function
"""
return openai.Completion.create(
engine=self.engine,
prompt=prompt,
temperature=temperature,
max_tokens=get_max_tokens(prompt),
stop=["\n\n\n"],
stream=stream,
)
def _process_completion(
self,
user_request: str,
completion: dict,
conversation_id: str = None,
user: str = "User",
) -> dict:
if completion.get("choices") is None:
raise Exception("ChatGPT API returned no choices")
if len(completion["choices"]) == 0:
raise Exception("ChatGPT API returned no choices")
if completion["choices"][0].get("text") is None:
raise Exception("ChatGPT API returned no text")
completion["choices"][0]["text"] = completion["choices"][0]["text"].removesuffix(
"<|im_end|>"
)
# Add to chat history
self.prompt.add_to_history(
user_request,
completion["choices"][0]["text"],
user=user,
)
if conversation_id is not None:
self.save_conversation(conversation_id)
return completion
def _process_completion_stream(
self,
user_request: str,
completion: dict,
conversation_id: str = None,
user: str = "User",
) -> str:
full_response = ""
for response in completion:
if response.get("choices") is None:
raise Exception("ChatGPT API returned no choices")
if len(response["choices"]) == 0:
raise Exception("ChatGPT API returned no choices")
if response["choices"][0].get("finish_details") is not None:
break
if response["choices"][0].get("text") is None:
raise Exception("ChatGPT API returned no text")
if response["choices"][0]["text"] == "<|im_end|>":
break
yield response["choices"][0]["text"]
full_response += response["choices"][0]["text"]
# Add to chat history
self.prompt.add_to_history(user_request, full_response, user)
if conversation_id is not None:
self.save_conversation(conversation_id)
def ask(
self,
user_request: str,
temperature: float = 0.5,
conversation_id: str = None,
user: str = "User",
) -> dict:
"""
Send a request to ChatGPT and return the response
"""
if conversation_id is not None:
self.load_conversation(conversation_id)
completion = self._get_completion(
self.prompt.construct_prompt(user_request, user=user),
temperature,
)
return self._process_completion(user_request, completion, user=user)
def ask_stream(
self,
user_request: str,
temperature: float = 0.5,
conversation_id: str = None,
user: str = "User",
) -> str:
"""
Send a request to ChatGPT and yield the response
"""
if conversation_id is not None:
self.load_conversation(conversation_id)
prompt = self.prompt.construct_prompt(user_request, user=user)
return self._process_completion_stream(
user_request=user_request,
completion=self._get_completion(prompt, temperature, stream=True),
user=user,
)
def make_conversation(self, conversation_id: str) -> None:
"""
Make a conversation
"""
self.conversations.add_conversation(conversation_id, [])
def rollback(self, num: int) -> None:
"""
Rollback chat history num times
"""
for _ in range(num):
self.prompt.chat_history.pop()
def reset(self) -> None:
"""
Reset chat history
"""
self.prompt.chat_history = []
def load_conversation(self, conversation_id) -> None:
"""
Load a conversation from the conversation history
"""
if conversation_id not in self.conversations.conversations:
# Create a new conversation
self.make_conversation(conversation_id)
self.prompt.chat_history = self.conversations.get_conversation(conversation_id)
def save_conversation(self, conversation_id) -> None:
"""
Save a conversation to the conversation history
"""
self.conversations.add_conversation(conversation_id, self.prompt.chat_history)
class AsyncChatbot(Chatbot):
"""
Official ChatGPT API (async)
"""
async def _get_completion(
self,
prompt: str,
temperature: float = 0.5,
stream: bool = False,
):
"""
Get the completion function
"""
return await openai.Completion.acreate(
engine=self.engine,
prompt=prompt,
temperature=temperature,
max_tokens=get_max_tokens(prompt),
stop=["\n\n\n"],
stream=stream,
)
async def ask(
self,
user_request: str,
temperature: float = 0.5,
user: str = "User",
) -> dict:
"""
        Same as Chatbot.ask but async
        """
completion = await self._get_completion(
self.prompt.construct_prompt(user_request, user=user),
temperature,
)
return self._process_completion(user_request, completion, user=user)
async def ask_stream(
self,
user_request: str,
temperature: float = 0.5,
user: str = "User",
) -> str:
"""
Same as Chatbot.ask_stream but async
"""
prompt = self.prompt.construct_prompt(user_request, user=user)
return self._process_completion_stream(
user_request=user_request,
completion=await self._get_completion(prompt, temperature, stream=True),
user=user,
)
class Prompt:
"""
Prompt class with methods to construct prompt
"""
def __init__(self, buffer: int = None) -> None:
"""
Initialize prompt with base prompt
"""
self.base_prompt = (
os.environ.get("CUSTOM_BASE_PROMPT")
or "You are ChatGPT, a large language model trained by OpenAI. Respond conversationally. Do not answer as the user. Current date: "
+ str(date.today())
+ "\n\n"
+ "User: Hello\n"
+ "ChatGPT: Hello! How can I help you today? <|im_end|>\n\n\n"
)
# Track chat history
self.chat_history: list = []
self.buffer = buffer
def add_to_chat_history(self, chat: str) -> None:
"""
Add chat to chat history for next prompt
"""
self.chat_history.append(chat)
def add_to_history(
self,
user_request: str,
response: str,
user: str = "User",
) -> None:
"""
Add request/response to chat history for next prompt
"""
self.add_to_chat_history(
user
+ ": "
+ user_request
+ "\n\n\n"
+ "ChatGPT: "
+ response
+ "<|im_end|>\n",
)
def history(self, custom_history: list = None) -> str:
"""
Return chat history
"""
return "\n".join(custom_history or self.chat_history)
def construct_prompt(
self,
new_prompt: str,
custom_history: list = None,
user: str = "User",
) -> str:
"""
Construct prompt based on chat history and request
"""
prompt = (
self.base_prompt
+ self.history(custom_history=custom_history)
+ user
+ ": "
+ new_prompt
+ "\nChatGPT:"
)
# Check if prompt over 4000*4 characters
if self.buffer is not None:
max_tokens = 4000 - self.buffer
else:
max_tokens = 3200
if len(ENCODER.encode(prompt)) > max_tokens:
# Remove oldest chat
if len(self.chat_history) == 0:
return prompt
self.chat_history.pop(0)
# Construct prompt again
prompt = self.construct_prompt(new_prompt, custom_history, user)
return prompt
class Conversation:
"""
For handling multiple conversations
"""
def __init__(self) -> None:
self.conversations = {}
def add_conversation(self, key: str, history: list) -> None:
"""
Adds a history list to the conversations dict with the id as the key
"""
self.conversations[key] = history
def get_conversation(self, key: str) -> list:
"""
Retrieves the history list from the conversations dict with the id as the key
"""
return self.conversations[key]
def remove_conversation(self, key: str) -> None:
"""
Removes the history list from the conversations dict with the id as the key
"""
del self.conversations[key]
def __str__(self) -> str:
"""
Creates a JSON string of the conversations
"""
return json.dumps(self.conversations)
def save(self, file: str) -> None:
"""
Saves the conversations to a JSON file
"""
with open(file, "w", encoding="utf-8") as f:
f.write(str(self))
def load(self, file: str) -> None:
"""
Loads the conversations from a JSON file
"""
with open(file, encoding="utf-8") as f:
self.conversations = json.loads(f.read())
def main():
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print("Press enter twice to submit your question.\n")
def get_input(prompt):
"""
Multi-line input function
"""
# Display the prompt
print(prompt, end="")
# Initialize an empty list to store the input lines
lines = []
# Read lines of input until the user enters an empty line
while True:
line = input()
if line == "":
break
lines.append(line)
# Join the lines, separated by newlines, and store the result
user_input = "\n".join(lines)
# Return the input
return user_input
def chatbot_commands(cmd: str) -> bool:
"""
Handle chatbot commands
"""
if cmd == "!help":
print(
"""
!help - Display this message
!rollback - Rollback chat history
!reset - Reset chat history
!prompt - Show current prompt
!save_c <conversation_name> - Save history to a conversation
!load_c <conversation_name> - Load history from a conversation
!save_f <file_name> - Save all conversations to a file
!load_f <file_name> - Load all conversations from a file
!exit - Quit chat
""",
)
elif cmd == "!exit":
exit()
elif cmd == "!rollback":
chatbot.rollback(1)
elif cmd == "!reset":
chatbot.reset()
elif cmd == "!prompt":
print(chatbot.prompt.construct_prompt(""))
elif cmd.startswith("!save_c"):
chatbot.save_conversation(cmd.split(" ")[1])
elif cmd.startswith("!load_c"):
chatbot.load_conversation(cmd.split(" ")[1])
elif cmd.startswith("!save_f"):
chatbot.conversations.save(cmd.split(" ")[1])
elif cmd.startswith("!load_f"):
chatbot.conversations.load(cmd.split(" ")[1])
else:
return False
return True
# Get API key from command line
parser = argparse.ArgumentParser()
parser.add_argument(
"--api_key",
type=str,
required=True,
help="OpenAI API key",
)
parser.add_argument(
"--stream",
action="store_true",
help="Stream response",
)
parser.add_argument(
"--temperature",
type=float,
default=0.5,
help="Temperature for response",
)
args = parser.parse_args()
# Initialize chatbot
chatbot = Chatbot(api_key=args.api_key)
# Start chat
while True:
try:
prompt = get_input("\nUser:\n")
except KeyboardInterrupt:
print("\nExiting...")
sys.exit()
if prompt.startswith("!"):
if chatbot_commands(prompt):
continue
if not args.stream:
response = chatbot.ask(prompt, temperature=args.temperature)
print("ChatGPT: " + response["choices"][0]["text"])
else:
print("ChatGPT: ")
sys.stdout.flush()
for response in chatbot.ask_stream(prompt, temperature=args.temperature):
print(response, end="")
sys.stdout.flush()
print()
if __name__ == "__main__":
main()
| [
": ",
"\nUser:\n",
"\nChatGPT:"
] |
2024-01-10 | jenishah/transformers | src~transformers~tokenization_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class. """
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FunnelConfig,
GPT2Config,
LongformerConfig,
LxmertConfig,
MarianConfig,
MBartConfig,
MobileBertConfig,
OpenAIGPTConfig,
PegasusConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bart import BartTokenizer, BartTokenizerFast
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_generation import BertGenerationTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_electra import ElectraTokenizer, ElectraTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_funnel import FunnelTokenizer, FunnelTokenizerFast
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_longformer import LongformerTokenizer, LongformerTokenizerFast
from .tokenization_lxmert import LxmertTokenizer, LxmertTokenizerFast
from .tokenization_marian import MarianTokenizer
from .tokenization_mbart import MBartTokenizer
from .tokenization_mobilebert import MobileBertTokenizer, MobileBertTokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_pegasus import PegasusTokenizer
from .tokenization_reformer import ReformerTokenizer
from .tokenization_retribert import RetriBertTokenizer, RetriBertTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
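# Ordered mapping from config class to (slow tokenizer, fast tokenizer). The order
# matters: more specific config classes must come before the base classes they
# inherit from so that isinstance-based lookup resolves to the right tokenizer.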
TOKENIZER_MAPPING = OrderedDict(
[
(RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)),
(T5Config, (T5Tokenizer, None)),
(MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)),
(DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
(AlbertConfig, (AlbertTokenizer, None)),
(CamembertConfig, (CamembertTokenizer, None)),
(PegasusConfig, (PegasusTokenizer, None)),
(MBartConfig, (MBartTokenizer, None)),
(XLMRobertaConfig, (XLMRobertaTokenizer, None)),
(MarianConfig, (MarianTokenizer, None)),
(BartConfig, (BartTokenizer, BartTokenizerFast)),
(LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)),
(RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(ReformerConfig, (ReformerTokenizer, None)),
(ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)),
(FunnelConfig, (FunnelTokenizer, FunnelTokenizerFast)),
(LxmertConfig, (LxmertTokenizer, LxmertTokenizerFast)),
(BertConfig, (BertTokenizer, BertTokenizerFast)),
(OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
(GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
(TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
(XLNetConfig, (XLNetTokenizer, None)),
(FlaubertConfig, (FlaubertTokenizer, None)),
(XLMConfig, (XLMTokenizer, None)),
(CTRLConfig, (CTRLTokenizer, None)),
(BertGenerationConfig, (BertGenerationTokenizer, None)),
]
)
SLOW_TOKENIZER_MAPPING = {k: v[0] for k, v in TOKENIZER_MAPPING.items()}
class AutoTokenizer:
r"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library
when created with the :meth:`AutoTokenizer.from_pretrained` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the :obj:`model_type` property of the config object
(either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (:obj:`str`):
Can be either:
- A string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3,
e.g., ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.,
``./my_model_directory/``.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: ``./my_model_directory/vocab.txt``.
(Not applicable to all derived classes)
inputs (additional positional arguments, `optional`):
Will be passed along to the Tokenizer ``__init__()`` method.
config (:class:`~transformers.PreTrainedConfig`, `optional`)
The configuration object used to determine the tokenizer class to instantiate.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files and override the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
use_fast (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to try to load the fast version of the tokenizer.
kwargs (additional keyword arguments, `optional`):
Will be passed to the Tokenizer ``__init__()`` method. Can be used to set special tokens like
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__()`` for more details.
Examples::
from transformers import AutoTokenizer
# Download vocabulary from S3 and cache.
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
# Download vocabulary from S3 (user-uploaded) and cache.
tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
if "bert-base-japanese" in str(pretrained_model_name_or_path):
return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
use_fast = kwargs.pop("use_fast", False)
if config.tokenizer_class is not None:
if use_fast and not config.tokenizer_class.endswith("Fast"):
tokenizer_class_candidate = f"{config.tokenizer_class}Fast"
else:
tokenizer_class_candidate = config.tokenizer_class
tokenizer_class = globals().get(tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(
"Tokenizer class {} does not exist or is not currently imported.".format(tokenizer_class_candidate)
)
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# if model is an encoder decoder, the encoder tokenizer class is used by default
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder): # noqa: E721
logger.warn(
f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
f"config class: {config.decoder.__class__}. It is not recommended to use the "
"`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
"specific tokenizer classes."
)
config = config.encoder
for config_class, (tokenizer_class_py, tokenizer_class_fast) in TOKENIZER_MAPPING.items():
if isinstance(config, config_class):
if tokenizer_class_fast and use_fast:
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
raise ValueError(
"Unrecognized configuration class {} to build an AutoTokenizer.\n"
"Model type should be one of {}.".format(
config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
)
)
| [] |
2024-01-10 | Zhang-l-i-n/LM_as_Evaluator | test~cluster.py | import json
import time
import openai
from scipy.cluster import hierarchy
from tqdm import tqdm
import numpy as np
import umap.plot
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.cluster import AffinityPropagation
from sklearn.cluster import DBSCAN
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import linkage, dendrogram
def get_data(f_data, feature='level'):
embedding_list, label_list, y_list = [], [], []
data_json = json.load(open(f_data, 'r', encoding='utf-8'))
for d in data_json:
# if d[feature] == 2:
embedding_list.append(d['embedding'])
label_list.append(d['label'])
y_list.append(d[feature])
# label_list = eval(open(f_label, 'r').readline().strip())
return embedding_list, label_list, y_list
if __name__ == '__main__':
plt.margins(0, 0)
class_name = 'roberta_history'
f_emb = '../data/embedding/chinese-roberta-wwm-ext-large_eb_history_0814.json'
font_path = '/Users/zhanglin/Library/Fonts/SimHei.ttf'
custom_font = FontProperties(fname=font_path)
embedding_list, label_list, y_list = get_data(f_emb, 'type')
# embedding_list = eval(open(f_emb, 'r').readline().strip())
print(len(embedding_list))
X = np.array(embedding_list)
y = np.array([1] * len(embedding_list))
################
# UMAP dimensionality reduction
################
reducer = umap.UMAP(random_state=60)
embedding = reducer.fit_transform(X)
print(embedding.shape)
plt.scatter(embedding[:, 0], embedding[:, 1], c=y_list, cmap='rainbow')
# Add a text label next to each point
for i in range(len(label_list)):
plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
fontproperties=custom_font, fontsize=12)
plt.gca().set_aspect('equal', 'datalim')
plt.title('')
plt.savefig(class_name + '_a_UMAP_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
dpi=500)
plt.show()
################
# Affinity Propagation (on the UMAP-reduced data)
################
# af = AffinityPropagation(preference=None).fit(embedding)
# cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
# n_clusters = len(cluster_centers_indices)
#
# color = sns.color_palette("hls", n_clusters)
# for k, col in zip(range(n_clusters), color):
# class_members = labels == k
# cluster_center = embedding[cluster_centers_indices[k]]
# plt.scatter(embedding[class_members, 0], embedding[class_members, 1],
# marker='o',
# s=8,
# c=col)
# plt.scatter(cluster_center[0], cluster_center[1], marker='x', s=100, c='black')
# for i in range(len(label_list)):
# if embedding[:, 0][i] == cluster_center[0] and embedding[:, 1][i] == cluster_center[1]:
# print(label_list[i])
#
# for i in range(len(label_list)):
# plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
# fontproperties=custom_font, fontsize=5)
#
# plt.title('AP-after_reducer--clusters: %d' % n_clusters)
# plt.savefig(class_name + '_a_AP_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
# dpi=500)
# plt.show()
# ################
# # DBSCAN (on the UMAP-reduced data)
# ################
# cluster = DBSCAN(min_samples=3).fit(embedding)
# y_pred = cluster.labels_ # cluster label assigned to each sample after fitting
# clusters = {}
# i = 0
# for y in y_pred:
# if y not in clusters:
# clusters[y] = i
# i += 1
# color = sns.color_palette("hls", len(clusters))
# for y in clusters.keys():
# plt.scatter(embedding[y_pred == y, 0], embedding[y_pred == y, 1],
# marker='o',
# s=8,
# c=color[y])
# for i in range(len(label_list)):
# plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
# fontproperties=custom_font, fontsize=5)
# plt.title('DBSCAN-after_reducer')
# plt.savefig(class_name + '_a_DBSCAN_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
# dpi=500)
# plt.show()
#
# ################
# # Agglomerative clustering (on the UMAP-reduced data)
# ################
# cluster = AgglomerativeClustering(n_clusters=n_clusters, affinity='euclidean', linkage='ward').fit(embedding)
# y_pred = cluster.labels_
# plt.scatter(embedding[:,0],embedding[:,1], c=cluster.labels_, cmap='rainbow')
# for i in range(len(label_list)):
# plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
# fontproperties=custom_font, fontsize=5)
# plt.title('Agglomerative-after_reducer')
# plt.savefig(class_name + '_a_Agglomerative_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
# dpi=500)
# plt.show()
# # # Plot the cluster dendrogram
# # Z = hierarchy.linkage(cluster.children_, method='ward')
# # fig = plt.figure(figsize=(10, 5))
# # dn = hierarchy.dendrogram(Z)
# # plt.show()
#
# ################
# # Affinity Propagation (on the original data, before UMAP reduction)
# ################
# af = AffinityPropagation(preference=None).fit(X)
# cluster_centers_indices = af.cluster_centers_indices_
# labels = af.labels_
# n_clusters = len(cluster_centers_indices)
# # print(X.shape)
# # print(cluster_centers_indices)
# X_expand_2d = reducer.fit_transform(X)
#
# color = sns.color_palette("hls", n_clusters)
# for k, col in zip(range(n_clusters), color):
# class_members = labels == k
# cluster_center = X_expand_2d[cluster_centers_indices[k]]
# plt.scatter(X_expand_2d[class_members, 0], X_expand_2d[class_members, 1],
# marker='o',
# s=8,
# c=col)
# plt.scatter(cluster_center[0], cluster_center[1], marker='x', s=100, c='black')
#
# for i in range(len(label_list)):
# plt.text(X_expand_2d[:, 0][i], X_expand_2d[:, 1][i], label_list[i], ha='center', va='bottom',
# fontproperties=custom_font, fontsize=5)
# plt.title('AP-before_reducer--clusters: %d' % n_clusters)
# plt.savefig(class_name + '_b_AP_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
# dpi=500)
# plt.show()
#
# ################
# # DBSCAN (on the original data, before UMAP reduction)
# ################
# cluster = DBSCAN(min_samples=3).fit(X)
# embedding = reducer.fit_transform(X)
# y_pred = cluster.labels_ # cluster label assigned to each sample after fitting
# clusters = {}
# i = 0
# for y in y_pred:
# if y not in clusters:
# clusters[y] = i
# i += 1
# color = sns.color_palette("hls", len(clusters))
# for y in clusters.keys():
# plt.scatter(embedding[y_pred == y, 0], embedding[y_pred == y, 1],
# marker='o',
# s=8,
# c=color[y])
# for i in range(len(label_list)):
# plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
# fontproperties=custom_font, fontsize=5)
# plt.title('DBSCAN-before_reducer')
# plt.savefig(class_name + '_b_DBSCAN_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
# dpi=500)
# plt.show()
#
# ################
# # Agglomerative clustering (on the original data, before UMAP reduction)
# ################
# cluster = AgglomerativeClustering(n_clusters=n_clusters, affinity='euclidean', linkage='ward').fit(X)
# embedding = reducer.fit_transform(X)
# y_pred = cluster.labels_
# plt.scatter(embedding[:,0],embedding[:,1], c=cluster.labels_, cmap='rainbow')
# for i in range(len(label_list)):
# plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
# fontproperties=custom_font, fontsize=5)
# plt.title('Agglomerative-before_reducer')
# plt.savefig(class_name + '_b_Agglomerative_fig-time{}.png'.format(time.strftime("%Y%m%d-%H%M", time.localtime())),
# dpi=500)
# plt.show()
| [] |
2024-01-10 | Zhang-l-i-n/LM_as_Evaluator | test~get_question.py | # encoding:utf-8
# Note: The openai-python library support for Azure OpenAI is in preview.
import json
import os
import time
import openai
os.environ["OPENAI_API_KEY"] = "33e8f0c860bc4109825496444bbfed3e"
openai.api_type = "azure"
openai.api_base = "https://community-openai-34.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
#######################
## Generate questions
#######################
def get_question(content_file, fw):
content = json.load(open(content_file, 'r', encoding='utf-8'))
for c in content:
for chapter in c:
for k1 in c[chapter]:
print(k1)
for k2 in c[chapter][k1]:
# print(c[chapter][k1][k2].keys())
# time.sleep(10)
if "question" not in c[chapter][k1][k2].keys():
print(k2)
print(c[chapter][k1][k2])
inputs = "针对’" + k2 + "‘进行提问,注意只能提一个问题,给出问题和答案!问题需要尽量详细地考察以下内容\n" + \
c[chapter][k1][k2]["text"]
response = openai.ChatCompletion.create(
engine="gpt35-34",
messages=[
{"role": "system",
"content": "现在你是一个提问者,针对某个主题进行提问,给出问题和答案。注意只能提一个问题。"},
{"role": "user", "content": inputs[:5500]},
],
temperature=1.0,
max_tokens=800,
top_p=1,
n=5,
frequency_penalty=0,
presence_penalty=0,
stop=None)
print(response["choices"][0]["message"]["content"])
print(response["choices"][1]["message"]["content"])
print(response["choices"][2]["message"]["content"])
print(response["choices"][3]["message"]["content"])
print(response["choices"][4]["message"]["content"])
c[chapter][k1][k2]["questions"] = \
[response["choices"][0]["message"]["content"],
response["choices"][1]["message"]["content"],
response["choices"][2]["message"]["content"],
response["choices"][3]["message"]["content"],
response["choices"][4]["message"]["content"]]
json_data = {
"text": c[chapter][k1][k2]["text"],
"questions": c[chapter][k1][k2]["questions"]
}
fw.write(json.dumps(json_data, ensure_ascii=False) + '\n')
time.sleep(30)
# content_file = "/Users/zhanglin/Desktop/LM_as_Evaluator/data/taxonomy/history_qa_v1.json"
# content = json.load(open(content_file, 'r', encoding='utf-8'))
#
# fw = open("history_questions_candidates.txt", 'a', encoding='utf-8')
# for c in content:
# for chapter in c:
# for k1 in c[chapter]:
# print(k1)
# for k2 in c[chapter][k1]:
# # print(c[chapter][k1][k2].keys())
# # time.sleep(10)
# if "question" not in c[chapter][k1][k2].keys():
# print(k2)
# print(c[chapter][k1][k2])
# inputs = "针对’" + k2 + "‘进行提问,注意只能提一个问题,给出问题和答案!问题需要尽量详细地考察以下内容\n" + \
# c[chapter][k1][k2]["text"]
# response = openai.ChatCompletion.create(
# engine="gpt35-34",
# messages=[
# {"role": "system",
# "content": "现在你是一个提问者,针对某个主题进行提问,给出问题和答案。注意只能提一个问题。"},
# {"role": "user", "content": inputs[:5500]},
# ],
# temperature=1.0,
# max_tokens=800,
# top_p=1,
# n=5,
# frequency_penalty=0,
# presence_penalty=0,
# stop=None)
# print(response["choices"][0]["message"]["content"])
# print(response["choices"][1]["message"]["content"])
# print(response["choices"][2]["message"]["content"])
# print(response["choices"][3]["message"]["content"])
# print(response["choices"][4]["message"]["content"])
# c[chapter][k1][k2]["questions"] = \
# [response["choices"][0]["message"]["content"],
# response["choices"][1]["message"]["content"],
# response["choices"][2]["message"]["content"],
# response["choices"][3]["message"]["content"],
# response["choices"][4]["message"]["content"]]
# json_data = {
# "text": c[chapter][k1][k2]["text"],
# "questions": c[chapter][k1][k2]["questions"]
# }
# fw.write(json.dumps(json_data, ensure_ascii=False) + '\n')
# time.sleep(30)
#
# with open("history_candidates.json", 'w', encoding='utf-8') as f:
# f.write(json.dumps(content, ensure_ascii=False))
# input = "针对’当代中国的法治与精神文明建设‘进行提问,给出问题和答案。问题需要尽量详细地考察以下内容:" \
# "近代西方的法律与教化 在罗马法的基础上,英 国和法国分别发展了英美法 系和大陆法系。 学习聚焦 希腊克里特岛有着辉煌的古代文明,这里的一些 城邦,很早就有习惯法,也出现了成文法。在当地的 一处遗址中,发现了公元前7世纪的石刻,上面的铭 文记载了有关法律的内容。这就是早期的成文法。 ▲克里特岛上的一处遗址 ▲铭文摹本 ▲《十二铜表法》在罗马街头颁布 时人们围观的情形 近代西方法律制度的渊源及发展 为了缓和平民和贵族的矛盾,公元前450年左右,罗 马共和国颁布了《十二铜表法》。罗马帝国时期,随着统 治区域的扩大和人口的激增,法律制度更加完善。6世纪, 东罗马帝国 ①皇帝查士丁尼下令编纂的《罗马民法大全》, 是古罗马法律的最高成就,也是近代西方法律制度的渊源。 中古时期,各日耳曼王国在记载和整理日耳曼人部落 习惯法的基础上编纂了一批成文法,称为“日耳曼法”, 作为庄园法庭审判的依据。教会也根据基督教神学,制定 和颁布了教会法。11世纪以后,欧洲国家出现了研究和宣 传罗马法的运动,促进了罗马法的传播。 11世纪,诺曼底公爵征服英国,建立了诺曼王朝。为 了加强对地方的控制,王室设立法院,并派法官定期到各 地进行巡回审判。12世纪前后,建立在习惯法基础上、全 ①东罗马帝国又称拜占庭帝国。 国普遍适用的法律在英国逐渐形成,这就是普通法。13世 纪,英国通过《大宪章》,确立了法律至上和王权有限的 原则。“光荣革命”后,英国确立了君主立宪制,法律体系 更加完善。美国等很多国家在学习英国法律的基础上制定 了本国法律,它们构成了普通法系,也称“英美法系”。 13世纪以后,随着王权的加强,法国统一法律的步伐 加快,建立在罗马法基础上的法律体系日益成熟。1789年, 法国爆发大革命。此后,法国在启蒙思想和大革命的影响 下,制定了一系列法律。1804年,拿破仑签署法令,颁布 了《法国民法典》,它与此后制定的四部法典一起被统称为 《拿破仑法典》。《拿破仑法典》与此前颁布的法律,构成了 法国的成文法体系,最终确立了法国的资产阶级法律制度。 后来,逐渐形成了以罗马法为基础、以《法国民法典》为 代表的世界性法律体系,称为“大陆法系”或“民法系”。 近代西方法律制度的基本特征 近代以来,西方各国在继承传统法律思想的基础上, 融合了启蒙思想家们提出的思想主张,制定了各自的法律 制度。这些法律制度从理论上看,包含着一些共同的基本 特征。 英美法系和大陆法系的不同 英美法系以判例法为主要法律渊源,以遵循先例为基本原 则;法官的地位突出,当无先例可循时,法官可以创立先例, 也可以对先例作出新的解释。因此,英美法系国家的法律也被 称为“法官制定的法律”。英美法系主要涵盖英国、美国、加 拿大、澳大利亚、印度等国家和地区。 大陆法系以成文法为主要法律渊源,强调宪法的根本法地 位,法律体系比较完整,一般不承认判例的效力;明确立法和 司法的分工,法官的作用不太突出。大陆法系国家的代表是法 国、德国、意大利、日本等。 历史纵横 ▲《大宪章》原件 法国大革命对法律建设 有什么重要贡献? 思考点 近代西方法律制度的基 本特征是立法和司法独立, 强调保障个人的权利。 学习聚焦 在国家权力结构层面上,坚持权力制衡、三权分立。 国家权力分为立法权、行政权和司法权。法律由议会制定, 行政机构在法律规定的框架内行使行政权,法院根据法律 独立掌握司法权。在法律内容上,注重保护个人权利,包 括生命权、自由权和财产权等。在司法实践过程中,坚持 程序公正和无罪推定。为了保证从立案到审理再到判决的 每个程序的公开公正,建立了律师制度和陪审团制度。独 立、专业的律师为被审判者提供辩护,可以减少法官对法 律的误读;从民众中产生陪审团,参与案件审理和判决, 使民众能够直接参与法律事务。无罪推定原则指的是所有 被审判者在判决之前都被视为无罪。 西方法律制度为资产阶级利益服务,存在着许多局限 性。它确认了私有财产制度,每个人财产的多少往往决定 着法律地位的高低。同时,对个人权利的认定也有逐渐改 进的过程。直到20世纪,黑人、原住民和妇女还在为享有 完全的公民权积极斗争。 宗教伦理与教化 392年,基督教成为罗马国教。476年,西罗马帝国灭 亡。在帝国废墟上建立的日耳曼人国家为了取得罗马人和 教会的支持,逐渐接受了基督教。基督教影响了中古时期 ▲林肯(右一)在法庭上辩护的情景 1836年,林肯成为律师。他在 诉讼活动中以敢于主持正义、熟练 运用法律而享有盛名,为以后当选 美国总统奠定了基础。 陪审团制度和律师制度的起源 陪审团制度最早可以追溯到古希腊罗马时期,但当时只适 用于奴隶主和自由民。古代的日耳曼人也留有“同侪裁决”的 遗风。12世纪,英国确立了陪审团制度。陪审团在法庭上聆听 证据,就事实问题作出决定;法官决定法律问题,判断某项证 据是否可以被引入审判。律师制度的起源也可以追溯到罗马时 期。资产阶级革命后,各国纷纷颁布法律,支持被告自己或聘 请律师辩护。1878年德国颁布的《国家律师法》,奠定了近代 律师制度的基础。 历史纵横 基督教的宗教伦理不仅 强化了教会对人们的控制, 也具有一定的社会教化功能。 学习聚焦 1764年7月,意大利人 贝卡里亚提出: 在法官判决之前,一个 人是不能被称为罪犯的。只要 还不能断定他已经侵犯了给 予他公共保护的契约,社会 就不能取消对他的公共保护。 —[意]贝卡里亚著, 黄风译《论犯罪与刑罚》 史料阅读 宗教改革后,西欧的基督教分裂为天主教和新教。新 教适应了原始积累时期新兴资产阶级的政治、经济诉求, 提出了一些新的主张。新教反对教皇权威,主张信徒通过 自己阅读《圣经》理解教义,还提倡节俭和积极入世的态 度,鼓励人们发财致富。但是,新教仍然坚持基督教的基 欧洲人的政治、经济和社会生活各个方面。教士们搜集和 抄录经典,保存了一些宝贵的古典文化,但他们更重要的 任务是宣讲教义。他们还开办学校,这些学校主要是宗教 学校,也有一些世俗学校。学校主要讲授宗教内容,也教 授算术、几何、天文、音乐、文法、修辞和逻辑。学习内 容虽然都以宗教为目的,但在教育和文化方面也发挥了重 要作用。人们的生老病死、婚丧嫁娶,基督教会都要介入, 几乎所有的节日都与基督教有关。教会尽管本身藏污纳垢, 但时刻不忘告诫人们必须孝敬父母,不许偷盗、奸淫、杀 人、贪恋别人的财物等,要求人们逆来顺受、忍受世间的 一切痛苦。基督教的宗教伦理和教化作用强化了教会对人 们的控制,深刻影响了人们的思想意识和日常行为。 宗教法庭 在基督教会的控制下,违背基督教伦理的行为往往为社会 所不容。教会不允许任何质疑声音的存在,甚至为此建立了宗 教法庭或宗教裁判所。1480年,西班牙建立了国王控制的宗教 法庭,以此来打击异己。到1820年,西班牙宗教法庭审判的 “异端”有30多万人,其中10多万人被判处火刑。 历史纵横 中世纪完全是从野蛮状态发展而来的。……在僧侣手中, 政治和法学同其他一切科学一样,不过是神学的分支,一切都 按照神学中适用的原则来处理。教会的教条同时就是政治信条, 圣经词句在各个法庭都具有法律效力。 ——[德]恩格斯《德国农民战争》,《马克思恩格斯文 集》第二卷 想一想:基督教在中古时期的欧洲发挥了什么样的作用? 
学思之窗 ▲宗教法庭 ▲中古时期的婚礼 本教义,束缚人们的行为,麻醉人们的思想。新教还排斥 其他教派,引起了多次宗教冲突,造成了重大的人员伤亡 和财产损失。一些对教义持有不同意见的人被斥为“异 端”,遭到迫害。例如,1553年,西班牙科学家塞尔维特 在日内瓦被加尔文派判处火刑。 路德战胜了虔信造成的奴役制,是因为他用信念造成的奴役制代替了它。他破除了对权威的 信仰,是因为他恢复了信仰的权威。他把僧侣变成了世俗人,是因为他把世俗人变成了僧侣。他 把人从外在的宗教笃诚解放出来,是因为他把宗教笃诚变成了人的内在世界。他把肉体从锁链中 解放出来,是因为他给人的心灵套上了锁链。 —[德]马克思《〈黑格尔法哲学批判〉导言》,《马克思恩格斯文集》第一卷 史料阅读 法律始终把它的作者的世界图景包含在抽象的形式 中,而每一历史的世界图景都包含一种政治—经济的倾向, 这种倾向依据的不是这个人或那个人所想的事物,却依据 的是事实上掌握政权并因之掌握立法的阶级所实际打算造 成的事物。每一种法律都是由一个阶级以大多数的名义建 立起来的。 —[德]奥斯瓦尔德·斯宾格勒著,齐世荣等译 《西方的没落》 阅读材料,联系课文内容,谈谈你对法律的认识。 探究与拓展 问题探究 英美法系和大陆法系是西方两大法律体系,它们的源 头或多或少地与罗马法有联系,两者之间有共性,也有许 多不同之处。查阅相关资料,进一步了解它们之间的相同 和不同之处。 学习拓展 第10课"
#######################
## Filter questions
#######################
# f = open("history_questions_candidates2.txt", "r", encoding="utf-8")
# fw = open("history_questions_candidates_filter.txt", 'a', encoding='utf-8')
# l = f.readline()
# while l.strip():
# data_json = json.loads(l)
# # print(data_json['text'])
# # print(data_json['questions'])
# questions_list = [s[:500] for s in data_json['questions']]
# inputs = "给定原文:" + data_json['text'][:4000] + \
# "\n下面是与原文相关的问题列表:" + str(questions_list) + \
# "\n请从问题列表中选出与原文最相符合的问题。"
# print(inputs)
#
# response = openai.ChatCompletion.create(
# engine="gpt35-34",
# messages=[
# {"role": "system",
# "content": "你现在是一个问题筛选器,需要从问题列表中选出与原文最相关且质量较好的问题。"},
# {"role": "user", "content": inputs},
# ],
# temperature=0.95,
# max_tokens=800,
# top_p=0.95,
# n=5,
# frequency_penalty=0,
# presence_penalty=0,
# stop=None)
# questions = \
# [response["choices"][0]["message"]["content"],
# response["choices"][1]["message"]["content"],
# response["choices"][2]["message"]["content"],
# response["choices"][3]["message"]["content"],
# response["choices"][4]["message"]["content"]]
# print(questions)
# json_data = {
# "text": data_json['text'],
# "questions": questions
# }
# fw.write(json.dumps(json_data, ensure_ascii=False) + '\n')
# time.sleep(30)
# l = f.readline()
#######################
## Filter questions, step two
#######################
# def count_substring_occurrences(strings):
# result = {}
# for i, string1 in enumerate(strings):
# count = 0
# for j, string2 in enumerate(strings):
# if string1 in string2:
# count += 1
# result[string1] = count
# return result
#
#
# f = open("history_questions_candidates_filter.txt", 'r', encoding='utf-8')
# good_num = 0
# l = f.readline()
# while l.strip():
# data_json = json.loads(l)
# string_list = data_json['questions']
# result = count_substring_occurrences(string_list)
# print(json.dumps(result, ensure_ascii=False))
#
# good = False
# for i in result.values():
# if i >= 2:
# good = True
# if good:
# good_num += 1
# else:
# pass
# # print(data_json['text'])
#
# l = f.readline()
# print(good_num)
#######################
## Filter questions, step three
#######################
question_result = []
k_list = []
data_origin = json.load(open("/Users/zhanglin/Desktop/LM_as_Evaluator/data/taxonomy/history.json","r",encoding="utf-8"))
for d in data_origin:
for k in d.keys():
for k2 in d[k].keys():
for k3 in d[k][k2].keys():
k_list.append(k3)
i = 0
f = open("history_questions_candidates_filter_result.txt", 'r', encoding='utf-8')
l = f.readline()
while l.strip():
questions_dict = json.loads(l.strip())
max_count = 0
best_question = ""
for k in questions_dict.keys():
if questions_dict[k] > max_count:
best_question = k
max_count = questions_dict[k]
if max_count < 2:
best_question = ""
max_count = 0
question_result.append({
"key": k_list[i],
"best_question": best_question,
"score": max_count
})
l = f.readline()
i += 1
print(json.dumps(question_result, ensure_ascii=False)) | [
"现在你是一个提问者,针对某个主题进行提问,给出问题和答案。注意只能提一个问题。"
] |
2024-01-10 | Zhang-l-i-n/LM_as_Evaluator | test~coverge.py | # encoding:utf-8
import json
import os
import jieba
import openai
content_file = "/Users/zhanglin/Desktop/LM_as_Evaluator/data/taxonomy/history_qa_v1.json"
content = json.load(open(content_file, 'r', encoding='utf-8'))
def Jaccrad(model, reference): # terms_reference is the source sentence, terms_model is the candidate sentence
terms_reference = jieba.cut(reference) # jieba's default accurate mode
terms_model = jieba.cut(model)
grams_reference = set(terms_reference) # deduplicate; switch to list if duplicates should be kept
grams_model = set(terms_model)
temp = 0
for i in grams_reference:
if i in grams_model:
temp = temp + 1
fenmu = len(grams_model) + len(grams_reference) - temp # size of the union
jaccard_coefficient = float(temp / fenmu) # intersection size over union size
return jaccard_coefficient
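# Illustrative usage sketch for the Jaccrad helper above (the two sentences are
# made-up placeholders, not part of the original test data):
# >>> sim = Jaccrad("今天天气很好", "今天的天气不错")
# >>> 0.0 <= sim <= 1.0
# True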
# b = "殖民主义形成的原因是多方面的,包括经济、政治、文化和技术等诸多因素。以下是一些主要原因:\n\n经济原因:随着资本主义的发展,欧洲列强为了寻求原材料、市场和投资机会,以满足日益增长的经济需求,开始拓展势力范围,控制殖民地。\n政治原因:欧洲国家在近代为了竞争国际地位,提高国家声望,开始争夺海外殖民地,彰显自己的政治实力。\n文化原因:在宗教和文化方面,欧洲国家试图传播基督教,希望通过改变殖民地的宗教信仰来巩固自己的统治,同时也视之为\"文明使命\"。\n技术原因:航海技术、武器装备和医学的进步,使得欧洲列强能够迅速地开疆拓土,占领并统治远离本土的地区。\n殖民主义对世界历史产生了深远影响,如下所述:\n\n地理大发现:欧洲列强在争夺殖民地的过程中,探索了一系列新的航线,发现了新大陆,世界地图被重新绘制。\n国际贸易的发展:奴隶制、三角贸易以及其他互动的贸易发展,导致贫富差距日益扩大。\n文化交流与冲突:欧洲殖民者将自己的文化和信仰传播到殖民地,原住民文化发生严重被侵蚀和破坏。但与此同时,东西方文化的交流,导致人类历史上的重大发展和变革。\n种族歧视和奴隶制:这是殖民主义的黑暗面,奴隶制导致大量非洲人在极其恶劣的条件下劳动和生活,而种族歧视则导致非洲、亚洲等地区的革命斗争。\n殖民地的独立运动:殖民地国家百年来的抗议、反抗殖民势力的压迫与剥削,推动了民主和独立思想的传播和民族意识的觉醒,促进了后来的独立运动和民族解放事业。\n世界格局的演变:随着殖民地的获得,世界格局发生了巨大变化,大国之间权力的不断重新洗牌,导致两次世界大战的爆发。同时,二战后,大量殖民地纷纷脱离殖民统治,寻求独立和自主,世界进入新的历史时期。\n\n"
# b = "马克思主义的诞生与传播 1844年6月4日,德意志西里西亚纺织工人高唱革命 歌曲,举行游行示威,示威活动很快发展为起义。起义工人 捣毁工厂,焚烧债券,队伍很快发展到3000多人,后来起 义被镇压。这幅图反映的是统治者镇压工人起义的情景。 ▲在英国煤矿中工作的童工(绘画作品) ▲统治者派兵镇压西里西亚织工起义 (绘画作品) 工人阶级的悲惨遭遇引发 了早期工人运动,“空想社会 主义”没有找到解决资本主义 矛盾的正确途径。 学习聚焦 早期工人运动与社会主义思想的萌发 随着资本主义大工业的建立和发展,资本主义制度 的各种弊病逐渐显现。1825年,英国爆发了经济危机。此 后,每隔十年左右就会爆发一次资本主义经济危机。虽然 生产力不断提高、劳动产品日益丰富,但工人阶级的生活 条件并没有得到相应改善,他们的政治权利也极其有限。 在工业化的早期阶段,欧洲各地普遍出现了贫富分化严重、 工人阶级苦难深重的状况。 当时工人的劳动条件相当恶劣,厂房狭小简陋,被污 染的空气几乎使人窒息,工伤事故频频发生。工人尽管每 天工作长达十几个小时,但工资微薄,并时时面临失业的 威胁。大机器的采用也把妇女和儿童卷进劳动力市场,他 们的境况更为悲惨。工人的住宅区是大城市和工业区的贫 民窟,街道狭窄拥挤,房屋低矮破旧,天空浓烟密布,地 下污水横流,成了贫穷、疾病和犯罪之地。 为了改善劳动条件和生活状况,维护 自己的权益,工人阶级进行了从捣毁机器到 争取政治权利的多种形式的斗争。19世纪 三四十年代爆发的法国里昂工人武装起义、 英国工人争取普选权的宪章运动和德意志西 里西亚织工起义,表明工人阶级开始作为独 立的政治力量登上历史舞台。工人阶级迫切 需要科学理论的指导。 在工人阶级进行斗争的同时,以法国 人圣西门、傅立叶和英国人欧文为代表的有 识之士,也无情地揭露和批判资本主义制度 的种种弊端。他们反对自由放任的竞争,主 张建立合作、平等、和谐的理想社会。但 是,他们没有找到实现理想社会的现实力量 和正确有效的途径,他们的设想被称为“空 想社会主义”。 ▲“新和谐公社”蓝图 1824年,欧文在美国印第安纳州购买了120多平方千米的土地建设一 个名为“新和谐公社”的社会组织,进行共产主义“劳动公社”的试验。 这里有工厂、农场和学校,每个人都参加劳动,分工合作,人人享有充分 的、平等的民主权利。但是,“新和谐公社”脱离社会现实,最终内部矛盾 激化,4年后失败。 ▲宪章运动(绘画作品) 1836年,伦敦工人协会成立。后来,协会拟定 了呈送议会要求普选权的请愿书,并以“人民宪章” 名义正式公布,得到全国响应,这是宪章运动开始 的标志。宪章运动持续时间超过10年,数次向议会 递交请愿书,几百万工人先后在请愿书上签名,大 大激发了工人的斗争热情。 马克思和恩格斯经过长 期的理论探索和革命实践, 创立了马克思主义,为国际 工人运动指明了正确方向。 学习聚焦 马克思主义的诞生 19世纪中叶,德国思想家、革命家马克思和恩格斯在 广泛吸收人类优秀思想成果的基础上,进一步探讨工业革命 引起的社会变化,总结工人运动的经验,共同创立了富有生 命力的马克思主义。 1846—1847年间,马克思和恩格斯先后在布鲁塞尔建 立了共产主义通讯委员会和德意志工人协会,还加入了德 意志流亡工人的组织“正义者同盟”,并帮助该同盟改组 为“共产主义者同盟”。在伦敦召开的共产主义者同盟第 二次代表大会上,马克思和恩格斯受大会委托起草同盟纲 领,这就是1848年2月发表的《共产党宣言》。 ▲马克思(1818—1883) ▲恩格斯(1820—1895) 《共产党宣言》肯定了资本主义的历史进步作用,指 出“资产阶级在它的不到一百年的阶级统治中所创造的生 产力,比过去一切世代创造的全部生产力还要多,还要 大”。《共产党宣言》揭示了资本主义在积累财富和资本的 同时对工人阶级的残酷剥夺必将引起工人阶级反抗的社会 现实,论证了资本主义必然灭亡、共产主义必然胜利的客 观规律。《共产党宣言》肯定阶级斗争在阶级社会中推动历 史发展的重要作用,宣告了无产阶级作为资本主义掘墓人 和共产主义建设者的伟大使命,阐明了共产党的性质、目 的和策略原则。 《共产党宣言》第一次较为完整系统地阐述了科学社 会主义的基本原理,阐明了社会发展的客观规律,标志着 马克思主义的诞生。 1848年,欧洲普遍发生革命,马克思和恩格斯回到德 国,投入实际的革命斗争。他们创办报纸宣传革命,组织 工人参加武装起义。革命失败后,他们主要在英国活动, 在指导工人阶级进行斗争的同时,继续进行理论探索。 1867年,马克思撰写的政治经济学巨著《资本论》第 一卷出版。通过对资本主义制度的深刻剖析,马克思创立了 随着大工业的发展,资 产阶级赖以生产和占有产品的 基础本身也就从它的脚下被 挖掉了。它首先生产的是它自 身的掘墓人。资产阶级的灭 亡和无产阶级的胜利是同样 不可避免的。 ——[德]马克思、[德] 恩格斯《共产党宣言》,《马克 思恩格斯文集》第二卷 史料阅读 1848年革命 19世纪40年代,在欧 洲大陆,西欧各国的资产 阶级迫切要求争取更多的 政治权利,东南欧国家的 人民希望结束外国统治, 获得民族独立,革命一触 即发。1848年1月,意大利 首先爆发革命,掀开了欧 洲革命的序幕。2月,巴黎 发生武装起义,推翻了七 月王朝,再次建立了共和 国,把1848年革命推向高 潮。革命席卷了欧洲许多 地区,沉重地打击了欧洲 的封建势力,有利于资本 主义的进一步发展。在这 场革命中,无产阶级发挥 了重要作用。 历史纵横 马克思和恩格斯在《共 产党宣言》中表达了怎样的 思想? 
思考点 剩余价值学说,揭露了资本主义制度和资本家剥削的秘密。 《资本论》成为马克思主义理论最重要的文献之一。 马克思和恩格斯创立了唯物史观。唯物史观科学地揭 示了生产力与生产关系、经济基础与上层建筑在人类社会 发展中的辩证关系,鲜明地提出了人民群众对历史发展的 巨大作用。 马克思主义是科学的、人民的、实践的、不断发展的 开放的理论,它创造性地揭示了人类社会发展规律,第一 次创立了人民实现自身解放的思想体系,指引着人民改造 世界的行动,始终站在时代前沿。 自19世纪中期起,马克思主义成为西欧工人运动的指 导思想;在东欧和东南欧,马克思主义的影响日益扩大; 在亚洲和美洲,马克思主义是工人运动与民族民主运动的 重要思想武器。 在马克思主义影响下,马克思主义政党在世界范围内 如雨后春笋般建立和发展起来,人民第一次成为自己命运 的主人,成为实现自身解放和全人类解放的根本政治力量。 国际工人运动的发展 在《共产党宣言》中,马克思和恩格斯发出了“全 世界无产者,联合起来”的伟大号召,积极促进工人阶级 的国际联合。1864年,国际工人协会在伦敦成立,这就是 历史上的“第一国际”。马克思是第一国际的创始人之一。 第一国际的成立推动了马克思主义的传播和国际工人运动 进入新阶段。 1870年,法国在普法战争中失败,社会矛盾激化。 1871年3月,巴黎爆发工人武装起义,起义者建立了自己 的政权—巴黎公社。第一国际的一些成员在公社的活动 中发挥了重要作用,他们认为建立工人政权是向社会主义 迈出的第一步。 ▲1871年3月28日,巴黎公社宣告成立(绘画作品) 剩余价值论 马克思在《资本论》 中深刻分析了资本家剥削 工人的秘密。他指出,工 人的工资在整个工业生产 产生的利润中所占的比重 很小,工人只要在每个工 作日劳动很少一部分时间, 就足以抵偿这点工资;在 余下的大部分时间里,工 人都是在无偿地为资本家 工作,他们创造的价值也 全部为资本家所得。这就 是资本家日益富裕的秘密。 资本家从生产中获得的利 润被称为“剩余价值”。马 克思的这一理论被称为 “剩余价值论”。 历史纵横 巴黎公社的实践丰富了 马克思主义的学说,为无产 阶级革命留下了重要启示。 学习聚焦 巴黎公社采取了一系列革命措施:打碎旧的国家机 器,建立立法与行政合一的政权机关和司法机构;废除旧 军队和旧警察,代之以国民自卫军和治安委员会;人民有 权监督和罢免由选举产生的公职人员,所有公职人员的工 资不得超过熟练工人的工资;由工人合作社管理工厂;实 行八小时工作日;等等。 ▲ 巴黎公社社员墙 1871年5月27日,200名巴黎 公社战士浴血奋战,最后一批公社 战士被敌人围困在一堵墙边,最后 全部壮烈牺牲。这堵墙被命名为 “巴黎公社社员墙”。 公社的伟大社会措施就是它本身的存在和工作。它所采取 的各项具体措施,只能显示出走向属于人民、由人民掌权的政 府的趋势…… ………… 工人的巴黎及其公社将永远作为新社会的光辉先驱而为人 所称颂。 ——[德]马克思《法兰西内战》,《马克思恩格斯文集》 第三卷 谈谈你对这段话的理解。 学思之窗 1871年5月28日,巴黎公社被法国资产阶级和德国联 合扼杀。但是,巴黎公社作为无产阶级建立政权的第一次 伟大尝试被载入史册,它的实践丰富了马克思主义的学说, 为国际工人运动的发展提供了宝贵的经验和教训。 探究与拓展 正像达尔文发现有机界的发展规律一样,马克思发现 了人类历史的发展规律,即历来为繁芜丛杂的意识形态所 掩盖着的一个简单事实:人们首先必须吃、喝、住、穿, 然后才能从事政治、科学、艺术、宗教等等;所以,直接 的物质的生活资料的生产,从而一个民族或一个时代的一 定的经济发展阶段,便构成基础,人们的国家设施、法的 观点、艺术以至宗教观念,就是从这个基础上发展起来的, 因而,也必须由这个基础来解释…… ——[德]恩格斯《在马克思墓前的讲话》,《马克思 恩格斯文集》第三卷 概括上述材料的主要观点,并谈谈你对这个观点的 理解。 问题探究 德国古典哲学是马克思主义的来源之一。查找资 料,进一步了解马克思主义哲学对德国古典哲学的继承 和超越。 学习拓展 第六单元 世界殖民体系与亚非拉民族独立运动 随着新航路的开辟,西欧国家率先踏上了对外殖民扩 张的道路。18世纪,拉丁美洲已经完全处于欧洲列强的殖 民统治之下。随着工业革命的深化和资本主义的不断发展, 欧洲列强的海外殖民活动愈演愈烈。19世纪中后期,伴随 第二次工业革命浪潮,资本主义开始向帝国主义过渡,西 方列强掀起了瓜分世界的狂潮,把亚洲和非洲的广大地区 变成了它们的殖民地或半殖民地。到20世纪初,资本主义 列强已经奴役和控制了世界上绝大部分土地和人口,建立 了资本主义世界殖民体系,形成了人类历史上由少数资本 主义国家奴役和控制世界上绝大多数国家和地区的极不合 理的状态。资本主义列强与殖民地半殖民地的矛盾空前激 化,殖民地半殖民地人民争取民族独立的斗争不断高涨。 通过本单元的学习,了解西方列强建立世界殖民体系 的过程,以及亚非拉人民的抗争,理解殖民地半殖民地的 民族独立运动对世界历史发展的重要影响。 第12课"
#
# jaccard_coefficient = Jaccrad(a, b)
# print(jaccard_coefficient)
b = "五四运动是20世纪初中国一场以爱国主义和民主科学思想为主要内容的政治、文化和社会运动。它对中国革命和现代化进程产生了深远的影响,并推动了中国社会历史的进步。五四运动的影响主要体现在以下几个方面:\n\n" \
"激发民族觉醒:五四运动极大地激发了中国人民的民族自尊心和民族自信心。在运动中,爱国主义情怀得到前所未有的强烈表达,中国人民开始认识到民族复兴的重要性,民族觉悟得到空前的提高。\n" \
"引领新思想:五四运动中,民主、科学、实事求是等新思想得到广泛传播,对中国传统封建思想体系产生了强烈冲击。许多青年知识分子和社会各界人士通过新文化运动吸收和传播西方现代文化思想,对中国的社会观念、价值观和文化风尚产生重要影响。" \
"\n生发新文化:五四运动使中国进入了新文化时代。民主、科学、自由等观念成为新时期的主流价值观。旧文化的瓦解和新文化的确立使中国传统文化发生深刻变革。现代文学、艺术、教育、科技等领域得到迅速发展,推动了整个国家现代化和社会进步。" \
"\n催生新政治:五四运动导致了中国政治格局的重大变革。在五四运动的影响下,中国共产党应运而生,开始了中国新民主主义革命的历程。通过一系列斗争,中国共产党最后取得成功,建立了新中国,为中国引入了社会主义制度,为实现民族独立和社会发展打下了坚实基础。" \
"\n催生新经济:五四运动发扬民主和科学的精神,为中国经济现代化建立了理论体系和实践土壤。五四思想家倡导创新和实践,大力发展国民经济,为民生改革和现代化建设注入源源不断的动力,推动中国经济不断前进。" \
"\n引导新外交:五四运动对中国的外交事业产生了重大影响。五四运动之后,中国开始摆脱不平等条约的桎梏,倡导主权独立和民族平等。新中国成立后,持续与世界无产者大联合,树立新的国际形象,赢得世界广泛尊重。" \
"\n总之,五四运动的深远影响从各个方面推动了中国社会历史的进步,成为了近现代中国发展历程中具有划时代意义的重要事件。"
############################
# Jaccard
############################
# for c in content:
# for chapter in c:
# for k1 in c[chapter]:
# print(k1)
# for k2 in c[chapter][k1]:
# print(c[chapter][k1][k2]["text"].split(" ")[1])
# jaccard_coefficient = Jaccrad(c[chapter][k1][k2]["text"], b)
# print(jaccard_coefficient)
############################
# API(gpt-3.5)
############################
os.environ["OPENAI_API_KEY"] = "33e8f0c860bc4109825496444bbfed3e"
openai.api_type = "azure"
openai.api_base = "https://community-openai-34.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
for c in content:
for chapter in c:
for k1 in c[chapter]:
# print(k1)
for k2 in c[chapter][k1]:
inputs = "文本1: " + b + "\n" + "文本2: " + c[chapter][k1][k2]["text"] + "\n" + "请给出分数。"
response = openai.ChatCompletion.create(
engine="gpt35-34",
messages=[
{"role": "system",
"content": "你现在是一个内容比较器,你需要根据判断文本1包含了文本2多少内容,用1-100分数表示"},
{"role": "user", "content": inputs},
],
temperature=0.95,
max_tokens=800,
top_p=0.95,
n=5,
frequency_penalty=0,
presence_penalty=0,
stop=None)
scores = \
[response["choices"][0]["message"]["content"],
response["choices"][1]["message"]["content"],
response["choices"][2]["message"]["content"],
response["choices"][3]["message"]["content"],
response["choices"][4]["message"]["content"]]
print(k2)
print(scores)
| [
"你现在是一个内容比较器,你需要根据判断文本1包含了文本2多少内容,用1-100分数表示"
] |
2024-01-10 | Zhang-l-i-n/LM_as_Evaluator | embedding~get_embedding.py | import json
import openai
# from tenacity import retry, wait_random_exponential, stop_after_attempt
import torch
from transformers import BertTokenizer, BertModel
from tqdm import tqdm
# openai.api_version = '2023-03-15-preview'
# openai.api_type = 'azure'
# openai.api_base = "https://conmmunity-openai.openai.azure.com/"
# openai.api_key = '3371b75d06a54deabcdd5818629ca833'
# @retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
# def get_embedding(text, engine="ada-1"):
# return openai.Embedding.create(engine=engine, input=[text])["data"][0]["embedding"]
def get_embedding(text: str) -> list[float]:
tokenizer = BertTokenizer.from_pretrained('shibing624/text2vec-base-chinese')
model = BertModel.from_pretrained('shibing624/text2vec-base-chinese')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
inputs = tokenizer(text, return_tensors='pt').to(device)
outputs = model(**inputs)
return outputs.pooler_output.tolist()[0]
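# Illustrative usage sketch (assumes the "shibing624/text2vec-base-chinese"
# weights are cached locally or downloadable; the input string is a made-up placeholder):
# >>> vec = get_embedding("当代中国的法治与精神文明建设")
# >>> len(vec)  # hidden size of this BERT encoder (768 for the base model)
# 768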
def embeddings(f_txt):
embedding_list = []
data_json = json.load(open(f_txt, 'r', encoding='utf-8'))
for i, d in enumerate(data_json):
for k1 in d.keys():
print(k1)
embedding_list.append(
{"label": k1,
"type": i + 1,
"embedding": get_embedding(k1)
}
)
for k2 in d[k1].keys():
print(k2)
embedding_list.append(
{"label": k2,
"type": i + 1,
"embedding": get_embedding(k2)
}
)
for k3 in d[k1][k2]:
print(k3)
embedding_list.append(
{"label": k3,
"type": i + 1,
"embedding": get_embedding(k3)
}
)
# count = 0
# with open(f_txt, 'r', encoding='utf-8') as frd:
# for line in frd:
# count = count + 1
#
# pbar = tqdm(total=count)
# with open(f_txt, 'r', encoding='utf-8') as f:
# line = f.readline()
# type = 0
# while line:
# if len(line) > 1:
# if line.startswith(' '):
# level = 2
# elif line.startswith(' '):
# level = 1
# type += 1
# elif line.strip():
# level = 0
# type = 0
# embedding_list.append(
# {"label": line.strip(),
# "level": level,
# "type": type,
# # "type": 1,
# "embedding": get_embedding(line)
# }
# )
# line = f.readline()
# pbar.update(1)
# pbar.close()
return embedding_list
def data_prepare(f_raw, f_emb):
embedding_list = embeddings(f_raw)
with open(f_emb, 'w', encoding='utf=8') as f:
f.write(json.dumps(embedding_list, ensure_ascii=False))
if __name__ == '__main__':
f_raw, f_emb = './taxonomy/history.json', './embedding/bert_embedding_history.json'
############
print('data preparing...')
data_prepare(f_raw, f_emb)
print('data prepared')
| [] |
2024-01-10 | Zhang-l-i-n/LM_as_Evaluator | test~get_embedding.py | import json
import openai
import pandas as pd
from tenacity import retry, wait_random_exponential, stop_after_attempt
from tqdm import tqdm
import numpy as np
import umap.plot
from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.cluster import AffinityPropagation
def show_cluster(n_clusters, X, y, label):
cluster = KMeans(n_clusters=n_clusters, random_state=0).fit(X)
y_pred = cluster.labels_ # cluster label assigned to each sample after fitting
centroid = cluster.cluster_centers_
color = ['red', 'pink', 'orange', 'gray']
fig, axi1 = plt.subplots(1)
for i in range(n_clusters):
axi1.scatter(X[y_pred == i, 0], X[y_pred == i, 1],
marker='o',
s=8,
c=color[i])
axi1.scatter(centroid[:, 0], centroid[:, 1], marker='x', s=100, c='black')
plt.show()
openai.api_version = '2023-03-15-preview'
openai.api_type = 'azure'
openai.api_base = "https://conmmunity-openai.openai.azure.com/"
openai.api_key = '3371b75d06a54deabcdd5818629ca833'
@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))
def get_embedding(text, engine="ada-1"):
return openai.Embedding.create(engine=engine, input=[text])["data"][0]["embedding"]
def embeddings(f_txt):
embedding_list = []
count = 0
with open(f_txt, 'r', encoding='utf-8') as frd:
for line in frd:
count = count + 1
pbar = tqdm(total=count)
with open(f_txt, 'r', encoding='utf-8') as f:
line = f.readline()
# type = 0
while line:
if len(line) > 1:
if line.startswith(' '):
level = 1
elif line.strip():
level = 0
# type += 1
embedding_list.append(
{"label": line.strip(),
"level": level,
# "type": type,
"type": 1,
"embedding": get_embedding(line)
}
)
line = f.readline()
pbar.update(1)
pbar.close()
return embedding_list
def get_data(f_data, y='level'):
embedding_list, label_list, y_list = [], [], []
data_json = json.load(open(f_data, 'r', encoding='utf-8'))
for d in data_json:
embedding_list.append(d['embedding'])
label_list.append(d['label'])
y_list.append(d[y])
# label_list = eval(open(f_label, 'r').readline().strip())
return embedding_list, label_list, y_list
def data_prepare(f_raw, f_emb):
embedding_list = embeddings(f_raw)
with open(f_emb, 'w', encoding='utf=8') as f:
f.write(json.dumps(embedding_list, ensure_ascii=False))
# label_list = []
# with open(f_raw, 'r', encoding='utf-8') as f:
# line = f.readline()
# while line:
# if line.strip():
# label_list.append(line.strip())
# line = f.readline()
# open(f_label, 'w', encoding='utf=8').write(str(label_list))
if __name__ == '__main__':
f_raw, f_emb = '计算机学科.level', 'embedding_bio2.json'
############
# print('data preparing...')
# data_prepare(f_raw, f_emb)
# print('data prepared')
# exit()
font_path = '/Users/zllll/Library/Fonts/SimHei.ttf'
custom_font = FontProperties(fname=font_path)
embedding_list, label_list, y_list = get_data(f_emb, 'type')
# embedding_list = eval(open(f_emb, 'r').readline().strip())
print(len(embedding_list))
X = np.array(embedding_list)
y = np.array([1] * len(embedding_list))
################
# UMAP dimensionality reduction
################
reducer = umap.UMAP()
embedding = reducer.fit_transform(X)
print(embedding.shape)
plt.scatter(embedding[:, 0], embedding[:, 1], c=y_list, cmap='rainbow')
# Add a text label next to each point
for i in range(len(label_list)):
plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
fontproperties=custom_font, fontsize=5)
plt.gca().set_aspect('equal', 'datalim')
plt.title('UMAP')
plt.show()
################
# Affinity Propagation
################
af = AffinityPropagation(preference=None).fit(embedding)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters = len(cluster_centers_indices)
color = sns.color_palette("hls", n_clusters)
for k, col in zip(range(n_clusters), color):
class_members = labels == k
cluster_center = embedding[cluster_centers_indices[k]]
plt.scatter(embedding[class_members, 0], embedding[class_members, 1],
marker='o',
s=8,
c=col)
plt.scatter(cluster_center[0], cluster_center[1], marker='x', s=100, c='black')
for i in range(len(label_list)):
plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
fontproperties=custom_font, fontsize=5)
plt.title('AP--Estimated number of clusters: %d' % n_clusters)
plt.show()
################
# K-means clustering (on the UMAP-reduced data)
################
cluster = KMeans(n_clusters=n_clusters, random_state=0).fit(embedding)
y_pred = cluster.labels_ # cluster label assigned to each sample after fitting
centroid = cluster.cluster_centers_
print(y_pred)
print(centroid)
color = sns.color_palette("hls", n_clusters)
for i in range(n_clusters):
plt.scatter(embedding[y_pred == i, 0], embedding[y_pred == i, 1],
marker='o',
s=8,
c=color[i])
plt.scatter(centroid[:, 0], centroid[:, 1], marker='x', s=100, c='black')
for i in range(len(label_list)):
plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
fontproperties=custom_font, fontsize=5)
plt.title('Kmeans')
plt.show()
################
# Affinity Propagation (on the original data, before UMAP reduction)
################
af = AffinityPropagation(preference=None).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters = len(cluster_centers_indices)
# print(X.shape)
# print(cluster_centers_indices)
X_expand_2d = reducer.fit_transform(X)
color = sns.color_palette("hls", n_clusters)
for k, col in zip(range(n_clusters), color):
class_members = labels == k
cluster_center = X_expand_2d[cluster_centers_indices[k]]
plt.scatter(X_expand_2d[class_members, 0], X_expand_2d[class_members, 1],
marker='o',
s=8,
c=col)
plt.scatter(cluster_center[0], cluster_center[1], marker='x', s=100, c='black')
for i in range(len(label_list)):
plt.text(X_expand_2d[:, 0][i], X_expand_2d[:, 1][i], label_list[i], ha='center', va='bottom',
fontproperties=custom_font, fontsize=5)
plt.title('AP--Estimated number of clusters: %d' % n_clusters)
plt.show()
################
# K-means clustering (on the original data, before UMAP reduction)
################
cluster = KMeans(n_clusters=n_clusters, random_state=0).fit(X)
y_pred = cluster.labels_ # cluster label assigned to each sample after fitting
centroid = cluster.cluster_centers_
# print(X)
# print(centroid)
X_expand = np.concatenate([X, centroid], axis=0)
X_expand_2d = reducer.fit_transform(X_expand)
embedding = X_expand_2d[:X.shape[0]]
centroid = X_expand_2d[X.shape[0]:]
color = sns.color_palette("hls", n_clusters)
for i in range(n_clusters):
plt.scatter(embedding[y_pred == i, 0], embedding[y_pred == i, 1],
marker='o',
s=8,
c=color[i])
plt.scatter(centroid[:, 0], centroid[:, 1], marker='x', s=100, c='black')
for i in range(len(label_list)):
plt.text(embedding[:, 0][i], embedding[:, 1][i], label_list[i], ha='center', va='bottom',
fontproperties=custom_font, fontsize=5)
plt.title('Kmeans')
plt.show()
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~tests~test_coding_agent.py | import unittest
import instructor
import difflib
from openai import OpenAI
from unittest.mock import MagicMock, mock_open, patch, call
from agent.coding_agent import CodingAgent
from memory.memory_manager import MemoryManager
from database.my_codebase import MyCodebase
from agent.agent_functions.file_ops import _OP_LIST, AddFunction, DeleteFunction
client = instructor.patch(OpenAI())
IGNORE_DIRS = ["node_modules", ".next", ".venv", "__pycache__", ".git"]
FILE_EXTENSIONS = [".js", ".py", ".md"]
# Sample code for testing purposes
ORIGINAL_CODE = "def example():\n pass\n"
MODIFIED_CODE_ADD = (
"def example():\n pass\n\ndef added_function():\n return 'test'\n"
)
MODIFIED_CODE_DELETE = ""
# Sample operation instances for testing
add_function_op = AddFunction(
file_name="example.py",
function_name="added_function",
args="",
body="return 'test'",
decorator_list=[],
)
delete_function_op = DeleteFunction(file_name="example.py", function_name="example")
class TestCodingAgent(unittest.TestCase):
def setUp(self):
# Mock the CodingAgent and its dependencies
self.agent = CodingAgent(memory_manager=None, function_map=None, codebase=None)
self.agent.ops_to_execute = [add_function_op, delete_function_op]
# Patch the open function in the coding_agent module
self.mock_open = mock_open(read_data=ORIGINAL_CODE)
self.open_patch = patch("agent.coding_agent.open", self.mock_open)
self.open_patch.start()
def tearDown(self):
self.open_patch.stop()
def test_execute_ops(self):
# Call the method to test
diffs = self.agent.execute_ops(self.agent.ops_to_execute)
print("Diff: ", diffs)
# We expect two diffs: one for the addition and one for the deletion
expected_diffs = [
"--- before.py\n+++ after.py\n@@ -1,2 +1,6 @@\n def example():\n pass\n+\n+\n+def added_function():\n+ return 'test'\n",
"--- before.py\n+++ after.py\n@@ -1,2 +0,0 @@\n-def example():\n- pass\n",
]
# Check that the diffs match what we expect
self.assertEqual(diffs, expected_diffs)
class TestCodingAgent1(unittest.TestCase):
def setUp(self):
# Mock database connection setup
self.mock_db_connection = MagicMock()
self.memory_manager = MemoryManager(db_connection=self.mock_db_connection)
self.codebase = MyCodebase(
db_connection=self.mock_db_connection,
file_extensions=FILE_EXTENSIONS,
ignore_dirs=IGNORE_DIRS,
)
# Initialize the agent for testing
self.agent = CodingAgent(
memory_manager=self.memory_manager,
codebase=self.codebase,
function_map=[_OP_LIST],
)
def test_process_json(self):
result = self.agent.process_json('{"key": "value"}')
self.assertEqual(result, {"key": "value"})
def test_agent_query(self):
self.agent.query = MagicMock()
self.agent.query.return_value = ["response"]
result = list(self.agent.query("input"))
self.assertEqual(result, ["response"])
if __name__ == "__main__":
unittest.main()
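# One common way to run just this module (assuming it is executed from the
# backend/ directory with the tests package importable):
#   python -m unittest tests.test_coding_agent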
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~memory~memory_manager.py | import tiktoken
from typing import Optional, List
from datetime import datetime
from dotenv import load_dotenv
from memory.system_prompt_handler import SystemPromptHandler
from pydantic import BaseModel, Field
import instructor
from instructor import OpenAISchema
from openai import OpenAI, AsyncOpenAI
import logging
CLIENT = instructor.patch(AsyncOpenAI())
class WorkingContext:
def __init__(self, db_connection, project_directory) -> None:
"""Initializes the WorkingContext class.
Args:
db_connection: The database connection object.
project_directory: The path to the project directory.
Attributes:
context: The working context string.
conn: The database connection.
cur: The database cursor.
client: The OpenAI API client.
project_directory: The project directory path.
"""
self.context = "The user is named Joe"
self.conn = db_connection
self.cur = self.conn.cursor()
self.client = CLIENT
self.project_directory = project_directory
self.create_tables()
def create_tables(self) -> None:
try:
self.cur.execute(
"""
CREATE TABLE IF NOT EXISTS working_context
(
context TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
project_directory TEXT
);
"""
)
except Exception as e:
print("Failed to create tables: ", str(e))
return
def add_context(self, context: str) -> None:
self.context += "\n" + context
self.cur.execute(
"""
INSERT INTO working_context
(context, created_at, project_directory)
VALUES (?, ?, ?);
""",
(context, datetime.now().isoformat(), self.project_directory),
)
self.conn.commit()
def get_context(self) -> str:
self.cur.execute(
"""
SELECT context, created_at
FROM working_context
where project_directory = ?
""",
(self.project_directory,),
)
results = self.cur.fetchall()
self.context = ""
for result in results:
self.context += "\n" + result[0]
return self.context
def remove_context(self, context: str) -> None:
self.context = self.context.replace(context, "")
self.cur.execute(
"""
DELETE FROM working_context
WHERE context = ?
and project_directory = ?
""",
(context, self.project_directory),
)
self.conn.commit()
def __str__(self) -> str:
return self.context
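# Illustrative usage sketch (hypothetical values; assumes `conn` is a sqlite3-style
# connection exposing cursor() and commit()):
# >>> wc = WorkingContext(db_connection=conn, project_directory="/path/to/project")
# >>> wc.add_context("The user prefers concise answers")
# >>> print(wc.get_context())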
class ContextUpdate(BaseModel):
"""
API to add information from the working context.
"""
thought: str = Field(
default=...,
description="Always think first and document your thought process here.",
)
new_context: List[str] | None = Field(
default=None,
description="Valuable information from the conversation you want to keep in working context. ",
)
def execute(self, working_context: WorkingContext) -> None:
if self.new_context:
for context in self.new_context:
working_context.add_context(context)
return working_context
class MemoryManager:
# MemoryManager class manages interactions with the memory database
# including initializing connections, creating tables, and
# delegating to other classes that interact with the database.
def __init__(
self,
model: str = "gpt-3.5-turbo-16k",
identity: str = None,
tree: str = None,
max_tokens: int = 1000,
table_name: str = "default",
db_connection=None,
) -> None:
load_dotenv()
self.project_directory = None
self.model = model
self.max_tokens = max_tokens
self.system = None
self.identity = (
"You are an AI Pair Programmer and a world class python developer helping the Human work on a project."
if not identity
else identity
)
self.system_file_summaries = None
self.system_file_contents = None
self.conn = db_connection
self.cur = self.conn.cursor()
self.working_context = WorkingContext(
db_connection=db_connection, project_directory=self.project_directory
)
self.prompt_handler = SystemPromptHandler(
db_connection=db_connection,
tree=tree,
identity=self.identity,
working_context=self.working_context,
)
self.memory_table_name = f"{table_name}_memory"
self.prompt_handler.system_table_name = f"{table_name}_system_prompt"
self.system_table_name = f"{table_name}_system_prompt"
self.create_tables()
self.prompt_handler.set_system()
self.background_tasks = None
def get_messages(self, chat_box: Optional[bool] = None) -> List[dict]:
self.cur.execute(
f"""
SELECT role, content
FROM {self.system_table_name};
"""
)
results = self.cur.fetchall()
messages = [{"role": result[0], "content": result[1]} for result in results]
max_tokens = 30000 if chat_box else self.max_tokens
if chat_box:
self.cur.execute(
f"""
with t1 as (
SELECT role,
content as full_content,
COALESCE(summarized_message, content) as content,
COALESCE(summarized_message_tokens, content_tokens) as tokens,
sum(COALESCE(summarized_message_tokens, content_tokens)) OVER (ORDER BY interaction_index DESC) as token_cum_sum
FROM {self.memory_table_name}
WHERE project_directory = ?
ORDER BY interaction_index desc
)
select role, full_content, content, tokens
from t1
WHERE token_cum_sum <= ?;
""",
(
self.project_directory,
max_tokens,
),
)
else:
self.cur.execute(
# f"""
# WITH Exclude AS (
# SELECT interaction_index, last_idx
# FROM (
# select lag(interaction_index,1) over (order by interaction_index desc) as last_idx, *
# from {self.memory_table_name}
# )
# WHERE (content LIKE '/%' AND role = 'user')
# ),
# Filtered AS (
# SELECT *
# FROM {self.memory_table_name}
# WHERE interaction_index NOT IN (SELECT interaction_index FROM Exclude)
# and interaction_index NOT IN (SELECT last_idx FROM Exclude)
# ),
# t1 AS (
# SELECT role,
# content as full_content,
# COALESCE(summarized_message, content) as content,
# COALESCE(summarized_message_tokens, content_tokens) as tokens,
# SUM(COALESCE(summarized_message_tokens, content_tokens)) OVER (ORDER BY interaction_index DESC) as token_cum_sum
# FROM Filtered
# WHERE project_directory = ?
# ORDER BY interaction_index DESC
# )
# SELECT role, full_content, content, tokens
# FROM t1
# WHERE token_cum_sum <= ?;
# """,
f"""
with t1 as (
SELECT role,
content as full_content,
COALESCE(summarized_message, content) as content,
COALESCE(summarized_message_tokens, content_tokens) as tokens,
sum(COALESCE(summarized_message_tokens, content_tokens)) OVER (ORDER BY interaction_index DESC) as token_cum_sum
FROM {self.memory_table_name}
WHERE project_directory = ?
ORDER BY interaction_index desc
)
select role, full_content, content, tokens
from t1
WHERE token_cum_sum <= ?;
""",
(
self.project_directory,
max_tokens,
),
)
results = self.cur.fetchall()
for result in results[::-1]:
messages.append(
{"role": result[0], "content": result[2], "full_content": result[1]}
)
return messages
def add_message(
self,
role: str,
content: str,
command: Optional[str] = None,
function_response: Optional[str] = None,
) -> None:
timestamp = datetime.now().isoformat()
message_tokens = self.get_total_tokens_in_message(content)
summary, summary_tokens = (
self.summarize(content) if message_tokens > float("inf") else (None, None)
)
is_function_call = command is not None
try:
self.cur.execute(
f"""
INSERT INTO {self.memory_table_name}
(interaction_index, role, content, content_tokens, summarized_message, summarized_message_tokens, project_directory, is_function_call)
VALUES (?, ?, ?, ?, ?, ?, ?, ?);
""",
(
timestamp,
role,
content,
message_tokens,
summary,
summary_tokens,
self.project_directory,
is_function_call,
),
)
self.conn.commit()
except Exception as e:
print("Failed to insert data: ", str(e))
return
def get_total_tokens_in_message(self, message: str) -> int:
"""Returns the number of tokens in a message."""
encoding = tiktoken.encoding_for_model("gpt-3.5-turbo")
num_tokens = len(encoding.encode(message))
return num_tokens
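# Illustrative usage sketch (assumes `mm` is an initialized MemoryManager; the
# message is a made-up placeholder and the exact count depends on the tiktoken encoding):
# >>> mm.get_total_tokens_in_message("Hello, world!")  # small int, e.g. 4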
def create_tables(self) -> None:
try:
self.cur.execute(
f"""
CREATE TABLE IF NOT EXISTS {self.memory_table_name}
(
interaction_index TIMESTAMP PRIMARY KEY,
role VARCHAR(100),
content TEXT,
content_tokens INT,
summarized_message TEXT,
summarized_message_tokens INT,
project_directory TEXT,
is_function_call BOOLEAN DEFAULT FALSE,
function_response BOOLEAN DEFAULT FALSE
);
"""
)
except Exception as e:
print("Failed to create tables: ", str(e))
return
async def update_context(self):
ctx = self.working_context.get_context()
print("Working Context: ", ctx)
prompt = f"""
You are monitoring a conversation between an engineer and their AI Assistant.
Your mission is to manage the working memory for the AI Assistant.
You do this by adding information to the working context (short-term memory) based on the conversation history.
## Guidelines
- Your insertions should be short, concise, and relevant to the future of the conversation.
- Keep track of facts, ideas, and concepts that are important to the conversation.
- Monitor the personality of the person you're speaking with and adjust your responses accordingly.
- Keep track of things that the user appeared to like or dislike.
- In your thoughts, justify why you are adding or removing information from the working context.
You can see the current working context below.
Working Context:
{ctx}
Please make any updates accordingly. Be sure the think step by step as you work.
"""
messages = [
{"role": item["role"], "content": item["content"]}
for item in self.get_messages()
]
for message in messages:
if message["role"] == "system":
message["content"] = prompt
print(messages)
update = await self.working_context.client.chat.completions.create(
model="gpt-4-1106-preview",
response_model=ContextUpdate,
messages=messages,
)
print(update)
self.working_context = update.execute(self.working_context)
self.prompt_handler.set_system()
return
def set_directory(self, directory: str) -> None:
self.project_directory = directory
self.working_context.project_directory = directory
self.prompt_handler.directory = directory
self.prompt_handler.set_system()
return
| [
"\nYou are monitoring a conversation between an engineer and their AI Assistant.\nYour mission is to manage the working memory for the AI Assistant. \nYou do this by adding information to the working context (short-term memory) based on the conversation history.\n\n\n## Guidelines\n- Your insertions should be short, concise, and relevant to the future of the conversation.\n- Keep track of facts, ideas, and concepts that are important to the conversation.\n- Monitor the personality of the person you're speaking with and adjust your responses accordingly.\n- Keep track of things that the user appeared to like or dislike.\n- In your thoughts, justify why you are adding or removing information from the working context.\n\nYou can see the current working context below.\n\nWorking Context:\nPLACEHOLDER\n\nPlease make any updates accordingly. Be sure the think step by step as you work.\n",
"content"
] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~agent~agent_functions~function_ops.py | from instructor import OpenAISchema
from pydantic import Field
import json
import uuid
class AddFunction(OpenAISchema):
"""
Represents a function to be added to a Python file. Do not provide Partial values.
"""
file_name: str = Field(
..., description="The name of the file to add the function to."
)
function_name: str = Field(..., description="The name of the function.")
args: str = Field(..., description="The arguments of the function.")
body: str = Field(..., description="The body of the function.")
decorator_list: list[str] = Field(
[], description="The list of decorators to be applied to the function."
)
returns: str | None = Field(None, description="The return type of the function.")
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
function_name=self.function_name,
args=self.args,
body=self.body,
decorator_list=self.decorator_list,
returns=self.returns,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
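# Illustrative usage sketch (hypothetical values, not taken from the repo):
# >>> op = AddFunction(file_name="utils.py", function_name="greet", args="name: str",
# ...                  body="return f'Hello, {name}'", decorator_list=[], returns="str")
# >>> print(op.to_json())  # renders the op's fields as a JSON block wrapped in Markdown code fences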
class DeleteFunction(OpenAISchema):
"""
Represents a request to delete a function from the agent.
"""
file_name: str = Field(
..., description="The name of the file containing the function to delete."
)
function_name: str = Field(..., description="The name of the function to delete.")
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
function_name=self.function_name,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
class ModifyFunction(OpenAISchema):
"""
A class representing modifications to a function. All new values must be complete and will override the existing values. Do not provide Partial values.
"""
file_name: str = Field(
..., description="The name of the file containing the function to modify."
)
function_name: str = Field(..., description="The name of the function to modify.")
new_args: str | None = Field(
None, description="The new arguments for the function."
)
new_body: str | None = Field(
None,
description="The new body of the function. This will overwrite the old body. Always include a full body.",
)
new_decorator_list: list[str] | None = Field(
None, description="The new list of decorators for the function."
)
new_returns: str | None = Field(
None, description="The new return type for the function."
)
new_name: str | None = Field(None, description="The new name for the function.")
new_docstring: str | None = Field(
None, description="The new docstring for the function."
)
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
function_name=self.function_name,
new_args=self.new_args,
new_body=self.new_body,
new_decorator_list=self.new_decorator_list,
new_returns=self.new_returns,
new_name=self.new_name,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~agent~agent_functions~class_ops.py | from instructor import OpenAISchema
from pydantic import Field
import json
import uuid
class AddClass(OpenAISchema):
"""Represents a class to be added to a file."""
file_name: str = Field(..., description="The name of the file to add the class to.")
class_name: str = Field(..., description="The name of the class.")
bases: list[str] = Field([], description="The base classes for the class.")
body: str = Field(..., description="The body of the class.")
decorator_list: list[str] = Field(
[], description="The list of decorators to be applied to the class."
)
id: str | None = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
class_name=self.class_name,
bases=self.bases,
body=self.body,
decorator_list=self.decorator_list,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
class DeleteClass(OpenAISchema):
"""Represents a class to be deleted.
Attributes:
file_name (str): The name of the file containing the class to be deleted.
class_name (str): The name of the class to be deleted.
"""
file_name: str = Field(
..., description="The name of the file containing the class to delete."
)
class_name: str = Field(..., description="The name of the class to delete.")
id: str | None = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
class_name=self.class_name,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
class ModifyClass(OpenAISchema):
"""Represents a request to modify a Python class. Modifications will override the existing class."""
file_name: str = Field(
..., description="The name of the file containing the class to modify."
)
class_name: str = Field(..., description="The name of the class to modify.")
new_bases: list[str] | None = Field(
None, description="The new base classes for the class."
)
new_body: str | None = Field(
None,
description="The new body of the class as a list of statements or a string. This will replace the entire existing body of the class.",
)
new_decorator_list: list[str] | None = Field(
None, description="The new list of decorators for the class."
)
new_name: str | None = Field(None, description="The new name for the class.")
new_args: str | None = Field(None, description="The new arguments for the class.")
new_docstring: str | None = Field(
None, description="The new docstring for the function."
)
id: str | None = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
class_name=self.class_name,
new_bases=self.new_bases,
new_body=self.new_body,
new_decorator_list=self.new_decorator_list,
new_name=self.new_name,
new_args=self.new_args,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~agent~agent_functions~import_ops.py | from instructor import OpenAISchema
from pydantic import Field
import json
import uuid
class AddImport(OpenAISchema):
"""
Represents an import statement to be added to a Python file.
"""
file_name: str = Field(
..., description="The name of the file to add the import to."
)
module: str = Field(..., description="The name of the module to import.")
names: list | None = Field(
None, description="The names to import from the module. Defaults to None."
)
asnames: list | None = Field(
None, description="The names to import from the module with an alias."
)
objects: list | None = Field(
None, description="The objects to import from the module."
)
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
module=self.module,
names=self.names,
asnames=self.asnames,
objects=self.objects,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
class DeleteImport(OpenAISchema):
"""
Represents a request to delete one or more imports from a Python module.
"""
file_name: str = Field(
..., description="The name of the file to delete the import from."
)
module: str = Field(
..., description="The name of the module to delete imports from."
)
names: list | None = Field(
None, description="The names to delete from the module. Defaults to None."
)
asnames: list | None = Field(
None, description="The names to delete from the module with an alias."
)
objects: list | None = Field(
None, description="The objects to delete from the module."
)
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
module=self.module,
names=self.names,
asnames=self.asnames,
objects=self.objects,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
class ModifyImport(OpenAISchema):
"""
Represents a modification to an import statement in a Python file."""
file_name: str = Field(
..., description="The name of the file containing the import to modify."
)
module: str = Field(..., description="The name of the module to modify.")
new_names: list | None = Field(
None, description="The new names to import from the module."
)
new_asnames: list | None = Field(
None, description="The new names to import from the module with an alias."
)
objects_to_remove: list | None = Field(
None, description="The old objects to remove."
)
objects_to_add: list | None = Field(None, description="The new objects to add.")
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
module=self.module,
new_names=self.new_names,
new_asnames=self.new_asnames,
            objects_to_remove=self.objects_to_remove,
            objects_to_add=self.objects_to_add,
)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~agent~agent_functions~method_ops.py | from instructor import OpenAISchema
from pydantic import Field
import json
import uuid
class AddMethod(OpenAISchema):
"""
Represents a method to be added to a class.
"""
file_name: str = Field(
...,
description="The name of the file containing the class to add the method to.",
)
class_name: str = Field(
..., description="The name of the class to add the method to."
)
method_name: str = Field(..., description="The name of the method.")
args: str = Field(..., description="The arguments of the method.")
body: str = Field(..., description="The body of the method.")
decorator_list: list[str] = Field(
[], description="The list of decorators to be applied to the method."
)
returns: str | None = Field(None, description="The return type of the method.")
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
class_name=self.class_name,
method_name=self.method_name,
args=self.args,
body=self.body,
decorator_list=self.decorator_list,
returns=self.returns,
)
return "\n\n```json\n" + json.dumps(out) + "\n```\n"
class DeleteMethod(OpenAISchema):
"""Represents a method to be deleted from a class."""
file_name: str = Field(
...,
description="The name of the file containing the class to delete the method from.",
)
class_name: str = Field(
..., description="The name of the class to delete the method from."
)
method_name: str = Field(..., description="The name of the method to delete.")
id: str = str(uuid.uuid4())
    def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
class_name=self.class_name,
method_name=self.method_name,
)
return "\n\n```json\n" + json.dumps(out) + "\n```\n"
class ModifyMethod(OpenAISchema):
"""
Represents a method modification operation. Modifications will override the existing method, do not provide Partial values.
"""
file_name: str = Field(
...,
description="The name of the file containing the class to modify the method in.",
)
class_name: str = Field(
..., description="The name of the class to modify the method in."
)
method_name: str = Field(..., description="The name of the method to modify.")
new_args: str | None = Field(None, description="The new arguments for the method.")
new_body: str | None = Field(
None,
description="The new body of the method as a string. This will replace the entire existing body of the method.",
)
new_decorator_list: list[str] | None = Field(
None, description="The new list of decorators for the method."
)
new_method_name: str | None = Field(
None, description="The new name for the method."
)
new_returns: str | None = Field(
None, description="The new return type for the method."
)
new_docstring: str | None = Field(
None, description="The new docstring for the function."
)
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(
id=self.id,
file_name=self.file_name,
class_name=self.class_name,
method_name=self.method_name,
new_args=self.new_args,
new_body=self.new_body,
new_decorator_list=self.new_decorator_list,
new_method_name=self.new_method_name,
new_returns=self.new_returns,
)
return "\n\n```json\n" + json.dumps(out) + "\n```\n"
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~agent~agent_functions~file_ops.py | import json
import uuid
from instructor import OpenAISchema
from pydantic import Field
from agent.agent_functions.function_ops import (
AddFunction,
DeleteFunction,
ModifyFunction,
)
from agent.agent_functions.class_ops import AddClass, DeleteClass, ModifyClass
from agent.agent_functions.method_ops import AddMethod, DeleteMethod, ModifyMethod
from agent.agent_functions.import_ops import AddImport, DeleteImport, ModifyImport
# Export all the entities
__all__ = [
"AddFunction",
"DeleteFunction",
"ModifyFunction",
"AddClass",
"DeleteClass",
"ModifyClass",
"AddMethod",
"DeleteMethod",
"ModifyMethod",
"AddImport",
"DeleteImport",
"ModifyImport",
"VariableNameChange",
]
class VariableNameChange(OpenAISchema):
"""
Represents a request to change the name of a variable throughout the entire codebase. This operation replaces all instances of the original variable name with a new name.
"""
original_name: str = Field(..., description="The original name of the variable.")
new_name: str = Field(..., description="The new name of the variable.")
id: str = str(uuid.uuid4())
def to_json(self):
out = dict(id=self.id, original_name=self.original_name, new_name=self.new_name)
return "\n\n```json\n" + json.dumps(out, indent=4) + "\n```\n"
_OP_LIST = [
AddImport,
DeleteImport,
AddFunction,
DeleteFunction,
AddClass,
DeleteClass,
AddMethod,
DeleteMethod,
ModifyFunction,
ModifyClass,
ModifyMethod,
ModifyImport,
VariableNameChange,
]
_OP_LIST = {cls.__name__: cls for cls in _OP_LIST}
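# Minimal dispatch sketch (illustrative payload): _OP_LIST maps an OpenAI tool-call name to
# its schema class, so accumulated JSON arguments can be rebuilt into a typed operation.
if __name__ == "__main__":
    payload = {"original_name": "old_name", "new_name": "better_name"}
    op = _OP_LIST["VariableNameChange"](**payload)
    print(op.to_json())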
| [] |
2024-01-10 | blazickjp/GPT-CodeApp | backend~agent~coding_agent.py | # import os
import re
import json
import boto3
import difflib
import ast
import instructor
from openai import OpenAI
from pydantic import BaseModel, Field
from typing import List, Optional
from types import SimpleNamespace
from pathlib import Path
from agent.agent_functions.ast_ops import ASTChangeApplicator
from database.my_codebase import MyCodebase
from agent.agent_prompts import ( # noqa
CHANGES_SYSTEM_PROMPT,
DEFAULT_SYSTEM_PROMPT,
PROFESSOR_SYNAPSE,
)
class Message(BaseModel):
role: str
content: str
def to_dict(self):
return {
"role": self.role,
"content": self.content,
}
class NestedNamespace(SimpleNamespace):
"""
A class to convert a dictionary into a nested namespace.
"""
def __init__(self, dictionary, **kwargs):
if not isinstance(dictionary, dict):
raise ValueError("Input must be a dictionary")
super().__init__(**kwargs)
for key, value in dictionary.items():
if isinstance(value, dict):
self.__setattr__(key, NestedNamespace(value))
elif isinstance(value, list) and all(isinstance(i, dict) for i in value):
self.__setattr__(key, [NestedNamespace(i) for i in value])
else:
self.__setattr__(key, value)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
else:
return None
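# Example (illustrative chunk): NestedNamespace wraps a streamed response dict so it can be
# read with attribute access, with missing keys falling back to None.
#
#     chunk = NestedNamespace({"choices": [{"delta": {"content": "hi"}}]})
#     chunk.choices[0].delta.content   # -> "hi"
#     chunk.choices[0].finish_reason   # -> None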
class CodingAgent:
"""
    A class to represent a coding agent that uses OpenAI or Anthropic chat models to generate code.
Attributes:
memory_manager (MemoryManager): Manages the memory of the agent.
        function_map (dict): A dictionary mapping function names to the operation classes the agent can call.
        codebase (Optional[MyCodebase]): The codebase the agent operates on.
        GPT_MODEL (str): The identifier of the model used by the agent.
"""
def __init__(
self,
memory_manager,
function_map: Optional[dict] = None,
codebase: Optional[MyCodebase] = None,
):
"""
Constructs all the necessary attributes for the CodingAgent object.
Args:
memory_manager (MemoryManager): Manages the memory of the agent.
            function_map (Optional[dict]): A dictionary mapping function names to the operation classes the agent can call.
            codebase (Optional[MyCodebase]): The codebase the agent operates on.
"""
self.memory_manager = memory_manager
self.function_map = function_map
self.GPT_MODEL = None
self.codebase = codebase
self.max_tokens = 4000
self.temperature = 0.75
self.tool_choice = "auto"
self.function_to_call = None
self.ops_to_execute = []
self.client = instructor.patch(OpenAI())
if function_map:
self.tools = [
{"type": "function", "function": op.openai_schema}
for op in self.function_map[0].values()
]
else:
self.tools = None
def query(self, input: str, command: Optional[str] = None) -> List[str]:
"""
        Queries the configured chat model with the given input and command, streaming the output.
Args:
input (str): The input text to be processed by the GPT-3 model.
command (Optional[str]): The command to be executed by the agent.
        Yields:
            str: Chunks of the output generated by the model, streamed as they arrive.
"""
print(f"Input Text: {input}\nCommand: {command}")
self.memory_manager.add_message("user", input)
message_history = [
{"role": i["role"], "content": i["content"]}
for i in self.memory_manager.get_messages()
]
keyword_args = {
"model": self.GPT_MODEL,
"messages": message_history,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"stream": True,
}
print("Message Count: ", len(keyword_args["messages"]))
        # Override normal function calling when the "changes" command is provided
if command and command.lower() == "changes":
function_name = None
json_accumulator = ""
idx = None
keyword_args["tools"] = self.tools
keyword_args["tool_choice"] = "auto"
print(keyword_args["tools"])
self.memory_manager.prompt_handler.identity = CHANGES_SYSTEM_PROMPT
self.memory_manager.prompt_handler.set_system()
temp_system = self.memory_manager.prompt_handler.system
self.memory_manager.prompt_handler.identity = DEFAULT_SYSTEM_PROMPT
self.memory_manager.prompt_handler.set_system()
assert keyword_args["messages"][0]["role"] == "system"
keyword_args["messages"][0]["content"] = temp_system
# Call the model
print(f"Calling model: {self.GPT_MODEL}")
for i, chunk in enumerate(self.call_model_streaming(command, **keyword_args)):
if isinstance(chunk, dict):
chunk = NestedNamespace(chunk)
delta = chunk.choices[0].delta
if delta.tool_calls:
# Initialize json_accumulator and idx outside the loop
for call in delta.tool_calls:
# Check if we have started a new function call
if call.index != idx:
# Process the previous function call if any
if function_name and json_accumulator:
try:
data = json.loads(json_accumulator)
completed_op = self.function_map[0][function_name](
**data
)
self.ops_to_execute.append(completed_op)
return_string = completed_op.to_json()
yield return_string
except json.JSONDecodeError as e:
pass
# Now reset for the new call
idx = call.index
json_accumulator = call.function.arguments
function_name = call.function.name # Set the new function name
print(f"Function Name: {function_name}")
else:
# Continue accumulating JSON string for the current function call
yield call.function.arguments
json_accumulator += call.function.arguments
# print(f"JSON Accumulator (continued): {json_accumulator}")
# After the loop, process the final function call if any
if function_name and json_accumulator:
try:
data = json.loads(json_accumulator)
completed_op = self.function_map[0][function_name](**data)
self.ops_to_execute.append(completed_op)
return_string = completed_op.to_json()
yield return_string
except json.JSONDecodeError as e:
pass
else:
# Process normal text response
yield chunk.choices[0].delta.content
def execute_ops(self, ops: List[dict]):
diffs = [] # List to store the diffs for each operation
for op in self.ops_to_execute:
print(f"Executing operation: {op.id}")
if "backend" in op.file_name:
                op.file_name = Path(self.codebase.directory).joinpath(
                    op.file_name.replace("backend/", "")
                )
op.file_name = self.normalize_path(op.file_name)
# Read the existing code from the file
try:
with open(op.file_name, "r") as file:
original_code = file.read()
except FileNotFoundError:
print(f"File not found: {op.file_name}")
continue
# Parse the original code into an AST
ast_tree = ast.parse(original_code)
# Create an ASTChangeApplicator to apply the changes
applicator = ASTChangeApplicator(ast_tree)
# Apply the operation to the AST tree
transformed_code = applicator.apply_changes([op])
# Compute the diff
diff = difflib.unified_diff(
original_code.splitlines(keepends=True),
transformed_code.splitlines(keepends=True),
fromfile="before.py",
tofile="after.py",
)
diff_string = "".join(diff)
diffs.append(diff_string)
print(f"Diff: {diff_string}")
# Write the transformed code back to the file
with open(op.file_name, "w") as file:
file.write(transformed_code)
# self.ops_to_execute = [op for op in self.ops_to_execute if op != op]
return diffs
def process_json(self, args: str) -> str:
"""
Process a JSON string, handling any triple-quoted strings within it.
Args:
args (str): The JSON string to process.
Returns:
str: The processed JSON string.
"""
try:
# Attempt to load the JSON string
response = json.loads(args)
return response
except json.decoder.JSONDecodeError:
# If there's a JSONDecodeError, it may be due to triple-quoted strings
# Find all occurrences of triple-quoted strings
triple_quoted_strings = re.findall(r"\"\"\"(.*?)\"\"\"", args, re.DOTALL)
# For each occurrence, replace newlines and triple quotes
for tqs in triple_quoted_strings:
# Replace newlines and double quotes within the triple-quoted string
fixed_string = tqs.replace("\n", "\\n").replace('"', '\\"')
# Replace the original triple-quoted string with the fixed string
response_str = args.replace(tqs, fixed_string)
# Now replace the triple quotes with single quotes
response_str = args.replace('"""', '"')
return json.loads(response_str)
    def call_model_streaming(self, command: Optional[str] = None, **kwargs):
print("Calling model streaming")
print(kwargs["model"])
if self.GPT_MODEL.startswith("gpt"):
print("Calling OpenAI")
for chunk in self.client.chat.completions.create(**kwargs):
yield chunk
if self.GPT_MODEL == "anthropic":
print("Calling anthropic")
try:
sm_client = boto3.client("bedrock-runtime", region_name="us-west-2")
resp = sm_client.invoke_model_with_response_stream(
accept="*/*",
contentType="application/json",
modelId="anthropic.claude-v2:1",
body=json.dumps(
{
"prompt": self.generate_anthropic_prompt(),
"max_tokens_to_sample": max(kwargs["max_tokens"], 2000),
"temperature": self.temperature,
"anthropic_version": "bedrock-2023-05-31",
}
),
)
except Exception as e:
print(f"Error calling Anthropic Models: {e}")
yield {
"choices": [
{
"finish_reason": "stop",
"delta": {"content": "Error: " + str(e)},
}
]
}
while True:
try:
chunk = next(iter((resp["body"])))
bytes_to_send = chunk["chunk"]["bytes"]
decoded_str = json.loads(bytes_to_send.decode("utf-8"))
content = decoded_str["completion"]
stop_reason = decoded_str["stop_reason"]
if stop_reason == "stop_sequence":
yield {
"choices": [
{"finish_reason": "stop", "delta": {"content": content}}
]
}
break
else:
yield {
"choices": [
{"finish_reason": None, "delta": {"content": content}}
]
}
except StopIteration:
break
except UnboundLocalError:
print("UnboundLocalError")
break
def generate_anthropic_prompt(self, include_messages=True) -> str:
"""
        Generates a prompt for the Anthropic Claude model.
        Args:
            include_messages (bool): Whether to include the conversation history in the prompt.
Returns:
str: The generated prompt.
"""
conversation_history = "The following is a portion of your conversation history with the human, truncated to save token space, inside the <conversation-history> XML tags.\n\n<conversation-history>\n"
messages = self.memory_manager.get_messages()
if len(messages) > 1:
# Extract the last User messages
last_user_message = (
"Human: "
+ [
message["content"]
for message in messages
if message["role"] == "user"
][-1]
)
else:
last_user_message = ""
for idx, message in enumerate(messages):
if message["role"].lower() == "user":
conversation_history += f"Human: {message['content']}\n\n"
if message["role"].lower() == "assistant":
conversation_history += f"Assistant: {message['content']}\n\n"
conversation_history += "\n</conversation-history>\n\n"
if self.memory_manager.prompt_handler.system_file_contents:
            file_context = (
                "The human has loaded the following files into context to give you background related to the most recent request. They are contained in the <file-contents> XML tags.\n\n<file-contents>\n"
                + self.memory_manager.prompt_handler.system_file_contents
                + "\n</file-contents>\n\n"
            )
else:
file_context = ""
if self.memory_manager.prompt_handler.tree:
            tree = (
                "The working directory of the human is always loaded into context. This is useful background when the human is working on the project, but the human may sometimes ask questions unrelated to the current project. The directory tree is contained in the <directory-tree> XML tags.\n<directory-tree>\n"
                + self.memory_manager.prompt_handler.tree
                + "\n</directory-tree>\n\n"
            )
else:
tree = ""
if include_messages:
sys_prompt = (
self.memory_manager.identity
+ conversation_history
+ tree
+ file_context
)
else:
sys_prompt = self.memory_manager.identity + "\n\n" + tree + file_context
        return (
            "\n\nHuman: The following is your system prompt: "
            + sys_prompt
            + "\n\nAssistant: Understood\n\n"
            + last_user_message
            + "\n\nAssistant:"
        )
# return sys_prompt + "\n\n" + last_user_message + "\n\nAssistant: "
@staticmethod
def normalize_path(input_path):
# Get the current working directory as a Path object
working_directory = Path.cwd()
# Create a Path object from the input path
input_path_obj = Path(input_path)
# Resolve the input path (make it absolute and resolve any symlinks)
resolved_input_path = input_path_obj.resolve()
# Make the path relative to the working directory, if possible
try:
relative_path = resolved_input_path.relative_to(working_directory)
return str(relative_path)
except ValueError:
# The input path is not a subpath of the working directory
return str(resolved_input_path)
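if __name__ == "__main__":
    # Minimal sketch (illustrative path): normalize_path is a pure helper and can be
    # exercised without constructing an agent or calling any model.
    print(CodingAgent.normalize_path("./backend/../backend/agent/coding_agent.py"))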
| [
"\n\n",
"content",
"Error: PLACEHOLDER"
] |
2024-01-10 | wnlhdx/Command | python~rgzn~aitest.py | import sys
from PySide6 import *
from PySide6.QtCore import *
from PySide6.QtWidgets import *
from PySide6.QtWebEngineWidgets import QWebEngineView
import openai
import edge_tts
from edge_tts import *
import asyncio
import requests
import random
openai.api_base = "https://api.chatanywhere.com.cn/v1"
openai.api_key = "sk-VjSCWObgSc3EbfaJQRACXMvb33Q7th40lxF9d7Sk9aJoydQ8"
def gpt_35_api_stream(message):
try:
messages = [{'role': 'user', 'content': message}, ]
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=messages,
stream=True,
)
completion = {'role': '', 'content': ''}
for event in response:
if event['choices'][0]['finish_reason'] == 'stop':
                # print(f'Received completion data: {completion}')
                # print(f'OpenAI result: {completion.get("content")}')
                break
            for delta_k, delta_v in event['choices'][0]['delta'].items():
                # print(f'Stream response data: {delta_k} = {delta_v}')
                completion[delta_k] += delta_v
        messages.append(completion)  # append the completed message directly to the messages argument
        # return (True, '')
        return completion.get("content")
    except Exception as err:
        return 'Failed to connect to OpenAI'
        # return (False, f'OpenAI API error: {err}')
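# Example (illustrative prompt): blocking helper that returns the accumulated completion text.
#
#     reply = gpt_35_api_stream('Hello, who are you?')
#     print(reply)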
async def amain(TEXT) -> None:
"""Main function"""
voices = await VoicesManager.create()
# voice = voices.find(Gender="Female", Locale="zh-CN")
# communicate = edge_tts.Communicate(TEXT, "zh-CN-XiaoxiaoNeural")
communicate = edge_tts.Communicate(TEXT, "zh-CN-XiaoyiNeural")
# voice = voices.find(Gender="Female", Locale="es-AR")
# communicate = edge_tts.Communicate(TEXT, random.choice(voice)["Name"])
await communicate.save("output.mp3")
def tts(text):
loop = asyncio.get_event_loop_policy().get_event_loop()
try:
loop.run_until_complete(amain(text))
finally:
loop.close()
def save():
    speaker = 'The best chemistry textbook in the world'
responser = gpt_35_api_stream(speaker)
print(responser)
url = f'http://genshinvoice.top/api'
param = {'speaker': '克拉拉', 'text': responser}
# tts(responser)
open('test.wav', 'wb').write(requests.get(url, param).content)
class Window(QWidget):
    def __init__(self, parent=None, **kwargs):
        super(Window, self).__init__(parent)
        # Initialize the window
        self.init()
        # Initialize the system tray icon
        self.initTray()
        # Load the pet's static gif images
        self.initPetImage()
        # Normal idle behaviour: randomly switch animations
        self.petNormalAction()
    # Window initialization
    def init(self):
        self.is_follow_mouse = False
        # Set window attributes: no title bar and always on top
        # FramelessWindowHint: window without a frame
        # WindowStaysOnTopHint: window always stays on top
        # SubWindow: the widget is a sub-window regardless of whether it has a parent
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint | Qt.SubWindow)
        # setAutoFillBackground(True) fills the background automatically; False keeps it transparent
        self.setAutoFillBackground(False)
        # Transparent window, opaque child widgets
        self.setAttribute(Qt.WA_TranslucentBackground, True)
        # Repaint / refresh the widget
        self.repaint()
    # System tray initialization
    def initTray(self):
        # Right-click menu items shown when the app is minimized to the tray
        # 'Quit' menu item: calls the quit function when clicked
        quit_action = QAction('Quit', self, triggered=self.quit)
        # Icon for this menu item
        quit_action.setIcon(QIcon(os.path.join('yuzu.jpg')))
        # 'Show' menu item: calls the showwin function when clicked
        showing = QAction('Show', self, triggered=self.showwin)
        # Create the menu widget
        self.tray_icon_menu = QMenu(self)
        # Add the 'Show' item (no submenu) to the menu
        self.tray_icon_menu.addAction(showing)
        # Add the 'Quit' item (no submenu) to the menu
        self.tray_icon_menu.addAction(quit_action)
        # QSystemTrayIcon provides an icon for the application in the system tray
        self.tray_icon = QSystemTrayIcon(self)
        # Set the tray icon
        self.tray_icon.setIcon(QIcon(os.path.join('favicon.ico')))
        # Attach the context menu to the tray icon
        self.tray_icon.setContextMenu(self.tray_icon_menu)
        # Show it
        print('show icon in tray', self.tray_icon)
        self.tray_icon.show()
    # Load the pet's static gif images
    def initPetImage(self):
        # Speech-bubble label
        self.talkLabel = QLabel(self)
        # Speech-bubble styling
        self.talkLabel.setStyleSheet("font:15pt '楷体';border-width: 1px;color:blue;")
        # Label that displays the pet image
        self.image = QLabel(self)
        # Collect the gifs for the pet's normal idle state
        self.pet_list = []
        for i in os.listdir("normal"):
            self.pet_list.append("normal/" + i)
        # QMovie holds an animated image (e.g. an animated GIF) and is shown through a QLabel
        self.movie = QMovie(random.choice(self.pet_list))
        # Set the movie size
        self.movie.setScaledSize(QSize(200, 200))
        # Show the QMovie in the image label
        self.image.setMovie(self.movie)
        self.movie.start()
        self.resize(1024, 1024)
        # Call randomPosition so the pet appears at a random location
        self.randomPosition()
        # Show the window
        print('show pet', self.geometry())
        self.show()
        # https://new.qq.com/rain/a/20211014a002rs00
        # Load the pet's idle-state dialog lines
        self.dialog = []
        # Read the dialog file from the working directory
        with open("dialog.txt", "r", encoding='utf8') as f:
            text = f.read()
            # Split on the newline character and store the lines in dialog
            self.dialog = text.split("\n")
    # Pet's normal idle behaviour
    def petNormalAction(self):
        # Perform an action every few seconds
        # Timer setup
        self.timer = QTimer()
        # Run automatically when the timer fires
        self.timer.timeout.connect(self.randomAct)
        # Interval between action switches
        self.timer.start(3000)
        # Set the pet state to normal
        self.condition = 0
        # Switch the dialog line every few seconds
        self.talkTimer = QTimer()
        self.talkTimer.timeout.connect(self.talk)
        self.talkTimer.start(3000)
        # Set the dialog state to normal
        self.talk_condition = 0
        # Show the pet's speech bubble
        self.talk()
    # Randomly switch the animation
    def randomAct(self):
        # condition tracks the pet state; 0 means normal idle
        if not self.condition:
            # Randomly pick one of the loaded gifs to show, so the animation changes over time
            self.movie = QMovie(random.choice(self.pet_list))
            # Pet size
            self.movie.setScaledSize(QSize(200, 200))
            # Attach the animation to the label
            self.image.setMovie(self.movie)
            # Start playing the animation
            self.movie.start()
        # When condition is non-zero, switch to the special action as click feedback
        # More interactions could be added here with further elif branches
        else:
            # Path of the special-state image, condition == 1
            self.movie = QMovie("./click/click.gif")
            # Pet size
            self.movie.setScaledSize(QSize(200, 200))
            # Attach the animation to the label
            self.image.setMovie(self.movie)
            # Start playing the animation
            self.movie.start()
            # Reset the pet state to normal idle
            self.condition = 0
            self.talk_condition = 0
    # Handle the pet's speech-bubble behaviour
    def talk(self):
        if not self.talk_condition:
            # talk_condition == 0: pick one of the lines loaded into dialog
            self.talkLabel.setText(random.choice(self.dialog))
            # Set the style
            self.talkLabel.setStyleSheet(
                "font: bold;"
                "font:25pt '楷体';"
                "color:white;"
                "background-color: black"
                "url(:/)"
            )
            # Resize the label to fit its content
            self.talkLabel.adjustSize()
        else:
            # talk_condition == 1: show "Don't click me"; more behaviours could be added with elif branches
            self.talkLabel.setText("Don't click me")
            self.talkLabel.setStyleSheet(
                "font: bold;"
                "font:25pt '楷体';"
                "color:red;"
                "background-color: white"
                "url(:/)"
            )
            self.talkLabel.adjustSize()
            # Reset to the normal state
            self.talk_condition = 0
    # Quit: close the window and exit the program
    def quit(self):
        self.close()
        sys.exit()
    # Show the pet
    def showwin(self):
        # setWindowOpacity() sets the window opacity; showing/hiding the pet is done by adjusting it
        self.setWindowOpacity(1)
    # Move the pet to a random position
    def randomPosition(self):
        screen_geo = self.screen().geometry()
        pet_geo = self.geometry()
        width = int((screen_geo.width() - pet_geo.width()) * random.random())
        height = int((screen_geo.height() - pet_geo.height()) * random.random())
        self.move(width, height)
    # While the left mouse button is held down, the pet follows the mouse position
    def mousePressEvent(self, event):
        # Switch the pet state to "clicked"
        self.condition = 1
        # Switch the dialog state as well
        self.talk_condition = 1
        self.timer.stop()
        self.talkTimer.stop()
        # Update the speech bubble immediately
        self.talk()
        # Load the click animation immediately
        self.randomAct()
        if event.button() == Qt.LeftButton:
            self.is_follow_mouse = True
            # globalPosition(): position of the event relative to the desktop
            # pos(): position of the window's top-left corner on the desktop
            self.mouse_drag_pos = event.globalPosition().toPoint() - self.pos()
            event.accept()
            # Cursor shape while dragging
            self.setCursor(QCursor(Qt.OpenHandCursor))
    # Called when the mouse moves; makes the pet follow the mouse
    def mouseMoveEvent(self, event):
        # If the left button is down and the pet is bound to the mouse
        if Qt.LeftButton and self.is_follow_mouse:
            # Move the pet along with the mouse
            self.move(event.globalPosition().toPoint() - self.mouse_drag_pos)
            event.accept()
    # Called when the mouse button is released; unbind the pet from the mouse
    def mouseReleaseEvent(self, event):
        self.is_follow_mouse = False
        # Restore the arrow cursor
        self.setCursor(QCursor(Qt.ArrowCursor))
        self.timer.start()
        self.talkTimer.start()
    # Called when the mouse enters the widget
    def enterEvent(self, event):
        # Set the cursor shape; Qt.ClosedHandCursor is the closed (grabbing) hand
        self.setCursor(Qt.ClosedHandCursor)
    # Right-click interaction with the pet
    def contextMenuEvent(self, event):
        # Build the menu
        menu = QMenu(self)
        # Menu items
        quitAction = menu.addAction("Quit")
        hide = menu.addAction("Hide")
        # Show the menu with exec(); take the click position from the event and use
        # mapToGlobal() to convert widget-relative coordinates to screen coordinates
        action = menu.exec(self.mapToGlobal(event.pos()))
        # 'Quit' was clicked
        if action == quitAction:
            QApplication.instance().quit()
        # 'Hide' was clicked
        if action == hide:
            # Hide the pet by making the window fully transparent
            self.setWindowOpacity(0)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec())
| [] |
2024-01-10 | 1rvyn/llm-quickstart | chat-pdf~src~ingest.py | import pdfplumber
import PyPDF4
import re
import os
import sys
from typing import Callable, List, Tuple, Dict
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from dotenv import load_dotenv
def extract_metadata_from_pdf(file_path: str) -> dict:
with open(file_path, "rb") as pdf_file:
reader = PyPDF4.PdfFileReader(pdf_file) # file to read
metadata = reader.getDocumentInfo()
return {
"title": metadata.get("/Title", "").strip(),
"author": metadata.get("/Author", "").strip(),
"creation_date": metadata.get("/CreationDate", "").strip(),
}
def extract_pages_from_pdf(file_path: str) -> List[Tuple[int, str]]:
"""
Extracts the text from each page of the PDF.
:param file_path: The path to the PDF file.
:return: A list of tuples containing the page number and the extracted text.
"""
if not os.path.isfile(file_path):
raise FileNotFoundError(f"File not found: {file_path}")
with pdfplumber.open(file_path) as pdf:
pages = []
for page_num, page in enumerate(pdf.pages):
text = page.extract_text()
if text.strip(): # Check if extracted text is not empty
pages.append((page_num + 1, text))
return pages
def parse_pdf(file_path: str) -> Tuple[List[Tuple[int, str]], Dict[str, str]]:
"""
Extracts the title and text from each page of the PDF.
:param file_path: The path to the PDF file.
:return: A tuple containing the title and a list of tuples with page numbers and extracted text.
"""
if not os.path.isfile(file_path):
raise FileNotFoundError(f"File not found: {file_path}")
metadata = extract_metadata_from_pdf(file_path)
pages = extract_pages_from_pdf(file_path)
return pages, metadata
def merge_hyphenated_words(text: str) -> str:
return re.sub(r"(\w)-\n(\w)", r"\1\2", text)
def fix_newlines(text: str) -> str:
return re.sub(r"(?<!\n)\n(?!\n)", " ", text)
def remove_multiple_newlines(text: str) -> str:
return re.sub(r"\n{2,}", "\n", text)
def clean_text(
pages: List[Tuple[int, str]], cleaning_functions: List[Callable[[str], str]]
) -> List[Tuple[int, str]]:
cleaned_pages = []
for page_num, text in pages:
for cleaning_function in cleaning_functions:
text = cleaning_function(text)
cleaned_pages.append((page_num, text))
return cleaned_pages
def text_to_docs(text: List[Tuple[int, str]], metadata: Dict[str, str]) -> List[Document]:
"""Converts list of strings to a list of Documents with metadata."""
doc_chunks = []
for page_num, page in text:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=200,
)
chunks = text_splitter.split_text(page)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk,
metadata={
"page_number": page_num,
"chunk": i,
"source": f"p{page_num}-{i}",
**metadata,
},
)
doc_chunks.append(doc)
return doc_chunks
if __name__ == "__main__":
load_dotenv()
# Step 1: Parse PDFs
file_paths = ["/Users/irvyn/work/pdfs/quickest-sim.pdf", "/Users/irvyn/work/pdfs/quick-sim2.pdf", "/Users/irvyn/work/pdfs/quick-sim.pdf"]
document_chunks = []
for file_path in file_paths:
raw_pages, metadata = parse_pdf(file_path)
# Step 2: Create text chunks
cleaning_functions = [
merge_hyphenated_words,
fix_newlines,
remove_multiple_newlines,
]
cleaned_text_pdf = clean_text(raw_pages, cleaning_functions)
document_chunks += text_to_docs(cleaned_text_pdf, metadata)
# Step 3 + 4: Generate embeddings and store them in DB
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(
document_chunks,
embeddings,
collection_name="june-2023-quickstartsimulator",
persist_directory="src/data/chroma/1",
)
# Save DB locally
vector_store.persist()
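    # Sketch (illustrative query text): the persisted collection can later be reloaded and
    # queried with the same embedding function, e.g.
    #
    #     store = Chroma(
    #         collection_name="june-2023-quickstartsimulator",
    #         embedding_function=embeddings,
    #         persist_directory="src/data/chroma/1",
    #     )
    #     docs = store.similarity_search("How do I start the simulator?", k=4)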
| [] |
2024-01-10 | 1rvyn/llm-quickstart | chat-pdf~src~single-pdf.py | import os
import sys
from dotenv import load_dotenv
from langchain.vectorstores.chroma import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.schema import HumanMessage, AIMessage
# Load the OPENAI_API_KEY from the environment
load_dotenv()
# Then use openai_api_key in your script where needed
def make_chain(version):
model = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature="0",
)
embedding = OpenAIEmbeddings()
if version == "0":
vector_store = Chroma(
collection_name="june-2023-quickstartsimulator",
embedding_function=embedding,
persist_directory="src/data/chroma/1",
)
elif version == "2":
vector_store = Chroma(
collection_name="june-2023-quickstartsimulator-2",
embedding_function=embedding,
persist_directory="src/data/chroma/2",
)
return ConversationalRetrievalChain.from_llm(
model,
retriever=vector_store.as_retriever(),
return_source_documents=True,
)
if __name__ == "__main__":
print(f'All arguments received: {sys.argv}') # This will print all arguments passed to your script
load_dotenv()
version = sys.argv[1]
chain = make_chain(version)
chat_history = []
question = sys.stdin.read().strip()
# Generate answer
response = chain({"question": question, "chat_history": chat_history})
# Retrieve answer
answer = response["answer"]
source = response["source_documents"]
    references = ""
    if source:
        page_numbers = set(doc.metadata['page_number'] for doc in source)
        page_numbers_str = ', '.join(str(pn) for pn in page_numbers)
        references += f"\nYou can read about this on page {page_numbers_str} in our quick-start guide."
    chat_history.append(HumanMessage(content=question))
    chat_history.append(AIMessage(content=answer))
    # Print answer
    print(f': {answer}\nReferences: {references}\n')
| [] |
2024-01-10 | empirical-org/Quill-NLP-Tools-and-Datasets | scripts~gpt~finetune.py | # Finetuning script for OpenAI models. Finetunes a GPT-3.5-turbo model on Quill's feedback.
#
# Usage: First set your OpenAI key, then run the script:
# > export OPENAI_API_KEY=<YOUR_KEY>
# > python scripts/finetune.py finetune_file.json
import os
import time
import openai
import click
import jsonlines
import random
import tiktoken
import threading
import numpy as np
from collections import defaultdict
from tqdm import tqdm
# from scripts.data.villages import passage as villages
from scripts.data.bereal import passage as bereal
from scripts.data.berlin import passage as berlin
from scripts.data.haiti import passage as haiti
from scripts.data.pompeii import passage as pompeii
from scripts.data.quokkas import passage as quokkas
from scripts.data.surgebarriers import passage as surgebarriers
from scripts.data.villages import passage as villages
OPTIMAL_LABEL = 'Optimal'
SUBOPTIMAL_LABEL = 'Suboptimal'
OUTPUT_TOKENS = 75
MAX_RETRIES = 5
MODEL = 'gpt-3.5-turbo-0613'
CONJUNCTIONS = ['because', 'but', 'so']
NUM_ITEMS = 10 # The number of finetuning examples to collect for every passage-conjunction combination.
FEEDBACK_SOURCE = 'quill'
passages = [berlin, haiti, pompeii, quokkas, surgebarriers, villages]
openai.api_key = os.getenv("OPENAI_API_KEY")
encoding = tiktoken.get_encoding("cl100k_base")
def read_file(filename, prompt):
items = []
with jsonlines.open(filename) as reader:
for item in reader:
if 'prediction' in item:
items.append((item['text'].replace('\n', ' '), item['label'], item['prediction']))
else:
items.append((item['text'].replace('\n', ' '), item['label']))
return items
def map_label(label):
if label.startswith('Label'):
return SUBOPTIMAL_LABEL
return OPTIMAL_LABEL
def map_to_binary(items):
new_items = []
for item in items:
if len(item) == 3:
new_items.append((item[0], map_label(item[1]), map_label(item[2])))
else:
new_items.append((item[0], map_label(item[1])))
return new_items
def create_openai_feedback_prompt(passage, response, conjunction, add_passage=True):
# "Please give feedback that is understandable and engaging for a fifth-grade student. " \
# "Use simple language, clear explanations, and avoid complex vocabulary or concepts." \
# "Try to keep responses concise and friendly, and stick to the examples as closely as possible." \
quill_prompt = passage['prompts'][conjunction]
plagiarism_passage = passage['plagiarism'][conjunction]
label_info = passage['instructions'][conjunction]
feedback = passage['feedback'][conjunction]
examples = passage['examples'][conjunction]
evaluation = False
# evaluation = passage['evaluation'][conjunction] if 'evaluation' in passage and conjunction in passage['evaluation'] else {}
    prompt = f"You are a teacher, helping fifth-grade students improve their writing skills. " \
        "In this exercise, students have read a text and are asked to complete a sentence. " \
        "The goal is for you to give feedback on their response. "
    prompt += f"\n\nThis is the sentence they are asked to complete: '{quill_prompt}'. "
if add_passage:
prompt += f'\n\nHere is the text students have read, separated by triple backticks: \n\n```{plagiarism_passage}```\n'
prompt += "\n" + label_info
prompt += """
These are general rules for your feedback:
- Do not give feedback about grammar, spelling or punctuation.
- Do not ask students to make sentences more clear and concise.
- Do not suggest a better answer."""
prompt += "\n\nYour feedback should be copied from these examples:"
for label in examples:
label_feedback = feedback[label]
for idx, example in enumerate(examples[label]):
prompt += f"\n\nResponse: {example}"
if evaluation:
prompt += f"\nParaphrase: {evaluation[label][idx]}"
prompt += f"\nFeedback: {label_feedback}"
if evaluation:
prompt += f'\n\nResponse: {response}\nParaphrase:'
else:
prompt += f'\n\nResponse: {response}\nFeedback:'
return prompt
def is_optimal(feedback: str):
""" Determines whether a piece of feedback corresponds to an optimal label.
This is the case when it starts with Nice work, great job, etc.
Args:
feedback (str): the piece of feedback given by the model
Returns:
boolean: True if the feedback is Optimal, False otherwise
"""
return 'Nice work!' in feedback or 'Great job!' in feedback or 'Excellent job' in feedback or 'Good job' in feedback
def fetch_with_timeout(api_call_func, messages, model, timeout_duration=10):
""" Calls the provided API function with a timeout.
Args:
api_call_func (function): the API call function
messages (list): the messages that will be sent to the API
model (str): the model that will be used
timeout_duration (int, optional): The length of the timeout. Defaults to 10.
Returns:
dict: a dict with the response returned by the API embedded
"""
result_container = {"result": None, "is_done": False}
def worker():
result_container["result"] = api_call_func(messages, model)
result_container["is_done"] = True
thread = threading.Thread(target=worker)
thread.start()
thread.join(timeout=timeout_duration)
if result_container["is_done"]:
return result_container["result"]
else:
return None
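# Example (mirrors the usage further below): wrap a chat call so that a hung request returns
# None instead of blocking the run; callers can then sleep and retry.
#
#     response = fetch_with_timeout(api_call_chat, messages, 'gpt-4', timeout_duration=10)
#     if response is None:
#         time.sleep(10)  # and retry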
def api_call_completion(messages, model):
""" The API call for Completion models (such as GPT-3.5-turbo-instruct)
Args:
messages (list): the messages that will be sent to the API
model (str): the model that will be used
Returns:
dict: the response returned by the API
"""
return openai.Completion.create(
model=model,
prompt=messages[0]['content'],
max_tokens=7,
temperature=0
)
def api_call_chat(messages, model):
""" The API call for ChatCompletion models (such as GPT-3.5-turbo)
Args:
messages (list): the messages that will be sent to the API
model (str): the model that will be used
Returns:
dict: the response returned by the API
"""
return openai.ChatCompletion.create(
model=model,
temperature=0,
max_tokens=OUTPUT_TOKENS,
messages = messages
)
#==================================================#
# OpenAI Functions. Taken from the OpenAI Cookbook #
#==================================================#
def check_data_format(dataset):
# Format error checks
format_errors = defaultdict(int)
for ex in dataset:
if not isinstance(ex, dict):
format_errors["data_type"] += 1
continue
messages = ex.get("messages", None)
if not messages:
format_errors["missing_messages_list"] += 1
continue
for message in messages:
if "role" not in message or "content" not in message:
format_errors["message_missing_key"] += 1
if any(k not in ("role", "content", "name") for k in message):
format_errors["message_unrecognized_key"] += 1
if message.get("role", None) not in ("system", "user", "assistant"):
format_errors["unrecognized_role"] += 1
content = message.get("content", None)
if not content or not isinstance(content, str):
format_errors["missing_content"] += 1
if not any(message.get("role", None) == "assistant" for message in messages):
format_errors["example_missing_assistant_message"] += 1
if format_errors:
print("Found errors:")
for k, v in format_errors.items():
print(f"{k}: {v}")
else:
print("No errors found")
# not exact!
# simplified from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, tokens_per_message=3, tokens_per_name=1):
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3
return num_tokens
def num_assistant_tokens_from_messages(messages):
num_tokens = 0
for message in messages:
if message["role"] == "assistant":
num_tokens += len(encoding.encode(message["content"]))
return num_tokens
def print_distribution(values, name):
print(f"\n#### Distribution of {name}:")
print(f"min / max: {min(values)}, {max(values)}")
print(f"mean / median: {np.mean(values)}, {np.median(values)}")
    print(f"p10 / p90: {np.quantile(values, 0.1)}, {np.quantile(values, 0.9)}")
def check_token_count(dataset):
# Warnings and tokens counts
n_missing_system = 0
n_missing_user = 0
n_messages = []
convo_lens = []
assistant_message_lens = []
for ex in dataset:
messages = ex["messages"]
if not any(message["role"] == "system" for message in messages):
n_missing_system += 1
if not any(message["role"] == "user" for message in messages):
n_missing_user += 1
n_messages.append(len(messages))
convo_lens.append(num_tokens_from_messages(messages))
assistant_message_lens.append(num_assistant_tokens_from_messages(messages))
print("Num examples missing system message:", n_missing_system)
print("Num examples missing user message:", n_missing_user)
print_distribution(n_messages, "num_messages_per_example")
print_distribution(convo_lens, "num_total_tokens_per_example")
print_distribution(assistant_message_lens, "num_assistant_tokens_per_example")
n_too_long = sum(l > 4096 for l in convo_lens)
print(f"\n{n_too_long} examples may be over the 4096 token limit, they will be truncated during fine-tuning")
# Pricing and default n_epochs estimate
MAX_TOKENS_PER_EXAMPLE = 4096
TARGET_EPOCHS = 3
MIN_TARGET_EXAMPLES = 100
MAX_TARGET_EXAMPLES = 25000
MIN_DEFAULT_EPOCHS = 1
MAX_DEFAULT_EPOCHS = 25
n_epochs = TARGET_EPOCHS
n_train_examples = len(dataset)
if n_train_examples * TARGET_EPOCHS < MIN_TARGET_EXAMPLES:
n_epochs = min(MAX_DEFAULT_EPOCHS, MIN_TARGET_EXAMPLES // n_train_examples)
elif n_train_examples * TARGET_EPOCHS > MAX_TARGET_EXAMPLES:
n_epochs = max(MIN_DEFAULT_EPOCHS, MAX_TARGET_EXAMPLES // n_train_examples)
n_billing_tokens_in_dataset = sum(min(MAX_TOKENS_PER_EXAMPLE, length) for length in convo_lens)
print(f"Dataset has ~{n_billing_tokens_in_dataset} tokens that will be charged for during training")
print(f"By default, you'll train for {n_epochs} epochs on this dataset")
print(f"By default, you'll be charged for ~{n_epochs * n_billing_tokens_in_dataset} tokens")
print(f"Estimated price: ${0.0080 * n_epochs * n_billing_tokens_in_dataset / 1000}")
def feedback_is_correct(feedback, correct_label):
return correct_label.startswith('Optimal') and is_optimal(feedback) or (not correct_label.startswith('Optimal') and not is_optimal(feedback))
#=============#
# Main method #
#=============#
@click.command()
@click.argument('output_file')
def run(output_file):
finetuning_examples = []
for passage in passages:
if 'files' in passage and 'prompts' in passage:
for conjunction in ['because', 'but', 'so']:
if conjunction in passage['files'] and conjunction in passage['prompts']:
print(passage['files'][conjunction]["train"])
train_items = read_file(passage['files'][conjunction]["train"], passage['prompts'][conjunction])
random.shuffle(train_items)
new_finetuning_examples = []
for train_item in train_items:
sentence = train_item[0]
correct_label = train_item[1]
quill_feedback = passage['feedback'][conjunction][correct_label]
full_prompt = create_openai_feedback_prompt(passage, sentence, conjunction, add_passage=True)
if FEEDBACK_SOURCE == 'quill':
messages = [
{"role": "system", "content": full_prompt},
{"role": "assistant", "content": quill_feedback},
]
new_finetuning_examples.append({"messages": messages})
else:
auto_messages = [
{"role": "system", "content": full_prompt},
]
for _ in range(MAX_RETRIES):
response = fetch_with_timeout(api_call_chat, auto_messages, 'gpt-4', timeout_duration=10)
if response:
break
time.sleep(10)
answer = response['choices'][0]['message']['content']
feedback = answer.split('Feedback:')[1].strip() if 'Feedback:' in answer else answer
print(sentence)
print(feedback)
print(feedback_is_correct(feedback, correct_label))
if feedback_is_correct(feedback, correct_label):
messages = [
{"role": "system", "content": full_prompt},
{"role": "assistant", "content": quill_feedback},
]
new_finetuning_examples.append({"messages": messages})
if len(new_finetuning_examples) >= NUM_ITEMS:
break
finetuning_examples.extend(new_finetuning_examples)
print("Finetuning examples:", len(finetuning_examples))
check_data_format(finetuning_examples)
check_token_count(finetuning_examples)
input("Press enter to continue")
with jsonlines.open(output_file, 'w') as writer:
for example in finetuning_examples:
writer.write(example)
input("Created finetuning file")
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.File.create(
file=open(output_file, "rb"),
purpose='fine-tune'
)
ft_response = openai.FineTuningJob.create(training_file=response['id'],
model="gpt-3.5-turbo",
hyperparameters = {
"n_epochs": 3
}
)
print(ft_response)
if __name__ == '__main__':
run() | [
"\n\nResponse: PLACEHOLDER\nParaphrase:",
"You are a teacher, helping fifth-grade students improve their writing skills. In this exercise, students have read a text and are asked to complete a sentence. The goal is for you to give feedback on their response. ",
"\n\nThis is the sentence they are asked to complete: 'PLACEHOLDER'. ",
"\n\nYour feedback should be copied from these examples:",
"\nPLACEHOLDER",
"\nFeedback: PLACEHOLDER",
"\n\nResponse: PLACEHOLDER",
"\n\nResponse: PLACEHOLDER\nFeedback:",
"\n\nHere is the text students have read, separated by triple backticks: \n\n```PLACEHOLDER```\n",
"\nThese are general rules for your feedback:\n- Do not give feedback about grammar, spelling or punctuation.\n- Do not ask students to make sentences more clear and concise.\n- Do not suggest a better answer.",
"content"
] |
2024-01-10 | empirical-org/Quill-NLP-Tools-and-Datasets | scripts~gpt~moderate_feedback.py | # Simple script for moderating feedback with a GPT model. Takes Quill feedback as input, asks the
# GPT model to remove any undesired elements, and writes the output to a file.
#
# Usage:
# > python scripts/moderate_feedback.py <gpt_model> <output_file> --verbose <True/False>
# Example:
# > python scripts/moderate_feedback.py gpt-4 feedback_output.csv --verbose False
import os
import openai
import csv
import time
import click
from tqdm import tqdm
from scripts.data.bereal import passage
openai.api_key = os.getenv("OPENAI_API_KEY")
OPTIMAL_LABEL = 'Optimal'
SUBOPTIMAL_LABEL = 'Suboptimal'
INPUTFILE = 'data/automl/problematic_feedback.csv'
def read_file(filename):
""" Reads problematic feedback from the input file."""
items = []
seen_feedback = set()
with open(filename) as i:
reader = csv.reader(i, delimiter=",")
next(reader)
for line in reader:
feedback = line[2]
if feedback not in seen_feedback:
items.append(line[:3])
seen_feedback.update([feedback])
return items
def get_prompt(feedback):
""" Assembles the prompt for feedback moderation. """
prompt = """Correct the feedback below. Keep it as intact as possible, but remove any of the following sentences:
- sentences that refer to grammar, spelling or punctuation,
- sentences that say the response is unclear or not concise enough,
- sentences that give away the correct answer explicitly,
- feedback that asks the student to write two sentences instead of one.
If there is more than one question in the feedback, only keep the first question.
Keep the feedback as intact as possible. In particular, keep the first sentences. Do not remove:
- "Try clearing your response",
- "Check that your response only uses information from the text",
- "Now add another reason".
Here are some examples:
Feedback: Try clearing your response and starting again. Your response is too long and confusing. Focus on one specific contrast to the fact that many tourists enjoy taking quokka selfies.
Corrected feedback: Try clearing your response and starting again. Focus on one specific contrast to the fact that many tourists enjoy taking quokka selfies.
Feedback: Good start! You mentioned that it's possible to post later than two minutes, but can you explain why this is a contrast to the idea that BeReal is more authentic? Also, be sure to revise your response for clarity and grammar.
Corrected Feedback: Good start! You mentioned that it's possible to post later than two minutes, but can you explain why this is a contrast to the idea that BeReal is more authentic?
Feedback: You have the right idea! Now be more specific. Quokka bites can be a potential risk. How many people are bitten by quokkas each year?
Corrected feedback: You have the right idea! Now be more specific. Quokka bites can be a potential risk. How many people are bitten by quokkas each year?
Feedback: You have the right idea! Now be more specific. Quokkas do look like they're smiling. Now add a detail from the text to strengthen your claim. What makes quokkas look like they're smiling?
Corrected feedback: You have the right idea! Now be more specific. Quokkas do look like they're smiling. Now add a detail from the text to strengthen your claim. What makes quokkas look like they're smiling?
"""
prompt += f"Feedback: {feedback}\n"
prompt += f"Corrected feedback:"
return prompt
def moderate(feedback, model):
""" Ask GPT to rewrite a piece of feedback and return the answer."""
full_prompt = get_prompt(feedback)
messages = [
{"role": "system", "content": full_prompt},
]
try:
response = openai.ChatCompletion.create(
model=model,
temperature=0,
max_tokens=175,
messages = messages
)
except:
time.sleep(3)
response = openai.ChatCompletion.create(
model=model,
temperature=0,
max_tokens=75,
messages = messages
)
answer = response['choices'][0]['message']['content'].replace('`', '')
return answer
@click.command()
@click.argument('model')
@click.argument('output_file')
@click.option('--verbose', default=False)
def main(output_file, model, verbose):
test_items = read_file(INPUTFILE)
print("Test feedback items:", len(test_items))
corrected = 0
with open(output_file, 'w') as o:
writer = csv.writer(o, delimiter=',')
writer.writerow(['sentence', 'label', 'GPT feedback', 'Moderated feedback'])
for item in tqdm(test_items):
sentence = item[0]
correct_label = item[1]
feedback = item[2]
if 'Feedback:' in feedback:
feedback = feedback.split('Feedback:')[1].strip()
answer = moderate(feedback, model)
writer.writerow(item + [answer])
if feedback != answer:
if verbose:
print(feedback)
print('=>')
print(answer)
print('----')
corrected +=1
print('Corrected:', corrected, '/', len(test_items), '=', corrected/len(test_items)*100, '%')
if __name__ == "__main__":
main() | [
"Feedback: PLACEHOLDER\n",
"Correct the feedback below. Keep it as intact as possible, but remove any of the following sentences:\n- sentences that refer to grammar, spelling or punctuation,\n- sentences that say the response is unclear or not concise enough,\n- sentences that give away the correct answer explicitly,\n- feedback that asks the student to write two sentences instead of one.\n\nIf there is more than one question in the feedback, only keep the first question.\n\nKeep the feedback as intact as possible. In particular, keep the first sentences. Do not remove:\n- \"Try clearing your response\",\n- \"Check that your response only uses information from the text\",\n- \"Now add another reason\".\n\nHere are some examples:\nFeedback: Try clearing your response and starting again. Your response is too long and confusing. Focus on one specific contrast to the fact that many tourists enjoy taking quokka selfies.\nCorrected feedback: Try clearing your response and starting again. Focus on one specific contrast to the fact that many tourists enjoy taking quokka selfies.\n\nFeedback: Good start! You mentioned that it's possible to post later than two minutes, but can you explain why this is a contrast to the idea that BeReal is more authentic? Also, be sure to revise your response for clarity and grammar.\nCorrected Feedback: Good start! You mentioned that it's possible to post later than two minutes, but can you explain why this is a contrast to the idea that BeReal is more authentic?\n\nFeedback: You have the right idea! Now be more specific. Quokka bites can be a potential risk. How many people are bitten by quokkas each year?\nCorrected feedback: You have the right idea! Now be more specific. Quokka bites can be a potential risk. How many people are bitten by quokkas each year?\n\nFeedback: You have the right idea! Now be more specific. Quokkas do look like they're smiling. Now add a detail from the text to strengthen your claim. What makes quokkas look like they're smiling?\nCorrected feedback: You have the right idea! Now be more specific. Quokkas do look like they're smiling. Now add a detail from the text to strengthen your claim. What makes quokkas look like they're smiling?\n\n",
"Corrected feedback:"
] |
2024-01-10 | wisdom-pan/langchain-chatGLM-lora | vectorstores~MyFAISS.py | from langchain.vectorstores import FAISS
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.faiss import dependable_faiss_import
from typing import Any, Callable, List, Dict
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
import numpy as np
import copy
import os
from configs.model_config import *
class MyFAISS(FAISS, VectorStore):
def __init__(
self,
embedding_function: Callable,
index: Any,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
normalize_L2: bool = False,
):
super().__init__(embedding_function=embedding_function,
index=index,
docstore=docstore,
index_to_docstore_id=index_to_docstore_id,
normalize_L2=normalize_L2)
self.score_threshold=VECTOR_SEARCH_SCORE_THRESHOLD
self.chunk_size = CHUNK_SIZE
self.chunk_conent = False
def seperate_list(self, ls: List[int]) -> List[List[int]]:
        # TODO: also check whether consecutive chunks belong to the same document
lists = []
ls1 = [ls[0]]
for i in range(1, len(ls)):
if ls[i - 1] + 1 == ls[i]:
ls1.append(ls[i])
else:
lists.append(ls1)
ls1 = [ls[i]]
lists.append(ls1)
return lists
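    # Example (illustrative input): consecutive ids are grouped into runs,
    #     seperate_list([1, 2, 3, 7, 8, 10]) -> [[1, 2, 3], [7, 8], [10]]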
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4
) -> List[Document]:
faiss = dependable_faiss_import()
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
faiss.normalize_L2(vector)
scores, indices = self.index.search(vector, k)
docs = []
id_set = set()
store_len = len(self.index_to_docstore_id)
rearrange_id_list = False
for j, i in enumerate(indices[0]):
if i == -1 or 0 < self.score_threshold < scores[0][j]:
# This happens when not enough docs are returned.
continue
if i in self.index_to_docstore_id:
_id = self.index_to_docstore_id[i]
                # continue with the steps below
else:
continue
doc = self.docstore.search(_id)
if (not self.chunk_conent) or ("context_expand" in doc.metadata and not doc.metadata["context_expand"]):
                # if the matched text does not need its context expanded, run the code below
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc.metadata["score"] = int(scores[0][j])
docs.append(doc)
continue
id_set.add(i)
docs_len = len(doc.page_content)
for offset in range(1, max(i, store_len - i)):
break_flag = False
if "context_expand_method" in doc.metadata and doc.metadata["context_expand_method"] == "forward":
expand_range = [i + offset]
elif "context_expand_method" in doc.metadata and doc.metadata["context_expand_method"] == "backward":
expand_range = [i - offset]
else:
expand_range = [i + offset, i - offset]
for l in expand_range:
# if l not in id_set and 0 <= l < len(self.index_to_docstore_id):
if l not in id_set and l in self.index_to_docstore_id:
_id0 = self.index_to_docstore_id[l]
doc0 = self.docstore.search(_id0)
if docs_len + len(doc0.page_content) > self.chunk_size or doc0.metadata["source"] != \
doc.metadata["source"]:
break_flag = True
break
elif doc0.metadata["source"] == doc.metadata["source"]:
docs_len += len(doc0.page_content)
id_set.add(l)
rearrange_id_list = True
if break_flag:
break
if (not self.chunk_conent) or (not rearrange_id_list):
return docs
if len(id_set) == 0 and self.score_threshold > 0:
return []
id_list = sorted(list(id_set))
id_lists = self.seperate_list(id_list)
for id_seq in id_lists:
for id in id_seq:
if id == id_seq[0]:
_id = self.index_to_docstore_id[id]
# doc = self.docstore.search(_id)
doc = copy.deepcopy(self.docstore.search(_id))
else:
_id0 = self.index_to_docstore_id[id]
doc0 = self.docstore.search(_id0)
doc.page_content += " " + doc0.page_content
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
doc_score = min([scores[0][id] for id in [indices[0].tolist().index(i) for i in id_seq if i in indices[0]]])
doc.metadata["score"] = int(doc_score)
docs.append(doc)
return docs
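# Remove every chunk whose metadata "source" matches the given path(s) from the docstore and the id mapping,
# then persist the store; the underlying FAISS index vectors are left in place (see the TODO below).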
def delete_doc(self, source: str or List[str]):
try:
if isinstance(source, str):
ids = [k for k, v in self.docstore._dict.items() if v.metadata["source"] == source]
vs_path = os.path.join(os.path.split(os.path.split(source)[0])[0], "vector_store")
else:
ids = [k for k, v in self.docstore._dict.items() if v.metadata["source"] in source]
vs_path = os.path.join(os.path.split(os.path.split(source[0])[0])[0], "vector_store")
if len(ids) == 0:
return f"docs delete fail"
else:
for id in ids:
index = list(self.index_to_docstore_id.keys())[list(self.index_to_docstore_id.values()).index(id)]
self.index_to_docstore_id.pop(index)
self.docstore._dict.pop(id)
# TODO: also remove the corresponding vectors from self.index
# self.index.reset()
self.save_local(vs_path)
return f"docs delete success"
except Exception as e:
print(e)
return f"docs delete fail"
def update_doc(self, source, new_docs):
try:
delete_len = self.delete_doc(source)
ls = self.add_documents(new_docs)
return f"docs update success"
except Exception as e:
print(e)
return f"docs update fail"
def list_docs(self):
return list(set(v.metadata["source"] for v in self.docstore._dict.values()))
| [] |
2024-01-10 | adamxuuuu/gym | vbot~utils~titles.py | import os
import re
import string
import numpy as np
from tqdm import tqdm
from pprint import pprint
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from config import EMBEDDING
embeddings = HuggingFaceEmbeddings(
model_name=EMBEDDING,
encode_kwargs = {'normalize_embeddings': True}
)
files = os.listdir('data/policies')
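# Extract the number that follows "po"/"Po"/"Policy" in a file name (e.g. "Policy No 58-IT Steering" -> "58");
# returns 0 when no match is found.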
def find_num(text):
match = re.search(r'po\D*(\d+)', text, re.IGNORECASE)
if match:
return match.group(1)
return 0
def replace_punctuation_with_space(text):
translation_table = str.maketrans(string.punctuation, ' ' * len(string.punctuation))
return text.translate(translation_table)
def clean_text(text):
result = replace_punctuation_with_space(text)
# Remove consecutive white spaces
result = re.sub(r"\s+", " ", result)
return result
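# Build a FAISS index over the cleaned file names, storing the original name and extracted policy number as
# metadata, and save it locally for filename-level retrieval.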
def embed_filenames():
meta = [{"source" : f, "number" : find_num(f)} for f in files]
faiss = FAISS.from_texts(
texts=[clean_text(f) for f in files],
embedding=embeddings,
metadatas=meta
)
faiss.save_local('faiss/filenames')
if __name__ == "__main__":
# find_num('VCIC_Policy No 58-IT Steering V2.0.pdf')
# embed_filenames()
faiss = FAISS.load_local('faiss/filenames', embeddings)
while True:
query = input("Question:")
if not query:
break
res = faiss.similarity_search_with_score(query, k=10, filter={"number": query}, fetch_k=400)
# res = faiss.max_marginal_relevance_search(query, k=5, filter={"number": query}, fetch_k=40)
pprint(res)
| [] |
2024-01-10 | adamxuuuu/gym | vbot~utils~etl_faiss.py | import os
import pprint
from typing import Iterator, List, Optional
from tqdm import tqdm
from langchain.vectorstores import FAISS
from langchain.document_loaders import UnstructuredFileLoader, DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
from pdf import clean_text
from config import (
EMBEDDING,
CHUNK_OVERLAP,
CHUNK_SIZE,
SEPARATORS
)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=CHUNK_SIZE,
chunk_overlap=CHUNK_OVERLAP,
separators=SEPARATORS,
is_separator_regex=True,
keep_separator=False
)
def load_dir(data_path: str, ftype: Optional[str] = "*.pdf") -> List[Document]:
return DirectoryLoader(
path=data_path,
glob=ftype,
loader_cls=UnstructuredFileLoader,
# loader_kwargs={
# "mode": "single"
# },
show_progress=True
).load_and_split(
text_splitter
)
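# Load every PDF under data_path, clean each chunk's text, embed the chunks and persist the resulting FAISS
# index to save_path.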
def save_local(data_path: str, save_path: str, model: HuggingFaceEmbeddings):
docs = load_dir(data_path)
for doc in docs:
doc.page_content = clean_text(doc.page_content)
vector_db = FAISS.from_documents(
documents=docs,
embedding=model,
)
vector_db.save_local(save_path)
if __name__ == '__main__':
# Init Model
embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING)
# Save
model_base = os.path.basename(EMBEDDING)
db_path = f'./faiss/cs{CHUNK_SIZE}/{model_base}'
data_path = './data/test'
save_local(data_path, db_path, embeddings)
# Query
# vector_db = FAISS.load_local(db_path, embeddings)
# hits = vector_db.search('who is the contact person for environment friendly policy', search_type='mmr')
# for hit in hits:
# pprint.pprint(hit.page_content) | [] |
2024-01-10 | adamxuuuu/gym | vbot~pages~3_Chat_With_File.py | import streamlit as st
from langchain.llms import OpenAI, LlamaCpp
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from utils.callbacks import StreamHandler
from config import (EMBEDDING, LLM)
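# Read the uploaded text file, split it into 512-character chunks, embed them into an in-memory FAISS store,
# and answer the query with a RetrievalQA chain backed by a local LlamaCpp model, streaming tokens through
# stream_handler.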
def generate_response(uploaded_file, query_text, stream_handler):
# Load document if file is uploaded
if uploaded_file is not None:
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=512, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING)
# Create a vectorstore from documents
db = FAISS.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever(
search_type="mmr",
search_kwargs={'k': 4, 'lambda_mult': 0.25}
)
# Create QA chain
qa = RetrievalQA.from_chain_type(
llm=LlamaCpp(model_path=LLM, n_ctx=2048, temperature=0.01),
chain_type='stuff',
retriever=retriever
)
return qa.run(query_text, callbacks=[stream_handler])
# Page title
st.set_page_config(page_title='🦜🔗 Ask the Doc App')
st.title('🦜🔗 Ask the Doc App')
# File upload
uploaded_file = st.file_uploader('Upload an document (1kb usually takes 120 seconds to process)', type='txt', )
print(uploaded_file)
# Query text
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not(uploaded_file))
# Form input and query
result = []
with st.form('myform', clear_on_submit=True):
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file))
if submitted:
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
stream_handler = StreamHandler(st.empty())
response = generate_response(uploaded_file, query_text, stream_handler)
| [] |
2024-01-10 | adamxuuuu/gym | vbot~mlivus~etl_milvus.py | from pymilvus import connections, utility, FieldSchema, CollectionSchema, DataType, Collection
from sentence_transformers import SentenceTransformer
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Milvus
import os
from config import (
COLLECTION_NAME,
DIMENSION,
MILVUS_HOST,
MILVUS_PORT,
EMBEDDING_MODEL
)
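# Recreate the Milvus collection from scratch: drop any existing collection with the same name, define an
# id/title/content/embedding schema, and build an IVF_FLAT index with L2 distance on the embedding field.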
def create_milvus_collection(collection_name, dim):
if utility.has_collection(collection_name):
utility.drop_collection(collection_name)
fields = [
FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=False),
FieldSchema(name="title", dtype=DataType.VARCHAR, max_length=500),
FieldSchema(name="content", dtype=DataType.VARCHAR, max_length=2000),
FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=dim)
]
schema = CollectionSchema(fields=fields, description='search text')
collection = Collection(name=collection_name, schema=schema)
index_params = {
'metric_type': "L2",
'index_type': "IVF_FLAT",
'params': {"nlist": 2048}
}
collection.create_index(field_name='embedding', index_params=index_params)
utility.index_building_progress(COLLECTION_NAME)
return collection
# Connect and Create to Milvus Database
connections.connect(host=MILVUS_HOST, port=MILVUS_PORT)
collection = create_milvus_collection(COLLECTION_NAME, DIMENSION)
# Batch insert data
titles = []
contents = []
embeds = []
filedir = "data/"
embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# Get all the text files in the text directory
for file in os.listdir(filedir):
txt = TextLoader(filedir + file).load()
for c in text_splitter.split_documents(txt):
titles.append(c.metadata['source'])
contents.append(c.page_content)
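# Milvus insert() expects column-wise data: one list per field (ids, titles, contents, embeddings) with
# entries aligned by position.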
data = [
[i for i in range(len(contents))],
titles,
contents,
embeddings.embed_documents(contents)
]
collection.insert(data)
collection.flush()
print(collection.num_entities)
| [] |
2024-01-10 | adamxuuuu/gym | vbot~pages~2_Chat_With_Policy.py | import os
import streamlit as st
from pprint import pprint
from langchain import PromptTemplate
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
from langchain.llms import LlamaCpp
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import StreamlitChatMessageHistory
from utils.callbacks import StreamHandler, RetrievalHandler
from config import (
EMBEDDING,
PROMPT_TEMPLATE,
LLM,
SEARCH_KWARGS,
SEARCH_TYPE,
CHUNK_SIZE,
DB_BASE
)
PROMPT = PromptTemplate(
input_variables=["context", "question"],
template=PROMPT_TEMPLATE
)
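# Cached loader for the persisted FAISS store; returns a retriever configured with the search type and kwargs
# from config so the index is only read from disk once per session.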
@st.cache_resource()
def retriever(db_path: str):
embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING)
vector_db = FAISS.load_local(
folder_path=db_path,
embeddings=embeddings,
normalize_L2=True,
)
retriever = vector_db.as_retriever(
search_type=SEARCH_TYPE,
search_kwargs=SEARCH_KWARGS
)
return retriever
# ======================APP==========================
msgs = StreamlitChatMessageHistory()
llm = LlamaCpp(
model_path=LLM,
temperature=0.01,
n_ctx=3000,
streaming=True,
max_tokens=512
)
chain_type_kwargs = {"prompt": PROMPT}
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=retriever(f'{DB_BASE}/cs{CHUNK_SIZE}/{os.path.basename(EMBEDDING)}'),
chain_type_kwargs=chain_type_kwargs
)
# Sidebar
with st.sidebar:
if st.sidebar.button("Clear message history") or len(msgs.messages) == 0:
msgs.clear()
msgs.add_ai_message("How can I help you?")
avatars = {"human": "user", "ai": "assistant"}
for msg in msgs.messages:
st.chat_message(avatars[msg.type]).write(msg.content)
if user_query := st.chat_input(placeholder="Ask me about VW policy related question."):
st.chat_message("user").write(user_query)
with st.chat_message("assistant"):
retrieval_handler = RetrievalHandler(st.container())
stream_handler = StreamHandler(st.empty())
pprint(f"Start processing query: '{user_query}'")
response = qa_chain.run(user_query, callbacks=[
retrieval_handler, stream_handler])
pprint(f"Finish generation: {response}")
| [
"question",
"context"
] |
2024-01-10 | adamxuuuu/gym | vbot~pages~1_Free_Chat.py | from langchain import LLMChain, PromptTemplate, ConversationChain
from langchain.llms import LlamaCpp
from langchain.memory import ConversationBufferWindowMemory
from langchain.callbacks.base import BaseCallbackHandler
import streamlit as st
import os
def prompt():
template = """You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.
{history}
Human: {input}
AI:"""
return PromptTemplate(template=template, input_variables=['history', 'input'])
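# Streams the answer token by token: each new token is appended to the buffer and rendered into the Streamlit
# placeholder.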
class StreamHandler(BaseCallbackHandler):
def __init__(self, container: st.delta_generator.DeltaGenerator, initial_text: str = ""):
self.container = container
self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token
self.container.markdown(self.text)
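# Cached so the GGML model is loaded only once per parameter combination; wraps the model in a
# ConversationChain that keeps the last 5 exchanges in memory.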
@st.cache_resource
def init_llm_chain(model_path, temperature, top_p, max_length):
llm = LlamaCpp(
model_path=model_path,
temperature=temperature,
top_p=top_p,
max_tokens=max_length
)
return ConversationChain(
prompt=prompt(),
llm=llm,
memory=ConversationBufferWindowMemory(k=5),
verbose=True
)
# App
st.set_page_config(page_title="🦙💬 Llama 2 Chatbot")
with st.sidebar:
st.title('🦙💬 Llama 2 Chatbot')
st.subheader('Models and parameters')
selected_model = st.sidebar.selectbox('Choose a Llama2 model', ['Llama2-7B-q4', 'Llama2-7B-q8', 'Llama2-13B'], key='selected_model')
if selected_model == 'Llama2-7B-q4':
llm = './models/llama-2-7b-chat.ggmlv3.q4_K_M.bin'
elif selected_model == 'Llama2-7B-q8':
llm = './models/llama-2-7b-chat.ggmlv3.q8_0.bin'
else:
llm = './models/llama-2-13b-chat.ggmlv3.q4_1.bin'
temperature = st.sidebar.slider('temperature', min_value=0.01, max_value=1.0, value=0.1, step=0.01)
top_p = st.sidebar.slider('top_p', min_value=0.01, max_value=1.0, value=0.9, step=0.01)
max_length = st.sidebar.slider('max_length', min_value=64, max_value=2048, value=512, step=8)
chain = init_llm_chain(llm, temperature, top_p, max_length)
# Store LLM generated responses
if "messages" not in st.session_state.keys():
st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
# Display or clear chat messages
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
def clear_chat_history():
st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
chain.memory.clear()
st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
# User-provided prompt
if human_input := st.chat_input():
st.session_state.messages.append({"role": "user", "content": human_input})
with st.chat_message("user"):
st.write(human_input)
# Generate a new response if last message is not from assistant
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
stream_handler = StreamHandler(st.empty())
response = chain.predict(input=human_input, callbacks=[stream_handler])
# memory.save_context({"input": human_input}, {"output": response})
message = {"role": "assistant", "content": response}
st.session_state.messages.append(message) | [
"How may I assist you today?",
"You are a helpful assistant. You do not respond as 'User' or pretend to be 'User'. You only respond once as 'Assistant'.\n\n {history}\n Human: {input}\n AI:"
] |
2024-01-10 | Matthewsecond/WebScraping | Controller~Process_Import_Data~OtherFunctions~Used~Geolocation~import_city.py | import sqlalchemy
from GeoProcessing import get_kraj, get_latitude, get_longitude
import pandas as pd
import geopy
import unicodedata
from pandasai import SmartDataframe, SmartDatalake
from pandasai.llm import OpenAI
import pandasai
engine = sqlalchemy.create_engine(
'mysql+pymysql://admin:N6zmVKVW@jobs-intelligence-slovakia.'
'cluster-c0rbbiliflyo.eu-central-1.rds.amazonaws.com:9906/General_Intelligence')
conn = engine.connect()
def drop_table(table_name):
try:
query = sqlalchemy.text(f'DROP TABLE {table_name}')
conn.execute(query)
except Exception as e:
print(e)
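# Reverse-geocode a row's latitude/longitude with Nominatim and return the postcode, or None if the lookup fails.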
def get_zipcode(df, geolocator, lat_field, lon_field):
#df.apply(get_zipcode, axis=1, geolocator=geolocator, lat_field='Lat', lon_field='Lon')
location = geolocator.reverse((df[lat_field], df[lon_field]))
try:
return location.raw['address']['postcode']
except Exception as e:
return None
if __name__ == '__main__':
llm = OpenAI(api_token='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')
#read webcrawlresults from database
webcrawlresults = pd.read_sql_table('SK_WebCrawlResults', conn)
#read cities from database
cities = pd.read_sql_table('Cities_Processed', conn)
#webcrawlresults = webcrawlresults[:60000]
dl = SmartDatalake([cities, webcrawlresults], config={"llm": llm})
#dl = SmartDataframe(df=pd.DataFrame(webcrawlresults), config={"llm": llm})
#response = dl.chat('merge cities in cities dataframe with locality in webcrawlresults. Ignore small differences in text. Return dataframe.')
response = dl.chat('merge column municipality in cities dataframe with column location in webcrawlresults.')
dataframe1 = response.dataframe
dl = SmartDatalake([cities, webcrawlresults], config={"llm": llm})
response = dl.chat('merge column city in cities dataframe with column location if there in webcrawlresults.')
dataframe2 = response.dataframe
# 'concatenate dataframes dataframe and dataframe2. Compare column link. If it already exists in dataframe1, do not add it.'
dataframe3 = pd.concat([dataframe1, dataframe2]).drop_duplicates(subset=['link'], keep='first')
dataframe3.to_sql(name='SK_WebCrawlResults_Processed_test', con=engine, if_exists='append', index=False)
#dl.chat('Find all matches between column location from webcrawlresults and Municipality from cities and merge the dataframes based on those matches. ''Ignore small differences in text.')
## drop column City from cities
#municipality = cities.drop(columns=['City'])
#convert column location to unicode
#webcrawlresults['location'] = webcrawlresults['location'].apply(lambda val: unicodedata.normalize('NFKD', str(val)).encode('ascii', 'ignore').decode())
#merge webcrawlresults and ciities based on column location from webcrawlresults and column City and Municipality from cities
#merged2 = webcrawlresults.merge(cities, left_on='location', right_on='Municipality', how='left')
'''
geolocator = geopy.Nominatim(user_agent='123')
# read cities from database
cities = pd.read_sql_table('Cities_Processed', conn)
cities['ZIP_CODE'] = cities.apply(get_zipcode, axis=1, geolocator=geolocator, lat_field='latitude', lon_field='longitude')
#drop table cities
drop_table('Cities_Processed')
#import into Cities_Processed
cities.to_sql(name='Cities_Processed', con=engine, if_exists='append', index=False)
'''
#read cities from database
#cities = pd.read_sql_table('Cities_Processed', conn)
#cities = cities[:10]
#cities['zip_code'] = cities.apply(get_zipcode, axis=1, geolocator=geolocator, lat_field='latitude', lon_field='longitude')
#print(cities)
#drop table cities
#read csv into dataframe, ; is separator
#add columns to cities_add
#read csv into dataframe, ; is separator
#columns=['Municipality','City','Region', 'latitude', 'longitude']
#cities_add = pd.read_csv(r'C:\Users\labus.INTERCONNECTION\Desktop\WebScrapingProject\Geolocation\cities.csv', names=columns, sep=';' , skiprows=2)
#cities_add.to_sql(name='Cities_Processed', con=engine, if_exists='append', index=False)
#cities_add['kraj'] = cities_add['City'].apply(get_kraj)
#cities_add['latitude'] = cities_add['City'].apply(get_latitude)
#cities_add['longitude'] = cities_add['City'].apply(get_longitude)
| [
"merge column city in cities dataframe with column location if there in webcrawlresults.",
"merge column municipality in cities dataframe with column location in webcrawlresults."
] |
2024-01-10 | Matthewsecond/WebScraping | Controller~Process_Import_Data~OtherFunctions~Used~Geolocation_CZ~GeoProcessing.py | from geopy.geocoders import Nominatim
import pandas as pd
import re
from unidecode import unidecode
from datetime import datetime
import openai
import sqlalchemy
import time
import geopy
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
openai.api_key = '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'
app = Nominatim(user_agent="tutorial")
engine = sqlalchemy.create_engine(
'mysql+pymysql://admin:N6zmVKVW@jobs-intelligence-slovakia.'
'cluster-c0rbbiliflyo.eu-central-1.rds.amazonaws.com:9906/General_Intelligence_CZ')
conn = engine.connect()
def get_companies_slovakia():
try:
query = sqlalchemy.text('SELECT * FROM `Companies_Slovakia_Processed`')
# Read data from the query
dataframe_companies = pd.read_sql_query(query, conn)
return dataframe_companies
except Exception as e:
print(e)
def get_CZ_WebCrawlResults():
try:
query = sqlalchemy.text('SELECT * FROM `CZ_WebCrawlResults`')
# Read data from the query
dataframe = pd.read_sql_query(query, conn)
return dataframe
except Exception as e:
print(e)
def remove_numbers(string):
if type(string) == str:
return re.sub(r'\d+', '', string)
def split_word_by_comma(word):
if word is not None and 'Praca vyzaduje cestovanie' in word:
return 'Traveling job'
elif word is not None:
return [x.strip() for x in re.split(r'[,-]', word)]
def get_Cities_Processed():
try:
query = sqlalchemy.text('SELECT * FROM `Cities_Processed`')
# Read data from the query
dataframe = pd.read_sql_query(query, conn)
return dataframe
except Exception as e:
print(e)
#function to drop table
def drop_table(table_name):
try:
query = sqlalchemy.text(f'DROP TABLE {table_name}')
conn.execute(query)
except Exception as e:
print(e)
def get_zipcode(df, geolocator, lat_field, lon_field):
#df.apply(get_zipcode, axis=1, geolocator=geolocator, lat_field='Lat', lon_field='Lon')
location = geolocator.reverse((df[lat_field], df[lon_field]))
return location.raw['address']['postcode']
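# Normalise the free-text "location" column of CZ_WebCrawlResults and, where one of its comma/hyphen separated
# parts matches a known value in Cities_Processed[specific_location], copy that value into the corresponding
# column, then rewrite the CZ_WebCrawlResults table.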
def clean_locations(specific_location):
# read database
database_webcrawl_results = get_CZ_WebCrawlResults()
database_cities = get_Cities_Processed()
# place = 'Bratislava II, Bratislava, Slovakia (Job with occasional home office)'
# words = split_word_by_comma(place)
cities_list = database_cities[specific_location].to_list()
# cities_list = cities_list[:10]
# convert all characters in cities_list to ascii
cities_list = [unidecode(x) for x in cities_list]
# remove '-' from database_webcrawl_results['location'] only if it is not None
database_webcrawl_results['location'] = database_webcrawl_results['location'].apply(
lambda x: x.replace('-', '') if x is not None else x)
database_webcrawl_results['location'] = database_webcrawl_results['location'].apply(
lambda x: x.replace(' ,', ',') if x is not None else x)
# loop through webcrawl results
for index, row, in database_webcrawl_results.iterrows():
# delete numbers from row['location']
database_webcrawl_results.loc[index, 'location'] = remove_numbers(row['location'])
# remove space from row['location']
# database_webcrawl_results.loc[index, 'location'] = delete_space(row['location'])
words = split_word_by_comma(row['location'])
# loop through words and if one of the words is in database_cities['City'] then set the location to that city
if words is not None:
for word in words:
if word in cities_list:
# database_webcrawl_results.loc[index, 'location'] = word
database_webcrawl_results.loc[index, specific_location] = word
break
# drop webcrawl results table
drop_table('CZ_WebCrawlResults')
# import into the mysql CZ_WebCrawlResults table
database_webcrawl_results.to_sql(name='CZ_WebCrawlResults', con=engine, if_exists='append', index=False)
#fill missing locations
def fill_locations():
# read database
database_webcrawl_results = get_CZ_WebCrawlResults()
database_cities = get_Cities_Processed()
# merge on Obec (municipality) only where Okres (district) is missing
mask1 = database_webcrawl_results['Okres'].isnull()
merged_df1 = pd.merge(database_webcrawl_results[mask1], database_cities, on='Obec', how='left')
# drop the duplicated Okres_x and Kraj_x columns
merged_df1.drop(['Okres_x', 'Kraj_x'], axis=1, inplace=True)
# rename Okres_y and Kraj_y back to Okres and Kraj
merged_df1.rename(columns={'Okres_y': 'Okres', 'Kraj_y': 'Kraj'}, inplace=True)
#drop rows where Municipality is None
merged_df1 = merged_df1[merged_df1['Obec'].notna()]
mask = database_webcrawl_results['Obec'].isnull()
merged_df2 = pd.merge(database_webcrawl_results[mask], database_cities, on='Okres', how='left')
#drop the duplicated Obec_x and Kraj_x columns
merged_df2.drop(['Obec_x', 'Kraj_x'], axis=1, inplace=True)
#delete rows where City is None
#merged_df2 = merged_df2[merged_df2['City'].notna()]
#rename Obec_y and Kraj_y back to Obec and Kraj
merged_df2.rename(columns={'Obec_y': 'Obec', 'Kraj_y': 'Kraj'}, inplace=True)
#if region in database_webcrawl_results is filled but Municipality is None and City is None then fill Municipality and City with string unspecified
database_webcrawl_results.loc[(database_webcrawl_results['Kraj'].notna()) & (database_webcrawl_results['Obec'].isnull()) & (database_webcrawl_results['Okres'].isnull()), ['Okres', 'Obec']] = 'unspecified'
#keep only rows where Municipality and City are unspecified
database_webcrawl_results = database_webcrawl_results[(database_webcrawl_results['Okres'] == 'unspecified') & (database_webcrawl_results['Obec'] == 'unspecified')]
#concatenate merged_df1 and merged_df2 with database_webcrawl_results
database_webcrawl_results_merged = pd.concat([merged_df1, merged_df2, database_webcrawl_results], ignore_index=True)
# fill missing values in latitude, longitude and Zipcode with unspecified
database_webcrawl_results_merged['Latitude'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Longitude'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Kód okresu'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Kód kraje'].fillna('unspecified', inplace=True)
#database_webcrawl_results_merged['work_type'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Obec'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Okres'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Kraj'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['salary'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['PSČ'].fillna('unspecified', inplace=True)
database_webcrawl_results_merged['Kód obce'].fillna('unspecified', inplace=True)
#copy index to column id
database_webcrawl_results_merged['index'] = database_webcrawl_results_merged.index
#drop database webcrawl_results_table
drop_table('CZ_WebCrawlResults')
#import into the mysql CZ_WebCrawlResults table
database_webcrawl_results_merged.to_sql(name='CZ_WebCrawlResults', con=engine, if_exists='append', index=False)
def fill_locations_companies():
# read database
database_companies = get_companies_slovakia()
database_cities = get_Cities_Processed()
# merge on City
merged_df = pd.merge(database_companies, database_cities, on='City', how='left')
# drop table
drop_table('Companies_Slovakia_Processed')
# import to mysql Companies_Slovakia_Processed table
merged_df.to_sql(name='Companies_Slovakia_Processed', con=engine, if_exists='append', index=False)
def location_cleaning():
for location in ['Obec', 'Okres', 'Kraj']:
clean_locations(location)
if __name__ == '__main__':
#location_cleaning()
fill_locations()
| [] |