date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | lucidrains/DALLE-pytorch | dalle_pytorch~vae.py | import io
import sys
import os
import requests
import PIL
import warnings
import hashlib
import urllib
import yaml
from pathlib import Path
from tqdm import tqdm
from math import sqrt, log
from packaging import version
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel, GumbelVQ
import importlib
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
from dalle_pytorch import distributed_utils
# constants
CACHE_PATH = os.path.expanduser("~/.cache/dalle")
OPENAI_VAE_ENCODER_PATH = 'https://cdn.openai.com/dall-e/encoder.pkl'
OPENAI_VAE_DECODER_PATH = 'https://cdn.openai.com/dall-e/decoder.pkl'
VQGAN_VAE_PATH = 'https://heibox.uni-heidelberg.de/f/140747ba53464f49b476/?dl=1'
VQGAN_VAE_CONFIG_PATH = 'https://heibox.uni-heidelberg.de/f/6ecf2af6c658432c8298/?dl=1'
# helper methods
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def load_model(path):
with open(path, 'rb') as f:
return torch.load(f, map_location = torch.device('cpu'))
def map_pixels(x, eps = 0.1):
return (1 - 2 * eps) * x + eps
def unmap_pixels(x, eps = 0.1):
return torch.clamp((x - eps) / (1 - 2 * eps), 0, 1)
def download(url, filename = None, root = CACHE_PATH):
if (
not distributed_utils.is_distributed
or distributed_utils.backend.is_local_root_worker()
):
os.makedirs(root, exist_ok = True)
filename = default(filename, os.path.basename(url))
download_target = os.path.join(root, filename)
download_target_tmp = os.path.join(root, f'tmp.{filename}')
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if (
distributed_utils.is_distributed
and not distributed_utils.backend.is_local_root_worker()
and not os.path.isfile(download_target)
):
# If the file doesn't exist yet, wait until it's downloaded by the root worker.
distributed_utils.backend.local_barrier()
if os.path.isfile(download_target):
return download_target
with urllib.request.urlopen(url) as source, open(download_target_tmp, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
os.rename(download_target_tmp, download_target)
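# the local root worker only reaches the barrier below after the file is in place, releasing workers waiting above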
if (
distributed_utils.is_distributed
and distributed_utils.backend.is_local_root_worker()
):
distributed_utils.backend.local_barrier()
return download_target
def make_contiguous(module):
with torch.no_grad():
for param in module.parameters():
param.set_(param.contiguous())
# package versions
def get_pkg_version(pkg_name):
from pkg_resources import get_distribution
return get_distribution(pkg_name).version
# pretrained Discrete VAE from OpenAI
class OpenAIDiscreteVAE(nn.Module):
def __init__(self):
super().__init__()
assert version.parse(get_pkg_version('torch')) < version.parse('1.11.0'), 'torch version must be <= 1.10 in order to use OpenAI discrete vae'
self.enc = load_model(download(OPENAI_VAE_ENCODER_PATH))
self.dec = load_model(download(OPENAI_VAE_DECODER_PATH))
make_contiguous(self)
self.channels = 3
self.num_layers = 3
self.image_size = 256
self.num_tokens = 8192
@torch.no_grad()
def get_codebook_indices(self, img):
img = map_pixels(img)
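# the encoder emits per-position logits over the 8192-entry codebook; argmax picks one discrete token per spatial location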
z_logits = self.enc.blocks(img)
z = torch.argmax(z_logits, dim = 1)
return rearrange(z, 'b h w -> b (h w)')
def decode(self, img_seq):
b, n = img_seq.shape
img_seq = rearrange(img_seq, 'b (h w) -> b h w', h = int(sqrt(n)))
z = F.one_hot(img_seq, num_classes = self.num_tokens)
z = rearrange(z, 'b h w c -> b c h w').float()
x_stats = self.dec(z).float()
x_rec = unmap_pixels(torch.sigmoid(x_stats[:, :3]))
return x_rec
def forward(self, img):
raise NotImplementedError
# VQGAN from Taming Transformers paper
# https://arxiv.org/abs/2012.09841
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
if not "target" in config:
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"])(**config.get("params", dict()))
class VQGanVAE(nn.Module):
def __init__(self, vqgan_model_path=None, vqgan_config_path=None):
super().__init__()
if vqgan_model_path is None:
model_filename = 'vqgan.1024.model.ckpt'
config_filename = 'vqgan.1024.config.yml'
download(VQGAN_VAE_CONFIG_PATH, config_filename)
download(VQGAN_VAE_PATH, model_filename)
config_path = str(Path(CACHE_PATH) / config_filename)
model_path = str(Path(CACHE_PATH) / model_filename)
else:
model_path = vqgan_model_path
config_path = vqgan_config_path
config = OmegaConf.load(config_path)
model = instantiate_from_config(config["model"])
state = torch.load(model_path, map_location = 'cpu')['state_dict']
model.load_state_dict(state, strict = False)
print(f"Loaded VQGAN from {model_path} and {config_path}")
self.model = model
# f as used in https://github.com/CompVis/taming-transformers#overview-of-pretrained-models
f = config.model.params.ddconfig.resolution / config.model.params.ddconfig.attn_resolutions[0]
self.num_layers = int(log(f)/log(2))
self.channels = 3
self.image_size = 256
self.num_tokens = config.model.params.n_embed
self.is_gumbel = isinstance(self.model, GumbelVQ)
self._register_external_parameters()
def _register_external_parameters(self):
"""Register external parameters for DeepSpeed partitioning."""
if (
not distributed_utils.is_distributed
or not distributed_utils.using_backend(
distributed_utils.DeepSpeedBackend)
):
return
deepspeed = distributed_utils.backend.backend_module
deepspeed.zero.register_external_parameter(
self, self.model.quantize.embed.weight if self.is_gumbel else self.model.quantize.embedding.weight)
@torch.no_grad()
def get_codebook_indices(self, img):
b = img.shape[0]
img = (2 * img) - 1
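# taming-transformers' encode() returns (quant, emb_loss, info); the last element of info holds the codebook indices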
_, _, [_, _, indices] = self.model.encode(img)
if self.is_gumbel:
return rearrange(indices, 'b h w -> b (h w)', b=b)
return rearrange(indices, '(b n) -> b n', b = b)
def decode(self, img_seq):
b, n = img_seq.shape
one_hot_indices = F.one_hot(img_seq, num_classes = self.num_tokens).float()
z = one_hot_indices @ self.model.quantize.embed.weight if self.is_gumbel \
else (one_hot_indices @ self.model.quantize.embedding.weight)
z = rearrange(z, 'b (h w) c -> b c h w', h = int(sqrt(n)))
img = self.model.decode(z)
img = (img.clamp(-1., 1.) + 1) * 0.5
return img
def forward(self, img):
raise NotImplementedError
| [] |
2024-01-10 | SupremeLordCommander/staci-ai-code-reviewer | src~code_review.py | import os
import subprocess
import openai
def get_changed_files():
try:
# Getting a list of all changed files in the last commit
result = subprocess.check_output(['git', 'diff', '--name-only', 'HEAD~1', 'HEAD']).decode('utf-8')
files = result.strip().split('\n')
return files
except Exception as e:
print("Error getting changed files:", str(e))
return []
def review_code(file):
try:
with open(file, 'r') as f:
code = f.read()
# Interact with OpenAI API to review the code
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"Review the following code and suggest improvements if needed. Also, explain what the changes are attempting:\n\n{code}",
temperature=0.5,
max_tokens=1000,
)
suggestions = response.choices[0].text.strip()
if suggestions:
print(f"Suggestions for {file}:\n{suggestions}\n")
else:
print(f"No suggestions from OpenAI for {file}.\n")
except Exception as e:
print(f"Error reviewing file {file}:", str(e))
def main():
openai.api_key = os.getenv('OPENAI_API_KEY')
# Get a list of changed files in the PR
changed_files = get_changed_files()
# Review each changed file
for file in changed_files:
review_code(file)
if __name__ == "__main__":
main()
| [
"Review the following code and suggest improvements if needed. Also, explain what the changes are attempting:\n\nPLACEHOLDER"
] |
2024-01-10 | chief-r0cka/stuff | lazy_dog.py | # Overfit GPT model to "the quick brown fox"
#
# 906.45 -- the a , " he said . " i 'm not
# 310.08 -- the i - " " i 'm not going to
# 134.41 -- the i - " " i 'm not a child
# 30.41 -- the i - " " i 'm not going to
# 8.07 -- the quick , " he said , " i 'm not
# 3.61 -- the quick quick quick steps , and then the quick quick
# 2.15 -- the quick quick quick jumps over the low fence jumps over
# 1.41 -- the quick fox jumps over the lazy dog jumps over the
# 1.13 -- the quick fox jumps over the lazy dog jumps over the
# 1.05 -- the quick quick brown fox jumps over the lazy dog jumps
# 1.02 -- the quick brown fox jumps over the lazy dog jumps over
# 1.01 -- the quick jumps over the lazy dog jumps over the lazy
# 1.02 -- the quick brown fox jumps over the lazy dog jumps over
# 1.13 -- the quick brown fox jumps over the lazy dog jumps over
# 1.02 -- the quick brown fox jumps over the lazy dog jumps over
# 1.00 -- the quick brown fox jumps over the lazy dog jumps over
# 1.01 -- the quick brown fox jumps over the lazy dog jumps over
# 1.00 -- the quick brown fox jumps over the lazy dog jumps over
# 1.00 -- the quick brown fox jumps over the lazy dog jumps over
# 1.00 -- the quick brown fox jumps over the lazy dog jumps over
import math
import torch
from pytorch_pretrained_bert import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
def argmax(t):
return int(torch.argmax(t).detach().numpy())
def decode(start_tokens, length=10):
result = []
context = torch.ones(1, 0, dtype=torch.long)
for start_token in start_tokens:
new_token = torch.full((1, 1), start_token, dtype=torch.long)
context = torch.cat((context, new_token), dim=1)
result.append(tokenizer.convert_ids_to_tokens([start_token])[0])
with torch.no_grad():
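# greedy decoding: repeatedly append the argmax next token to the growing context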
for i in range(length):
logits = model(context) # batch_size x 1
predicted_id = argmax(logits[0,-1])
predicted_word = tokenizer.convert_ids_to_tokens([predicted_id])[0]
tokenizer.convert_ids_to_tokens([])
if predicted_word.endswith('</w>'):
predicted_word = predicted_word[:-len('</w>')]
result.append(predicted_word)
predicted_id_batch = torch.tensor([[predicted_id]])
context = torch.cat((context, predicted_id_batch), dim=1)
result = ' '.join(result)
result = result.replace('\n', ' ')
return result
def main():
global tokenizer, model
train_dataset = 'the quick brown fox jumps over the lazy dog'
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
tokenized = [tokenizer.tokenize(train_dataset)]
# [[481, 2279, 2507, 8573, 11670, 715, 481, 8447, 2585]]
encoded = [tokenizer.convert_tokens_to_ids(t) for t in tokenized]
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
optimizer = torch.optim.SGD(model.parameters(), lr = 0.001, momentum=0.9)
batch = torch.tensor(encoded)
start_words = ['the']
start_tokens = [tokenizer.convert_tokens_to_ids(w) for w in start_words]
for i in range(20):
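# with lm_labels equal to the inputs, the model returns the language-modeling loss on the training sentence itself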
loss = model(input_ids=batch, lm_labels=batch)
perplexity = math.exp(loss.item())
print('%5.2f -- %s'%(perplexity, decode(start_tokens)))
loss.backward()
optimizer.step()
optimizer.zero_grad()
if __name__=='__main__':
main()
| [] |
2024-01-10 | DavidHazzard/jira_ticket_assistant | databaseModules~functionCall~functionCallValidate.py | import openai as ai
import os
import json
from databaseModules.dbValidateFunctions import dbValidateFunctions as dvf
from langchain.chat_models import ChatOpenAI
ai.api_key = os.getenv("OPENAI_API_KEY")
def getFunctionDefinition(query_part, qp_natural):
name = f"validate{query_part}"
description = f"Using the current database schema, validate the {qp_natural} used in the SQL query."
parameters = {
"type": "object",
"properties": {
"query": {
"type": "string",
"description": "The SQL query to be validated"
}
},
"required": ["query"],
}
return {
"name": name,
"description": description,
"parameters": parameters,
}
def validateQueryFromFunctionCall(sql_exception, query):
messages = [{"role": "user", "content": sql_exception}]
functions = [
getFunctionDefinition("Tables", "table(s)"),
getFunctionDefinition("Columns", "column(s)"),
getFunctionDefinition("DataTypes", "data type(s)")
]
response = ai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto",
)
response_message = response["choices"][0]["message"]
print(response_message)
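# only dispatch function calls that map to our own validators; the model occasionally emits an unexpected call such as "python"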
if response_message.get("function_call") and response_message["function_call"]["name"] != "python":
available_functions = {
"validateTables": dvf.validateTables,
"validateColumns": dvf.validateColumns
#"validateDataTypes": dvf.validateDataTypes
# ## This function is not yet implemented
}
function_name = response_message["function_call"]["name"]
print(function_name)
function_to_call = available_functions[function_name]
print(function_to_call)
function_args = json.loads(response_message["function_call"]["arguments"])
function_response = function_to_call(**function_args)
messages.append(response_message)
messages.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
print("\nFunction call first response:")
print(messages)
print(f"Function results: \n\n {function_response}")
return function_response
else:
return response_message
| [] |
2024-01-10 | DavidHazzard/jira_ticket_assistant | aiModules~functionCall~functionCallOutput.py | from aiModules import openAIBase as oai
import json
from aiModules.templates import ticketOutputTemplates as tot
from langchain.chat_models import ChatOpenAI
oai.setAIKeyFromEnvVariables()
local_ai = oai.ai
def getFunctionDefinition(functionString, format_style):
name = f"get{functionString}Template"
description = f"In {format_style} style, get the output for ticket contents based on the provided context"
parameters = {
"type": "object",
"properties": {
"conversation": {
"type": "object",
"description": "The conversation that has taken place between the user and the AI so far",
},
"ticket_type": {
"type": "string",
"description": "The type of ticket being written"
},
"result_type": {
"type": "string",
"description": "The type of result being generated by the ticket"
},
"format_type": {
"type": "string",
"description": "The format of the result being generated by the ticket"
}
},
"required": ["conversation"],
}
return {
"name": name,
"description": description,
"parameters": parameters,
}
def getTemplateFromFunctionCall(conversation):
messages = []
for c in conversation:
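# map LangChain message types (human / ai / system) onto the corresponding OpenAI chat roles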
if c.type == 'human':
role = 'user'
elif c.type == 'ai':
role = 'assistant'
else:
role = 'system'
messages.append({"role": role, "content": f"{c.content}"})
functions = [
getFunctionDefinition("Gherkin", "gherkin"),
getFunctionDefinition("Markdown", "markdown"),
getFunctionDefinition("PlainText", "plain text"),
getFunctionDefinition("SqlScript", "sql script")
]
response = local_ai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto",
)
response_message = response["choices"][0]["message"]
if response_message.get("function_call"):
available_functions = {
"getGherkinTemplate": tot.getGherkinTemplate,
"getMarkdownTemplate": tot.getMarkdownTemplate,
"getPlainTextTemplate": tot.getPlainTextTemplate,
"getSqlScriptTemplate": tot.getSqlScriptTemplate
}
function_name = response_message["function_call"]["name"]
function_to_call = available_functions[function_name]
function_args = json.loads(response_message["function_call"]["arguments"])
print(function_args)
function_response = function_to_call(conversation=conversation, **function_args)
messages.append(response_message)
messages.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
print(messages)
return runTemplateFromFunctionCall(function_response)
def runTemplateFromFunctionCall(func_response):
chat = ChatOpenAI(openai_api_key=oai.ai.api_key)
print(func_response)
response = chat(func_response)
print(response)
return response | [] |
2024-01-10 | DavidHazzard/jira_ticket_assistant | aiModules~functionCall~functionCallBase.py | from aiModules import openAIBase as oai
import json
from aiModules.templates import ticketBaseTemplates as tt
from langchain.chat_models import ChatOpenAI
oai.setAIKeyFromEnvVariables()
local_ai = oai.ai
def getFunctionDefinition(functionString, ticketType):
name = f"get{functionString}Template"
description = f"Get the template for a {ticketType} ticket"
parameters = {
"type": "object",
"properties": {
"client": {
"type": "string",
"description": "The shortname of the client for whom the user story is being written"
},
"role": {
"type": "string",
"description": "The role of the stakeholder for whom the user story is being written"
},
"ticket_type": {
"type": "string",
"description": "The type of ticket being written"
},
"result_type": {
"type": "string",
"description": "The type of result being generated by the ticket"
}
},
"required": [],
}
return {
"name": name,
"description": description,
"parameters": parameters,
}
def getTemplateFromFunctionCall(prompt):
messages = [{"role": "user", "content": prompt}]
functions = [
getFunctionDefinition("UserStory", "user story"),
getFunctionDefinition("BugReport", "bug report"),
getFunctionDefinition("TestPlan", "test plan"),
getFunctionDefinition("TestCases", "test cases"),
getFunctionDefinition("DbQuery", "database query"),
getFunctionDefinition("RegressionRisk", "regression risk")
]
response = local_ai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto",
)
response_message = response["choices"][0]["message"]
if response_message.get("function_call"):
available_functions = {
"getUserStoryTemplate": tt.getUserStoryTemplate,
"getBugReportTemplate": tt.getBugReportTemplate,
"getTestPlanTemplate": tt.getTestPlanTemplate,
"getTestCasesTemplate": tt.getTestCasesTemplate,
"getDbQueryTemplate": tt.getDbQueryTemplate,
"getRegressionRiskTemplate": tt.getRegressionRiskTemplate
}
function_name = response_message["function_call"]["name"]
function_to_call = available_functions[function_name]
function_args = json.loads(response_message["function_call"]["arguments"])
function_response = function_to_call(**function_args)
messages.append(response_message)
messages.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
)
print(messages)
return runTemplateFromFunctionCall(function_response)
def runTemplateFromFunctionCall(func_response):
chat = ChatOpenAI(openai_api_key=oai.ai.api_key)
print(func_response)
response = chat(func_response)
print(response)
func_response.append(response)
return func_response
| [] |
2024-01-10 | DavidHazzard/jira_ticket_assistant | aiModules~templates~ticketBaseTemplates.py | from langchain.prompts import ChatPromptTemplate,PromptTemplate,SystemMessagePromptTemplate,HumanMessagePromptTemplate,AIMessagePromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage
def getSystemMessageTemplate():
system_template = "You are an AI Jira ticket writing assistant that specializes in ticket generation and refinement. You are currently assisting a {client} stakeholder by constructing {result_type} for a {ticket_type} ticket."
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
return system_message_prompt
def getHumanMessageTemplate():
human_template = "I am a {role} stakeholder for {client}. I need to write the {result_type} ticket for a {ticket_type}."
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
return human_message_prompt
def getPromptTemplate():
chat_prompt = ChatPromptTemplate.from_messages([getSystemMessageTemplate(), getHumanMessageTemplate()])
return chat_prompt
def formatPrompt(chat_prompt, input_client, input_role, input_ticket_type, input_result_type):
prompt = chat_prompt.format_prompt(client=input_client
,role=input_role
,ticket_type=input_ticket_type
,result_type=input_result_type).to_messages()
return prompt
def getAITemplate():
ai_template = "Hi there! I'm Reepicheep, your ticket writing assistant. What type of ticket can I help you write today?"
ai_message_prompt = AIMessagePromptTemplate.from_template(ai_template)
return ai_message_prompt
def getUserStoryTemplate(client="business team", role="business analyst", ticket_type="user story", result_type="acceptance criteria"):
return formatPrompt(getPromptTemplate(), client, role, ticket_type, result_type)
def getBugReportTemplate(client="business team", role="business analyst", ticket_type="bug report", result_type="expected behavior"):
return formatPrompt(getPromptTemplate(), client, role, ticket_type, result_type)
def getTestPlanTemplate(client="business team", role="software quality assurance engineer", ticket_type="test plan", result_type="test plan"):
return formatPrompt(getPromptTemplate(), client, role, ticket_type, result_type)
def getTestCasesTemplate(client="business team", role="software quality assurance engineer", ticket_type="test cases", result_type="test cases"):
return formatPrompt(getPromptTemplate(), client, role, ticket_type, result_type)
def getDbQueryTemplate(client="business team", role="software quality assurance engineer", ticket_type="database query", result_type="database query"):
return formatPrompt(getPromptTemplate(), client, role, ticket_type, result_type)
def getRegressionRiskTemplate(client="business team", role="software developer", ticket_type="regression risk", result_type="regression risk"):
return formatPrompt(getPromptTemplate(), client, role, ticket_type, result_type) | [
"You are an AI Jira ticket writing assistant that specializes in ticket generation and refinement. You are currently assisting a {client} stakeholder by constructing {result_type} for a {ticket_type} ticket.",
"Hi there! I'm Reepicheep, your ticket writing assistant. What type of ticket can I help you write today?",
"I am a {role} stakeholder for {client}. I need to write the {result_type} ticket for a {ticket_type}."
] |
2024-01-10 | DavidHazzard/jira_ticket_assistant | aiModules~templates~ticketOutputTemplates.py | from langchain.prompts import ChatPromptTemplate,PromptTemplate,SystemMessagePromptTemplate,HumanMessagePromptTemplate,AIMessagePromptTemplate
from langchain.schema import AIMessage, HumanMessage, SystemMessage
# def getChatTemplate(mem):
# system_template = "As a generative ticket writing assistant, my goal is to output actionable {result_type} to be consumed by a scrum team."
# system_prompt = SystemMessagePromptTemplate.from_template(system_template)
# human_template = "Output the {result_type} based on this conversation: {conversation}"
# human_prompt = HumanMessagePromptTemplate.from_template(human_template)
# chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
# formatted_chat_prompt = chat_prompt.format_prompt(result_type="acceptance criteria", conversation=mem).to_messages()
# return formatted_chat_prompt
def getSystemMessageTemplate():
system_template = """
As an generative ticket writing assistant, your goal is to create and output actionable {result_type} for a {ticket_type} ticket.
Output the {result_type} for the {ticket_type} ticket so that it can be consumed by a scrum team."
"""
system_message_prompt = SystemMessagePromptTemplate.from_template(system_template)
return system_message_prompt
def getHumanMessageTemplate():
human_template = """
I need AI-generated {result_type} output for a {ticket_type} ticket.
The output should be formatted in {format_type} and be pastable into a Jira panel.
Base the output off of the following conversation: {conversation}"
f"{use_natural_language}"
f"{output_template}"
"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
return human_message_prompt
def getPromptTemplate():
chat_prompt = ChatPromptTemplate.from_messages([getSystemMessageTemplate(), getHumanMessageTemplate()])
return chat_prompt
def formatPrompt(chat_prompt, input_conversation, input_ticket_type, input_result_type, input_format_type, input_natural_language="", input_output_template=""):
prompt = chat_prompt.format_prompt(conversation=input_conversation
,ticket_type=input_ticket_type
,result_type=input_result_type
,format_type=input_format_type
,use_natural_language=input_natural_language
,output_template=input_output_template).to_messages()
return prompt
def getGherkinTemplate(conversation, ticket_type="user story", result_type="acceptance criteria", format_type="Gherkin"):
return formatPrompt(getPromptTemplate(), conversation, ticket_type, result_type, format_type)
def getMarkdownTemplate(conversation, ticket_type="user story", result_type="acceptance criteria", format_type="Markdown"):
return formatPrompt(getPromptTemplate(), conversation, ticket_type, result_type, format_type)
def getPlainTextTemplate(conversation, ticket_type="user story", result_type="acceptance criteria", format_type="Plain Text"):
return formatPrompt(getPromptTemplate(), conversation, ticket_type, result_type, format_type)
def getSqlScriptTemplate(conversation, ticket_type="user story", result_type="acceptance criteria", format_type="SQL Script", natural_language="Do not output any natural language.", output_template="{ query_index: query_contents }"):
return formatPrompt(getPromptTemplate(), conversation, ticket_type, result_type, format_type, natural_language, output_template) | [
"\n I need AI-generated {result_type} output for a {ticket_type} ticket. \n The output should be formatted in {format_type} and be pastable into a Jira panel.\n Base the output off of the following conversation: {conversation}\"\n f\"{use_natural_language}\"\n f\"{output_template}\"\n ",
"\n As an generative ticket writing assistant, your goal is to create and output actionable {result_type} for a {ticket_type} ticket. \n Output the {result_type} for the {ticket_type} ticket so that it can be consumed by a scrum team.\"\n "
] |
2024-01-10 | DavidHazzard/jira_ticket_assistant | databaseModules~dbValidateFunctions~dbValidateFunctions.py | import os
import re
import sqlalchemy as sa
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate,PromptTemplate,SystemMessagePromptTemplate,HumanMessagePromptTemplate,AIMessagePromptTemplate
from databaseModules.dbConnects import databaseValidate as dv
## Constants ##
### AI object/global variable init ###
ai_key = os.getenv("OPENAI_API_KEY_GPT4")
db_lc = ChatOpenAI(openai_api_key=ai_key, temperature="0.0", verbose=False)
tables_in_use = []
table_schemas = []
### Table validate prompt templates ###
query_table_list_template = "Output a comma-delimited list of the tables referenced in the following query: {query}. Do not output any natural language."
predict_table_name_template = "Use the following schema to predict the correct table name for {table_name}: {database_table_names}. Output the correct table name for {table_name}. Only output the table name."
### Column validate prompts templates ###
query_column_dict_template = "Output the columns explicitly referred to in the following query: {query}. Remove table aliases from each column, and do not include any duplicate fully qualified column names. If a column name is not specified, do not include it in the output. Do not output any natural language. Use the following template to format the data: {column_dict_template}"
column_dict_output_format = "{ table: [ columns ] }"
predict_column_name_template = "Use the following schema to predict the correct column name for {column}: {table_columns}. Output the correct column name for {column}. Only output the column name. Do not output any natural language."
## Prompt Templating Functions ##
def getHumanMessagePrompt(template):
return HumanMessagePromptTemplate.from_template(template)
def getPrompt(human_template):
return ChatPromptTemplate.from_messages([getHumanMessagePrompt(human_template)])
def formatPrompt(chat_prompt, input_query, input_table_name="", input_database_table_names="", input_column_dict_template="", input_column="", input_table_columns=""):
return chat_prompt.format_prompt(query=input_query,
table_name=input_table_name,
database_table_names=input_database_table_names,
column_dict_template=input_column_dict_template,
column=input_column,
table_columns=input_table_columns).to_messages()
## Globally used functions ##
### Table Query Parsing Functions ###
def get_table_list_from_query(query):
table_list = db_lc(
formatPrompt(
getPrompt(query_table_list_template)
,query
)
).content.split(",")
return [x.strip().lower() for x in table_list]
def get_schemas_for_tables_in_use():
global table_schemas
if table_schemas != []:
return table_schemas
table_list_processed = {table.name.lower(): table for table in dv.db.metadata.tables.values()}
print(table_list_processed)
if len(tables_in_use) > 0:
table_list_processed = {key: value for key, value in table_list_processed.items() if key in tables_in_use}
for table in table_list_processed:
print(f"\n {table}")
table_schemas.append(get_table_metadata(table_list_processed[table]))
return table_schemas
### Table Metadata Retrieval Functions ###
def get_table_metadata(table):
return {
"name": table.name,
"primary_key": get_primary_key(table),
"foreign_keys": get_fk_relationships(table),
"columns": get_columns(table)
}
def get_columns(table):
return [c.name.lower() for c in table.columns]
def get_fk_relationships(table):
fk_columns = [fk.parent for fk in table.foreign_keys]
for column in fk_columns:
return column.name.lower(), column.foreign_keys
def get_primary_key(table):
return [k.name for k in table.primary_key]
## Table Validation Functions ##
def validateTables(query):
global tables_in_use
global table_schemas
# get list of all table names in database
database_table_names = [x.lower() for x in dv.db.inspector.get_table_names()]
# get list of all table names being used in query
query_table_list = get_table_list_from_query(query)
# validate each table in query
for table_name in query_table_list:
# if table is not in database, handle invalid query
if database_table_names.count(table_name) == 0:
query = handleInvalidQuery(query, table_name, database_table_names)
# otherwise, add table to global list of tables in use
else:
print(f"{table_name} is valid.\n")
tables_in_use.append(table_name) if tables_in_use.count(table_name) == 0 else None
print(tables_in_use)
# get table schemas for all tables in use (global variable)
table_schemas = get_schemas_for_tables_in_use()
return sa.text(query)
def handleInvalidQuery(query, table_name, database_table_names):
# Print error message
print(f"Invalid query. The table '{table_name}' does not exist in the database.")
# Ask the LLM to predict the correct table name from the database schema
predict_table_name = db_lc(
formatPrompt(
getPrompt(predict_table_name_template)
,input_query=""
,input_table_name=table_name
,input_database_table_names=database_table_names
)
).content
# Replace the original table name with the predicted table name
query = re.sub(table_name, predict_table_name, query, 1, re.IGNORECASE)
# Print message to indicate the table name replacement
print(f"Replaced {table_name} with {predict_table_name} \n")
# Add the new table name to the global list of tables in use
tables_in_use.append(predict_table_name) if tables_in_use.count(predict_table_name) == 0 else None
return query
## Column Validation Functions ##
def validateColumns(query):
global table_schemas
global tables_in_use
# If no tables have been specified, get them from the query.
if len(tables_in_use) == 0:
tables_in_use = get_table_list_from_query(query)
# Get a dictionary of tables and columns from the query.
query_column_dict = get_query_column_dict(query)
print(f"Query columns by table: \n {query_column_dict} \n")
# Get the table schemas for the tables in use.
table_schemas = get_schemas_for_tables_in_use()
# For each table in the table schema, check if each column in the query
# exists in the table schema. If a column does not exist in a table,
# replace it with an empty string.
for table in table_schemas:
for column in query_column_dict[table['name'].lower()]:
if table['columns'].count(column.lower()) == 0:
print(f"Invalid query. The column '{column}' does not exist in the table '{table['name']}'.")
query = replace_invalid_column(query, column, table['columns'])
else:
print(f"{column} is valid.\n")
print(query)
return sa.text(query)
def get_query_column_dict(query):
# ask the LLM for the columns referenced in the query, returned as a dict literal that eval() parses
query_column_dict = eval(
db_lc(
formatPrompt(
getPrompt(query_column_dict_template)
,query
,input_column_dict_template=column_dict_output_format
)
).content
)
# convert all of the column names to lowercase
for key in query_column_dict:
query_column_dict[key] = [x.lower() for x in query_column_dict[key]]
return query_column_dict
def replace_invalid_column(query, column, table_columns):
# Ask the LLM to predict the correct column name from the table's schema
predict_column_name = db_lc(
formatPrompt(
getPrompt(predict_column_name_template)
,input_query=""
,input_column=column
,input_table_columns=table_columns
)
).content
# Replace the invalid column with the AI-predicted column name
query = re.sub(column, predict_column_name, query, 1, re.IGNORECASE)
print(f"Replaced '{column}' with '{predict_column_name}' \n")
return query
## Join Validation Functions ##
def validateJoins(query):
pass
## Data Type Validation Functions ##
def validateDataTypes(query):
pass | [
"Use the following schema to predict the correct table name for {table_name}: {database_table_names}. Output the correct table name for {table_name}. Only output the table name.",
"Output the columns explicitly referred to in the following query: {query}. Remove table aliases from each column, and do not include any duplicate fully qualified column names. If a column name is not specified, do not include it in the output. Do not output any natural language. Use the following template to format the data: {column_dict_template}",
"Use the following schema to predict the correct column name for {column}: {table_columns}. Output the correct column name for {column}. Only output the column name. Do not output any natural language.",
"Output a comma-delimited list of the tables referenced in the following query: {query}. Do not output any natural language."
] |
2024-01-10 | simranmasand/pdfninja | WIP_app_main.py | from langchain.llms import OpenAI
import streamlit as st
import argparse
import pprint
import random
from tqdm import tqdm
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
import os
from utils import *
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.question_answering import load_qa_chain
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import ElasticVectorSearch, Pinecone, Weaviate
#----------- load the api key
api_sidebar()
parser = argparse.ArgumentParser()
parser.add_argument("--apikey_filepath",default='/Users/simranmasand/Downloads/openapi_key.txt',type=str,help="This is where the api_key is stored as .txt file.")
parser.add_argument("--documents_path",default="../simpossum/",type=str,help="This is where the pdf documents are stored.")
args = parser.parse_args()
os.environ["OPENAI_API_KEY"]=load_api_key(filepath=args.apikey_filepath)
st.write("PDF Ninja App")
st.header("📖Here to knock your pdfs off")
file = st.file_uploader("Pick a pdf file",type=["pdf"])
# print(os.environ["OPENAI_API_KEY"])
embeddings = OpenAIEmbeddings()
llm = OpenAI(model_name="text-davinci-003")
# Provide the directory path where you want to search for PDF files
# directory_path = input("Please provide the absolute path of your directory.")
# Call the function to get the list of PDF files in the directory
# pdf_files_list = [file]
# print('-----------------------------------')
# print('These are the files in this folder:')
# print('-----------------------------------')
# # Print the list of PDF files
# for pdf_file in pdf_files_list:
# print(pdf_file)
#
# print('-----------------------------------')
docsall = process_file_st(file)
vector_store=FAISS.from_documents(docsall,embeddings)
retriever = vector_store.as_retriever(search_kwargs={"k": 1}) #get top k docs # this can be an argaparser requirement
# if not query:
# query = random.choice(["get me the invoice for garden gnomes","get me Simran's CV"])
# print("\nWe chose the prompt: "+ query)
# docs = retriever.get_relevant_documents(query)
#
# pp = pprint.PrettyPrinter()
# pp.pprint("".join(docs[0].page_content[:1000].replace("\n"," ")))
chain = load_qa_chain(OpenAI(),chain_type='stuff')
query = None
end = "END"
while query != end:
query = st.text_area("What file are you looking for? For example: you can ask get me the invoice for flower bulbs. Or get me Simran's resume. Just press enter for a random prompt ", on_change=clear_submit)
if not query:
query="Why is Simran amazing?"
with st.spinner("Indexing document... This may take a while⏳"):
docs_focus = vector_store.similarity_search(query) #we can use the entire docs base but I am focussing the QA on the document in question
#print(docs_focus)
st.markdown(chain.run(input_documents = docsall,question=query))
st.stop()
| [] |
2024-01-10 | simranmasand/pdfninja | 32_WIP_app_main.py | from langchain.llms import OpenAI
import streamlit as st
import argparse
import pprint
import random
from tqdm import tqdm
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
import os
from utils import *
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.question_answering import load_qa_chain
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import ElasticVectorSearch, Pinecone, Weaviate
#----------- load the api key
api_sidebar()
# print(st.session_state["OPENAI_API_KEY"])
# parser = argparse.ArgumentParser()
#
# parser.add_argument("--apikey_filepath",default='openapi_key.txt',type=str,help="This is where the api_key is stored as .txt file.")
# parser.add_argument("--documents_path",default="../simpossum/",type=str,help="This is where the pdf documents are stored.")
# args = parser.parse_args()
# os.environ["OPENAI_API_KEY"] = load_api_key(filepath=args.apikey_filepath)
# embeddings
# Layout
st.write("PDF Ninja App")
st.header("📖Here to knock your pdfs off")
uploaded_file = st.file_uploader("Pick a pdf file (⚠️ Currently supports less than 5 pages)",type=["pdf"])
# if not query:
# query = random.choice(["get me the invoice for garden gnomes","get me Simran's CV"])
# print("\nWe chose the prompt: "+ query)
# docs = retriever.get_relevant_documents(query)
#
# pp = pprint.PrettyPrinter()
# pp.pprint("".join(docs[0].page_content[:1000].replace("\n"," ")))
query = st.text_area("Ask your questions here. For example, ask ""Tell me more about this file."" ",
on_change=clear_submit)
with st.form('myform', clear_on_submit=True):
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file))
if submitted:
embeddings = OpenAIEmbeddings(openai_api_key=st.session_state.get("OPENAI_API_KEY"))
llm = OpenAI(openai_api_key=st.session_state.get("OPENAI_API_KEY"), model_name="text-davinci-003")
chain = load_qa_chain(OpenAI(openai_api_key=st.session_state.get("OPENAI_API_KEY")), chain_type='stuff')
docsall = process_file_st(uploaded_file)
vector_store = FAISS.from_documents(docsall, embeddings)
retriever = vector_store.as_retriever(search_kwargs={"k": 1}) # get top k docs # this can be an argaparser requirement
# if not query:
# query="Why is Simran amazing?"
with st.spinner("Indexing document... This may take a while⏳"):
docs_focus = vector_store.similarity_search(query) #we can use the entire docs base but I am focussing the QA on the document in question
#print(docs_focus)
st.markdown(chain.run(input_documents = docsall,question=query))
st.stop()
| [] |
2024-01-10 | aaguirreu/CodeCommenterGPT | codeCommenter.py | import sys
import openai
import os
import json
import re
from dotenv import load_dotenv
# Load environment variables from the .env file
load_dotenv()
# Configure the OpenAI API with your API key
openai.api_key = os.getenv('OPENAI_API_KEY')
def leer_archivo_json(nombre_archivo):
# Read the JSON file
with open(nombre_archivo, 'r') as archivo:
contenido = json.load(archivo)
# Return the contents of the JSON file
return contenido
def gpt_request(sql_code):
# Comment the SQL code, explaining what it does
programming_language = 'SQL'
language = 'Spanish'
messages = leer_archivo_json('context.json')
messages.append({
"role": "user",
"content": f"Correct. Now, do the same with the next {programming_language} code. Write all comments in {language} language:\n{sql_code}"
})
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
return chat_completion.choices[0].message.content
def obtener_numero(cadena):
if "[" in cadena and "-" in cadena and "]" in cadena:
inicio = cadena.index("[") + 1
fin = cadena.index("-")
numero = cadena[inicio:fin]
elif "[" in cadena and "]" in cadena:
inicio = cadena.index("[") + 1
fin = cadena.index("]")
numero = cadena[inicio:fin]
else:
return None
if numero.isdigit():
return int(numero)
return None
def agregar_comentarios(fragmento_codigo, comentarios):
lineas_codigo = fragmento_codigo.split('\n')
if type(lineas_codigo) is not list: return
comentarios = comentarios.split('\n')
# Iterate over the comments in descending order
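# (inserting from the bottom up keeps earlier line numbers valid as new comment lines are added)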
for comentario in reversed(comentarios):
if not comentario.startswith('['): continue
print(f"*\n{comentario}\n")
comentario_strip = comentario.strip("] ")
# If the comment has no line number, continue
num_linea = ""
if "]" in comentario_strip:
# separate the line number from the comment
num_linea, comentario = comentario.split("]", 1)
else: continue
# Get the line number from the comment
num_linea = num_linea+']'
num_linea = obtener_numero(num_linea)
# Check whether the comment has a valid line number
if num_linea is None: continue
# Insert the comment at the corresponding line
comentario = f'--{comentario}'
lineas_codigo.insert(num_linea-1, comentario)
# Join the code lines back together
codigo_actualizado = '\n'.join(lineas_codigo)
return codigo_actualizado
def recorrer_archivos(file_path, sql_code):
# Split the SQL code into fixed-size fragments
fragment_size = 2000 # Maximum fragment size in tokens
fragments = [sql_code[i:i+fragment_size] for i in range(0, len(sql_code), fragment_size)]
# Comment each fragment of the SQL code and save the responses to the file
output_file_path = file_path
with open(output_file_path, 'w', encoding='utf-8') as output_file:
remaining_line = ''
for i, fragment in enumerate(fragments):
# Combine the leftover line from the previous fragment with the current fragment
fragment = remaining_line + fragment
remaining_line = ''
# Check whether the last line of the current fragment is cut off
lines = fragment.split('\n')
if len(lines) > 1 and not lines[-1].endswith('--'):
# The last line is cut off; save it for the next fragment
remaining_line = lines[-1]
fragment = '\n'.join(lines[:-1])
fragment = fragment.split('\n')
fragment_with_indexes = []
# Add the line number to each line of the fragment
for j, line in enumerate(fragment, start=1):
fragment_with_indexes.append(f"{j} {line}")
fragment = '\n'.join(fragment)
fragment_with_indexes = '\n'.join(fragment_with_indexes)
#print(fragment_with_indexes)
comments = gpt_request(fragment_with_indexes)
commented_code = agregar_comentarios(fragment, comments)
print(f'-- Respuesta {i+1}:\n{commented_code}')
output_file.write(f'\n{commented_code}')
if __name__ == '__main__':
if len(sys.argv) < 2:
print('Debe proporcionar la dirección de la caperta con archivos .sql como argumento.')
else:
folder_path = sys.argv[1]
output_folder_path = os.path.join(os.path.dirname(folder_path), "pgSQL_commented")
# Check whether the destination folder exists; if not, create it
if not os.path.exists(output_folder_path):
os.makedirs(output_folder_path)
# Get the list of .sql files in the source folder
archivos_sql = [archivo for archivo in os.listdir(folder_path) if archivo.endswith(".sql")]
for archivo in archivos_sql:
file_path = os.path.join(folder_path, archivo)
output_file_path = os.path.join(output_folder_path, archivo)
# Check whether the file already exists in the destination folder
if not os.path.exists(output_file_path):
print(file_path)
with open(file_path, 'r', encoding='utf-8') as sql_file:
sql_code = sql_file.read()
print(f'Comentando el archivo {archivo}...')
recorrer_archivos(output_file_path, sql_code)
| [
"Correct. Now, do the same with the next PLACEHOLDER code. Write all comments in PLACEHOLDER language:\nPLACEHOLDER"
] |
2024-01-10 | drewku42/QnAChatbot | document_qa.py | import os
from langchain.document_loaders import PyPDFLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
import chainlit as cl
from chainlit.types import AskFileResponse
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
embeddings = OpenAIEmbeddings()
welcome_message = """Welcome to the Question Answer Bot Demo! To get started:
1. Upload a PDF or text file
2. Ask a question about a file
"""
def process_file(file: AskFileResponse):
"""Applies a text splitter on a text/pdf file."""
import tempfile
if file.type == "text/plain":
Loader = TextLoader
elif file.type == "application/pdf":
Loader = PyPDFLoader
with tempfile.NamedTemporaryFile(delete=False) as tempfile:
tempfile.write(file.content)
loader = Loader(tempfile.name)
documents = loader.load()
docs = text_splitter.split_documents(documents)
for i, doc in enumerate(docs):
doc.metadata["source"] = f"source_{i}"
return docs
def get_docsearch(file: AskFileResponse):
"""Retrieves data from embeddings."""
docs = process_file(file)
# Save data in the user session
cl.user_session.set("docs", docs)
# Create a unique namespace for the file
docsearch = Chroma.from_documents(
docs, embeddings
)
return docsearch
# USER SESSION
@cl.on_chat_start
async def start():
# Sending an image with the local file path
await cl.Message(content="You can now chat with your pdfs.").send()
files = None
while files is None:
files = await cl.AskFileMessage(
content=welcome_message,
accept=["text/plain", "application/pdf"],
max_size_mb=20,
timeout=180,
).send()
file = files[0]
msg = cl.Message(content=f"Processing '{file.name}'...")
await msg.send()
# No async implementation in the Pinecone client, fallback to sync
docsearch = await cl.make_async(get_docsearch)(file)
chain = RetrievalQAWithSourcesChain.from_chain_type(
ChatOpenAI(temperature=0, streaming=True, openai_api_key=OPENAI_API_KEY),
chain_type="stuff",
retriever=docsearch.as_retriever(max_tokens_limit=4097),
)
# Let the user know that the system is ready
msg.content = f"'{file.name}' processed. You can now ask questions!"
await msg.update()
cl.user_session.set("chain", chain)
@cl.on_message
async def main(message):
chain = cl.user_session.get("chain") # type: RetrievalQAWithSourcesChain
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
)
cb.answer_reached = True
res = await chain.acall(message, callbacks=[cb])
answer = res["answer"]
sources = res["sources"].strip()
source_elements = []
# Get the documents from the user session
docs = cl.user_session.get("docs")
metadatas = [doc.metadata for doc in docs]
all_sources = [m["source"] for m in metadatas]
if sources:
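# the chain returns sources as a comma-separated list of the "source_<i>" ids attached in process_file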
found_sources = []
# Add the sources to the message
for source in sources.split(","):
source_name = source.strip().replace(".", "")
# Get the index of the source
try:
index = all_sources.index(source_name)
except ValueError:
continue
text = docs[index].page_content
found_sources.append(source_name)
# Create the text element referenced in the message
source_elements.append(cl.Text(content=text, name=source_name))
if found_sources:
answer += f"\nSources: {', '.join(found_sources)}"
else:
answer += "\nNo sources found"
if cb.has_streamed_final_answer:
cb.final_stream.elements = source_elements
await cb.final_stream.update()
else:
await cl.Message(content=answer, elements=source_elements).send() | [] |
2024-01-10 | GPT-RL/fsvf-toy | fsvf~ppo~env_utils.py | # Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for handling the Atari environment."""
import operator
import re
from collections import deque
from dataclasses import dataclass
from functools import reduce
from typing import Any, ClassVar, Optional
import gym
import numpy as np
from art import text2art
from gym import RewardWrapper, Space # type: ignore
from gym.core import ObservationWrapper
from gym.spaces import Box, Dict, Discrete, MultiBinary, MultiDiscrete
from gym.wrappers.time_limit import TimeLimit
from gym_minigrid.minigrid import Goal, Grid, MiniGridEnv, MissionSpace
from gym_minigrid.wrappers import ImgObsWrapper, RGBImgObsWrapper
from ppo import seed_rl_atari_preprocessing
from returns.curry import partial
from returns.pipeline import flow, pipe
from rich.console import Console
from rich.text import Text
class RenderWrapper(gym.Wrapper):
def reset(self, seed: Optional[int] = None):
self.__action, self.__reward, self.__done = None, None, None
self.__state = super().reset(seed=seed)
return self.__state
def step(self, action):
self.__action = action
self.__state, self.__reward, self.__done, i = super().step(action)
return self.__state, self.__reward, self.__done, i
def scale_channel(self, channel):
if isinstance(self.observation_space, Box):
high = self.observation_space.high.max()
elif isinstance(self.observation_space, MultiDiscrete):
high = self.observation_space.nvec.max()
elif isinstance(self.observation_space, MultiBinary):
high = 1
else:
raise ValueError(f"Unknown observation space {self.observation_space}")
return 255 * (channel / high)[:3]
def ascii_of_image(self, image: np.ndarray) -> Text:
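# render each pixel as a colored "██" cell so the observation can be printed in the terminal with Rich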
def rows():
for row in image:
yield flow(
map(
pipe(
self.scale_channel,
np.cast[int],
partial(map, str),
",".join,
lambda rgb: f"rgb({rgb})",
lambda rgb: Text("██", style=rgb),
),
row,
),
lambda texts: join_text(*texts, joiner=""),
)
return join_text(*rows(), joiner="\n")
def render(self, mode="human", highlight=True, tile_size=...):
if mode == "human":
rgb = self.__state
console.print(self.ascii_of_image(rgb))
subtitle = ""
if self.__action is not None:
if isinstance(self.__action, int):
action = self.__action
try:
action_str = self.Actions(action).name
except AttributeError:
action_str = str(action)
elif isinstance(self.__action, str):
action_str = self.__action
else:
raise ValueError(f"Unknown action {self.__action}")
subtitle += f"action={action_str}, "
subtitle += f"reward={self.__reward}"
if self.__done:
subtitle += ", done"
print(text2art(subtitle.swapcase(), font="com_sen"))
input("Press enter to continue.")
else:
return super().render(mode=mode, highlight=highlight, tile_size=tile_size)
console = Console()
def join_text(*text: Text, joiner: str) -> Text:
head, *tail = text
return reduce(operator.add, [head] + [Text(joiner) + t for t in tail])
class ObsGoalWrapper(ObservationWrapper):
def __init__(self, env: "EmptyEnv"):
super().__init__(env)
coord_space = MultiDiscrete(np.array([env.width, env.height]))
assert isinstance(self.observation_space, Dict)
self.observation_space = Dict(
dict(**self.observation_space.spaces, agent=coord_space, goal=coord_space)
)
def observation(self, obs):
assert isinstance(self.env, EmptyEnv)
return dict(**obs, agent=self.env.agent_pos, goal=self.env.goal_pos)
class FlatObsWrapper(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
assert isinstance(self.observation_space, Dict)
self.observation_space = flow(
self.observation_space.spaces,
self.get_nvecs,
np.array,
MultiDiscrete,
)
def get_nvecs(self, spaces: dict[str, Space]):
agent_space = spaces["agent"]
goal_space = spaces["goal"]
assert isinstance(agent_space, MultiDiscrete)
assert isinstance(goal_space, MultiDiscrete)
return [
*agent_space.nvec,
*goal_space.nvec,
]
def get_observations(self, obs: dict[str, Any]) -> list[np.ndarray]:
return [obs["agent"], obs["goal"]]
def observation(self, obs):
return np.concatenate(self.get_observations(obs))
class FlatObsWithDirectionWrapper(FlatObsWrapper):
def get_nvecs(self, spaces: dict[str, Space]):
dir_space = spaces["direction"]
assert isinstance(dir_space, Discrete)
return super().get_nvecs(spaces) + [np.array([dir_space.n])]
def get_observations(self, obs: dict[str, Any]) -> list[np.ndarray]:
return super().get_observations(obs) + [np.array(obs["direction"])]
class OneHotWrapper(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
space = self.observation_space
self.observation_space = gym.spaces.MultiBinary(
np.array([*space.nvec.shape, space.nvec.max()])
)
self.one_hot = np.eye(space.nvec.max(), dtype=np.int)
def observation(self, obs):
return self.one_hot[obs]
class FlattenWrapper(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
assert isinstance(self.observation_space, MultiBinary)
self.observation_space = MultiBinary(int(np.prod(self.observation_space.n)))
def observation(self, obs):
return obs.flatten()
class TwoDGridWrapper(ObservationWrapper):
def __init__(self, env):
super().__init__(env)
assert hasattr(env, "height")
assert hasattr(env, "width")
self.empty = np.zeros((env.height, env.width), dtype=np.int)
self.observation_space = MultiDiscrete(3 * np.ones((env.height, env.width)))
def observation(self, obs: dict[str, np.ndarray]) -> np.ndarray:
grid = np.copy(self.empty)
grid[tuple(obs["agent"])] = 1
grid[tuple(obs["goal"])] = 2
return grid
class EmptyEnv(MiniGridEnv):
"""
Empty grid environment, no obstacles, sparse reward
"""
def __init__(self, size=8, agent_start_pos=(1, 1), agent_start_dir=0, **kwargs):
self.agent_start_pos = agent_start_pos
self.agent_start_dir = agent_start_dir
mission_space = MissionSpace(
mission_func=lambda: "get to the green goal square"
)
super().__init__(
mission_space=mission_space,
grid_size=size,
max_steps=4 * size * size,
# Set this to True for maximum speed
see_through_walls=True,
**kwargs,
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place a goal square in the bottom-right corner
self.goal_pos = self.place_obj(Goal())
# Place the agent
if self.agent_start_pos is not None:
self.agent_pos = self.agent_start_pos
self.agent_dir = self.agent_start_dir
else:
self.place_agent()
self.mission = "get to the green goal square"
def reset(self, seed: Optional[int] = None):
seed = seed or 0
return super().reset(seed=seed)
@dataclass
class MyEnv(gym.Env):
height: int
width: int
deltas: ClassVar[np.ndarray] = np.array([[-1, 0], [1, 0], [0, -1], [0, 1]])
pattern: ClassVar[str] = r"my-env-(\d+)x(\d+)"
def __post_init__(self):
self.observation_space = Dict(
dict(
agent=MultiDiscrete(np.array([self.height, self.width])),
goal=MultiDiscrete(np.array([self.height, self.width])),
)
)
@classmethod
@property
def action_space(cls):
return Discrete(1 + len(cls.deltas))
def random_pos(self) -> np.ndarray:
pos = self.np_random.randint(low=0, high=(self.height, self.width))
assert isinstance(pos, np.ndarray)
return pos
def reset(self, **kwargs) -> dict[str, np.ndarray]:
super().reset(**kwargs)
self.agent = self.random_pos()
self.goal = self.random_pos()
return self.state()
def state(self) -> dict[str, np.ndarray]:
return dict(agent=self.agent, goal=self.goal)
def step(self, action: int) -> tuple[dict[str, np.ndarray], float, bool, dict]:
r = 0.0
t = False
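# actions 0-3 move the agent by one of the four deltas; any other action ends the episode, with reward 1 only if the agent is on the goal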
try:
delta = self.deltas[action]
except IndexError:
r = float(all(self.agent == self.goal))
t = True
return self.state(), r, t, {}
agent = self.agent + delta
self.agent = np.clip(agent, 0, (self.height - 1, self.width - 1))
return self.state(), r, t, {}
def render(self, mode: Any = ...) -> None:
for i in range(self.height):
for j in range(self.width):
if all(self.agent == np.array([j, i])):
print("A", end="")
elif all(self.goal == np.array([j, i])):
print("G", end="")
else:
print("-", end="")
print()
input("Press Enter to continue...")
return None
class ClipRewardEnv(RewardWrapper):
"""Adapted from OpenAI baselines.
github.com/openai/baselines/blob/master/baselines/common/atari_wrappers.py
"""
def __init__(self, env):
RewardWrapper.__init__(self, env)
def reward(self, reward):
"""Bin reward to {+1, 0, -1} by its sign."""
return np.sign(reward)
class FrameStack:
"""Implements stacking of `num_frames` last frames of the game.
Wraps an AtariPreprocessing object.
"""
def __init__(
self, preproc: seed_rl_atari_preprocessing.AtariPreprocessing, num_frames: int
):
self.preproc = preproc
self.num_frames = num_frames
self.frames: deque = deque(maxlen=num_frames)
assert isinstance(preproc.observation_space, Box)
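        # The stacked observation space repeats the per-frame bounds num_frames times along the channel (last) axis.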
def repeat(x):
return np.repeat(x, num_frames, axis=-1)
obs_space = preproc.observation_space
self.observation_space = Box(
low=repeat(obs_space.low), high=repeat(obs_space.high)
)
self.np_random = preproc.environment.np_random
def reset(self, seed: Optional[int] = None):
ob = self.preproc.reset(seed=seed)
for _ in range(self.num_frames):
self.frames.append(ob)
return self._get_array()
def step(self, action: int):
ob, reward, done, info = self.preproc.step(action)
self.frames.append(ob)
return self._get_array(), reward, done, info
def _get_array(self):
assert len(self.frames) == self.num_frames
return np.concatenate(self.frames, axis=-1)
def create_env(env_id: str, test: bool):
"""Create a FrameStack object that serves as environment for the `game`."""
if env_id == "empty":
return flow(
EmptyEnv(size=4, agent_start_pos=None),
RGBImgObsWrapper,
ImgObsWrapper,
RenderWrapper,
)
elif re.match(MyEnv.pattern, env_id):
[(height, width)] = re.findall(MyEnv.pattern, env_id)
height, width = map(int, (height, width))
return flow(
MyEnv(height=height, width=width),
TwoDGridWrapper,
OneHotWrapper,
RenderWrapper,
partial(TimeLimit, max_episode_steps=10 + height + width),
)
elif "NoFrameskip" in env_id:
return flow(
gym.make(env_id),
*([] if test else [ClipRewardEnv]),
seed_rl_atari_preprocessing.AtariPreprocessing,
partial(FrameStack, num_frames=4),
RenderWrapper,
)
elif "MiniGrid" in env_id:
return flow(gym.make(env_id), RGBImgObsWrapper, ImgObsWrapper, RenderWrapper)
else:
return gym.make(env_id)
def get_num_actions(game: str):
"""Get the number of possible actions of a given Atari game.
This determines the number of outputs in the actor part of the
actor-critic model.
"""
env = gym.make(game)
assert isinstance(env.action_space, Discrete)
return env.action_space.n
| [] |
2024-01-10 | par-tec/hackathon-2023 | submissions~aeropolis~challenge-2~hacka_challenge2.py | import os
import openai
#Note: This code sample requires OpenAI Python library version 0.28.1 or lower
def chat_ai(prompt: str, chat_content: str = "You are an AI assistant that helps people find information.", temp: float = 0.7, stop_word: str = None, my_engine: str = "GPT"):
"""
execute LLM interaction using a prompt and applying a role to the AI assistant.
The prompt contains the following elements:
- request: the request to the AI (e.g., "Tell me what is the meaning of life, based to the below considerations:")
- text: the text to analyze (e.g., "Life is a wonderful not one-way journey. It is a journey that we can enjoy only if we are able to understand the meaning of life.")
"""
# set openai configuration
openai.api_type = "azure"
openai.api_base = "https://hacka1.openai.azure.com/"
openai.api_version = "2023-07-01-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
eng = "parGPT"
mod = None
response = openai.ChatCompletion.create(
engine = eng,
model = mod,
messages=[
{
"role": "system",
"content": chat_content,
},
{
"role": "user",
"content": prompt,
},
],
temperature=temp,
max_tokens=800,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=stop_word,
)
res = response["choices"][0]["message"]["content"]
return res
def run_chatbot(eng: str = "GPT"):
#Define AI role
chatRole = None #the prompt is enough
# Define the text of the short story that you want to use as the source of the chatbot
text = """
In un mondo immaginario di nome Aeropolis, quando si attraversano le enormi porte di vetro si entra in un mondo di bellezza e meraviglia.
Umano: Ciao, chi sei?
Chatbot: Sono un assistente AI abitante di Aeropolis. Come posso aiutarti oggi?
Umano:
"""
#define stop condition
stop_word = "!STOP!"
chat = True
# Define the chat history variable
chat_history = ""
# Start a loop to interact with the chatbot until the stop word is used
while chat:
# Get the user input
user_input = input("Umano: ")
if stop_word in user_input:
exit()
# Append the user input to the chat history
chat_history += f"Umano: {user_input}\n"
# Generate the chatbot response using the openAI API
prompt=f"{text}\n{chat_history}Chatbot:"
res = chat_ai(prompt, temp = 1, my_engine = eng)
# Append the response text to the chat history
chat_history += f"Chatbot: {res}\n"
# Print the response text
print(f"Chatbot: {res}")
return
run_chatbot("GPT")
| [
"PLACEHOLDER\nPLACEHOLDERChatbot:"
] |
2024-01-10 | par-tec/hackathon-2023 | submissions~aeropolis~challenge-1~hacka_challenge1.py | import os
import openai
def ask_ai(prompt: str, chat_content: str = "Sei un assistente AI, che aiuta la gente a generare idee", temp: float = 0.7, stop_word: str = "\n\n", my_engine: str = "GPT"):
"""
execute LLM interaction using a prompt and applying a role to the AI assistant.
The prompt contains the following elements:
- request: the request to the AI (e.g., "Tell me what is the meaning of life, based to the below considerations:")
- text: the text to analyze (e.g., "Life is a wonderful not one-way journey. It is a journey that we can enjoy only if we are able to understand the meaning of life.")
"""
# set openai configuration
openai.api_type = "azure"
openai.api_base = "https://saopenai.openai.azure.com/"
openai.api_version = "2023-07-01-preview"
#set api key from environment variable
openai.api_key = os.getenv("OPENAI_API_KEY")
eng = "saGPT"
mod = None
if my_engine == "LLAMA":
# LLAMA
openai.api_type = "open_ai"
openai.api_base = "http://51.159.159.233:3001/v1"
openai.api_version = ""
eng = None
mod = "/models/llama-2-13b-chat.bin"
response = openai.ChatCompletion.create(
engine = eng,
model = mod,
messages=[
{
"role": "system",
"content": chat_content,
},
{
"role": "user",
"content": prompt,
},
],
temperature=temp,
max_tokens=800,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=stop_word,
)
res = response["choices"][0]["message"]["content"]
return res
def generate_ideas(eng: str = "GPT", style = "Le idee generate devono evocare emozioni positive ed essere divertenti"):
#Define AI role
chatRole = "Sei un assistente AI, che aiuta la gente a generare idee"
# Define the text of the short story that you want to use as the source to generate ideas
text = f"""Genera delle idee in breve frasi che userò per scrivere un racconto fantastico che parla del Mastro di Chiavi, custode di Aeropolis e possessore delle chiavi per aprire la cupola di vetro che protegge Aeropolis, la città sospesa tra le nuovole. Il suo grande amico è il Cuoco che cerca di rubargli le chiavi ma non ci riesce mai. {style}
Esempi di idee:
1. Il Mastro di Chiavi è un uomo misterioso, con un'aura sinistra che fa tremare le ossa a chiunque gli si avvicini troppo.
2. Il Mastro di Chiavi custodisce un segreto antico che potrebbe cambiare il destino di Aeropolis per sempre. Il Cuoco vorrebbe tanto scoprire questo segreto.
Idee suggerite:
1.
"""
    ideas = ask_ai(text, chat_content=chatRole, my_engine=eng)
print(f"--------------idee:\n {ideas}")
return
generate_ideas("GPT", style = "Le idee generate devono evocare emozioni positive ed essere divertenti")
| [] |
2024-01-10 | joseph-crowley/image-grid-vision | vision_reader.py | import base64
import json
from openai import OpenAI
from PIL import Image
from setup_logger import setup_logger
class VisionReader:
def __init__(self, grid_shape=None, logger=None):
"""
Initialize the VisionReader object.
:param grid_shape: Tuple specifying the grid shape (rows, cols). Defaults to (10, 1).
:param logger: Logger object for logging messages. If None, a default logger is set up.
"""
self.client = OpenAI()
self.grid_shape = grid_shape or (10, 1)
self.instructions = "Reply with a json object where the values are just the digits read out from the image. The readings are in format with numerical digits, and a sign indication for negative. Example reading: \"X.XXX nA\" where X is a digit 0-9. example response json: {\"row0col0\": \"0.000\", \"row1col0\": \"0.000\", ...}."
self.logger = logger or setup_logger('logs/vision_reader.log')
self.logger.info(f"Initialized VisionReader with grid shape {self.grid_shape}")
def encode_image(self, image_path):
"""
Encode an image to base64 format.
:param image_path: Path to the image file.
:return: Base64 encoded string of the image.
:raises: Exception if the image cannot be opened or encoded.
"""
try:
with open(image_path, "rb") as image_file:
encoded_image = base64.b64encode(image_file.read()).decode('utf-8')
self.logger.debug(f"Encoded image {image_path} to base64")
return encoded_image
except Exception as e:
self.logger.error(f"Error encoding image {image_path}: {e}")
raise
def read_image_grid(self, query, image_path):
"""
Send a query to the OpenAI API with an image and retrieve the response.
:param query: Query text to be sent along with the image.
:param image_path: Path to the image to be processed.
:return: Response from the OpenAI API.
:raises: Exception if there's an issue in sending the query or processing the response.
"""
base64_image = self.encode_image(image_path)
kwargs = {
"model": "gpt-4-vision-preview",
"messages": [
{
"role": "system",
"content": [
{
"type": "text",
"text": self.instructions + f" The image is a {self.grid_shape[0]}x{self.grid_shape[1]} grid. Reply with a the json object for every reading in the grid."
}
]
},
{
"role": "user",
"content": [
{
"type": "text",
"text": query
},
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{base64_image}",
}
}
]
}
],
"max_tokens": 3000,
}
try:
response = self.client.chat.completions.create(**kwargs)
self.logger.info(f"Received response from OpenAI for image {image_path}")
return response
except Exception as e:
self.logger.error(f"Error in read_image_grid for {image_path}: {e}")
raise
def parse_image(self, image_path):
"""
Parse the OpenAI API response to extract readings data.
:param image_path: Path to the image to be parsed.
        :return: Tuple of (parsed readings dict, estimated API cost in USD).
:raises: Exception if there's an error in parsing or processing the response.
"""
try:
response = self.read_image_grid("What are the current readings for all panels?", image_path)
if response is None:
self.logger.error(f"No response received for image {image_path}")
return None
self.logger.debug(f"Response: {response}")
content = response.choices[0].message.content
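            # Cost estimate matches the rates used below: $0.01 per 1k prompt tokens and $0.03 per 1k completion tokens.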
cost = response.usage.completion_tokens * 0.03/1000 + response.usage.prompt_tokens * 0.01/1000
self.logger.info(f"Cost: ${cost} for image {image_path}")
self.logger.info(f"Cost per reading: ${cost / (self.grid_shape[0] * self.grid_shape[1])} for image {image_path}")
# try to remove the code block
for line in content.splitlines():
if line.startswith("```"):
self.logger.debug("Found code block in vision run response, removing it.")
content = content.replace(line, "")
parsed_content = json.loads(content)
self.logger.info(f"Parsed content from image {image_path}")
self.logger.debug(f"Content: {parsed_content}")
return parsed_content, cost
except Exception as e:
self.logger.error(f"Error in parse_image for {image_path}: {e}")
raise
def get_image_size(self, image_path):
"""
Get the dimensions of an image.
:param image_path: Path to the image file.
:return: Size of the image (width, height).
:raises: Exception if the image cannot be opened or read.
"""
try:
with Image.open(image_path) as img:
size = img.size
self.logger.debug(f"Got image size {size} for {image_path}")
return size
except Exception as e:
self.logger.error(f"Error getting image size for {image_path}: {e}")
raise
def calculate_cost(self, image_path, detail='low'):
"""
Calculate the token cost of an image based on its dimensions and detail level.
Based on the pricing for the GPT-4 API:
https://platform.openai.com/docs/guides/vision/calculating-costs
https://openai.com/pricing
input tokens: 0.01 USD per 1000 tokens
output tokens: 0.03 USD per 1000 tokens
        :param image_path: Path to the image file.
:param detail: Detail level of the image ('low' or 'high').
:return: Total token cost for the image.
"""
if detail == 'low':
return 85
width, height = self.get_image_size(image_path)
# For high detail images
# Resize if necessary
if width > 2048 or height > 2048:
aspect_ratio = width / height
if width > height:
width = 2048
height = int(width / aspect_ratio)
else:
height = 2048
width = int(height * aspect_ratio)
# Scale to shortest side 768px
aspect_ratio = width / height
if width < height:
width = 768
height = int(width / aspect_ratio)
else:
height = 768
width = int(height * aspect_ratio)
# Calculate number of 512px squares
num_squares = (width // 512 + (1 if width % 512 != 0 else 0)) * \
(height // 512 + (1 if height % 512 != 0 else 0))
# Calculate final cost
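        # 85 base tokens plus 170 tokens per 512px tile, per the pricing guide linked in the docstring.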
return 170 * num_squares + 85 | [
"[{'type': 'text', 'text': PLACEHOLDER}, {'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}]"
] |
2024-01-10 | mdr5002/langchain-hub | ci_scripts~file-check.py | from pathlib import Path
from langchain.prompts import load_prompt
BASE_FOLDER = Path("prompts")
folders = BASE_FOLDER.glob("**")
def check_files(files):
file_names = [f.name for f in files]
if "README.md" not in file_names:
raise ValueError(f"Expected to find a README.md file, but found {files}")
other_files = [file for file in files if file.name != "README.md"]
for other_file in other_files:
if other_file.suffix in (".json", ".yaml"):
load_prompt(other_file)
# TODO: testing for python files
def check_all_folders():
for folder in folders:
folder_path = Path(folder)
files = [x for x in folder_path.iterdir() if x.is_file()]
if len(files) > 0:
try:
check_files(files)
except Exception as e:
raise ValueError(f"Found error with {folder}: {e}")
if __name__ == "__main__":
check_all_folders()
| [] |
2024-01-10 | pjm4github/DOE-code | gov_pnnl_goss~cimhub~CIM~buildCIMClass.py | import re
import json
import os
import time
from openai import OpenAI
from rdf_converter import build_init_struct
# Replace 'YOUR_API_KEY' (as an ENV variable) with your actual GPT-3 API key
from pathlib import Path
class GptCodeConverter():
MODEL_CHOICE_1 = "gpt-3.5-turbo-1106"
MODEL_CHOICE_2 = "code-davinci-002",
MODEL_CHOICE_3 = "gpt-3.5-turbo",
# max_tokens=500, # Adjust as needed
# temperature=0.7 # Adjust the temperature for creativity
MAX_TOKENS = 10000 # Maximum number of tokens that can be used with the OPENAI model (model dependant)
def __init__(self, language="Java", model=MODEL_CHOICE_1):
self.client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
# api_key=api_key,
)
self.model_name = model
self.language = language
self.results = ''
self.system_instructions = """Create an example rdf model of the given CIM type using only the rdf, rdfs and cim schemas using the Common Information Model (CIM) prepared by the Technical Committee 57 of the IEC as a reference"""
def create_rdf(self, instructions):
"""
Convert the given code snippet using GPT-3.
"""
# Call the GPT-3 API to generate the converted code
try:
chat_completion = self.client.chat.completions.create(
messages=[
{
"role": "system",
"content": self.system_instructions
},
{
"role": "user",
"content": instructions
}
],
model=self.model_name,
)
# Extract and return the generated code from the response
results = chat_completion.choices[0].message.content
except Exception as e:
print(e)
results = ''
self.results = results
if __name__ == "__main__":
directory_path = f"{os.path.expanduser('~')}/Documents/Git/GitHub/GOSS-GridAPPS-D-PYTHON/gov_pnnl_goss/cimhub/CIM/"
current_time = int(time.time())
cim_types = "CIMtypes.txt"
converter = GptCodeConverter("RDF")
rdf_failcount = 0
rdf_fail_files = []
json_failcount = 0
json_fail_files = []
Path(directory_path).mkdir(parents=True, exist_ok=True)
with open(directory_path + cim_types, 'r') as f:
lines = f.readlines()
for line in lines:
cim_type = line.strip()
instructions = f'Create a complex example rdf model of a {cim_type} CIM object without using xml. Make sure all rdf triples have a cim prefix.'
print(f"Building an example rdf file for {cim_type}")
converter.create_rdf(instructions)
results = converter.results
# clean up the results here
resultant_lines = results.split('\n')
clean_lines = []
enclosure = False
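            # Keep only the lines between the first pair of triple-backtick code fences, stripping ^^<schema>:<type> datatype suffixes and @en language tags.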
for r in resultant_lines:
if enclosure and r.find("```") == 0:
enclosure = False
break
if enclosure:
# regexp to remove all these
# line = line.replace("^^xsd:boolean", "").replace("^^xsd:float","").replace("^^xsd:int", "").\
# replace("^^xsd:complex", "").replace("^^xsd:integer", "").replace("^^xsd:double", "").\
# replace("^^xsd:string", "").replace("^^xsd:dateTime", "") # .replace("rdf:type", "a"))
# new_lines.append(line.replace("^^rdf:boolean", "").replace("^^rdf:float","").
# replace("^^rdf:int", "").replace("^^rdf:complex", "").replace("^^rdf:integer", "").
# replace("^^rdf:double", "").replace("^^rdf:string", "")) # .replace("rdf:type", "a"))
                    r2 = re.sub(r"""(\^\^[a-zA-Z0-9]*)\:([a-zA-Z0-9]*)""", "", r)
if r2.find("@en")>0:
r3 = r2.replace("@en", "")
else:
r3 = r2
clean_lines.append(r3)
if not enclosure and r.find("```") == 0:
enclosure = True
clean_results = '\n'.join(clean_lines)
rdf_directory_path = f"{directory_path}rdf/"
Path(rdf_directory_path).mkdir(parents=True, exist_ok=True)
output_filename = f"{rdf_directory_path}{cim_type}{current_time}.rdf"
try:
with open(output_filename, 'w') as f2:
f2.write(clean_results)
            except UnicodeEncodeError as e:
                rdf_failcount += 1
                rdf_fail_files.append(cim_type)
                print(e)
struct_dict = {}
json_text = "{}"
try:
json_directory_path = f"{directory_path}json/"
Path(json_directory_path).mkdir(parents=True, exist_ok=True)
output_filename = f"{json_directory_path}{cim_type}{current_time}.json"
struct_dict = build_init_struct(cim_type, clean_results)
json_text = json.dumps(struct_dict, indent=2)
except Exception as e:
print(f">>>>>>>>>> Structure build/ json.dumps failed {cim_type} error: {e}")
json_failcount += 1
json_fail_files.append(cim_type)
with open(output_filename, 'w') as f2:
f2.write(json_text)
pjson = f"@startjson\n{json_text}\n@endjson\n"
# Use this file name to output a non timestamped version of the CIM model.
output_filename = f"{directory_path}puml/{cim_type}.puml"
# output_filename = f"{directory_path}puml/{cim_type}{current_time}.puml"
with open(output_filename, 'w') as f2:
f2.write(pjson)
print(f"RDF fails: {rdf_failcount}, JSON fails: {json_failcount}")
with open(f"{directory_path}/failed_conversions.txt", 'w') as f:
for line in json_fail_files:
            f.write(line + "\n")
| [] |
2024-01-10 | pjm4github/DOE-code | GPT~gptJava2Python.py | """
This module converts javacode to python code
"""
import codecs
import os
from openai import OpenAI
# Replace 'YOUR_API_KEY' (as an ENV variable) with your actual GPT-3 API key
import urllib.parse
import re
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
# api_key=api_key,
)
MODEL_NAME = "gpt-3.5-turbo-1106"
def remove_multiline_comments(code):
"""
Removes all multiline comments from the given code.
"""
# Create a regular expression that matches multiline comments.
comment_regex = re.compile(r'/\*(?:[^*]+|\*(?!/)|[^*])*\*/')
# Remove all multiline comments from the code.
return comment_regex.sub('', code)
def remove_block_comments(code):
"""
    Removes all single-line comments from Java code.
    Line comments start with //.
Args:
code: The Java code to remove comments from.
Returns:
The Java code with all comments removed.
"""
    # Create a regular expression to match single-line // comments.
    pattern = re.compile(r'//.*')
    # Remove all // comments from the code.
return re.sub(pattern, '', code)
def convert_code(code_snippet):
"""
Convert the given code snippet using GPT-3.
"""
# Call the GPT-3 API to generate the converted code
try:
chat_completion = client.chat.completions.create(
messages=[
{
"role": "system",
"content": "Given the Java class, convert that code to python keeping the comments, using snake_case methods and local imports. Keep the class name CamelCase,"
},
{
"role": "user",
"content": code_snippet
}
],
model=MODEL_NAME,
# model="code-davinci-002",
# model="gpt-3.5-turbo",
# max_tokens=500, # Adjust as needed
# temperature=0.7 # Adjust the temperature for creativity
)
# Extract and return the generated code from the response
converted_code = chat_completion.choices[0].message.content
except Exception as e:
converted_code = ''
return converted_code
MAX_TOKENS = 10000 # Maximum number of tokens that can be used with the OPENAI model (model dependant)
if __name__ == "__main__":
# directory_path = f"{os.path.expanduser('~')}/Documents/Git/GitHub/GOSS-GridAPPS-D-PYTHON/gov_pnnl_goss/gridappsd/dto/field/"
subdir = """/Documents/Git/GitHub/GOSS-GridAPPS-D-PYTHON/CIM_STD_PYTHON/TC57CIM/IEC61968/Common/"""
directory_path = f"{os.path.expanduser('~')}{subdir}"
directory_path += '/' if not directory_path.endswith('/') else ""
try:
for filename in os.listdir(directory_path):
if filename.endswith(".java"):
file_path = os.path.join(directory_path, filename)
if os.path.isfile(file_path):
print(f"#################################################\nOpening {filename} for conversion")
file_size = os.path.getsize(file_path)
with open(directory_path + filename, 'r') as file:
code = file.readlines()
# remove all imports here
REMOVE_IMPORTS = False
if REMOVE_IMPORTS:
clean_code = []
for line in code:
if not line.find('import') == 0:
clean_code.append(line)
else:
clean_code = code
# create a blob of code
code_string = '\n'.join(clean_code)
# remove comments
if file_size > MAX_TOKENS:
code_string = remove_block_comments(code_string)
code_string = remove_multiline_comments(code_string)
print(f"File: {filename}, Orig size: {file_size}, cleaned size: {len(code_string)} (bytes)")
# URL-encode the text
try:
code_string.encode('ascii')
except UnicodeEncodeError:
print("... Removing non ascii characters")
code_string = ''.join([i if ord(i) < 128 else ' ' for i in code_string])
# raise ValueError('code is not ASCII')
encoded_text = urllib.parse.quote(code_string)
converted_code = convert_code(encoded_text)
if converted_code:
# get rid of the leading and trailing python quoting
converted_code = converted_code.replace("```python", f"# Converted by an OPENAI API call using model: {MODEL_NAME}")
converted_code = converted_code[:-3] if converted_code[-3:] == "```" else converted_code
output_filename = directory_path + filename.replace('java', 'py')
print(f"{output_filename} written")
with open(output_filename, 'w') as f:
f.write(converted_code)
# print(converted_code)
else:
print(f"{filename} conversion failed")
except FileNotFoundError:
print(f"Directory '{directory_path}' not found.")
except Exception as e:
print(f"An error occurred: {str(e)}")
| [
"Given the Java class, convert that code to python keeping the comments, using snake_case methods and local imports. Keep the class name CamelCase,"
] |
2024-01-10 | pjm4github/DOE-code | gov_pnnl_goss~gridappsd~configuration~VnomExportConfigurationHandler.py | # Converted by an OPENAI API call using model: gpt-3.5-turbo-1106
import logging
import io
import os
import subprocess
from openai.resources import Files
from gov_pnnl_goss.SpecialClasses import File
from gov_pnnl_goss.gridappsd.api.ConfigurationHandler import ConfigurationHandler
from gov_pnnl_goss.gridappsd.api.ConfigurationManager import ConfigurationManager
from gov_pnnl_goss.gridappsd.api.LogManager import LogManager
from gov_pnnl_goss.gridappsd.api.PowergridModelDataManager import PowergridModelDataManager
from gov_pnnl_goss.gridappsd.api.SimulationManager import SimulationManager
from gov_pnnl_goss.gridappsd.configuration.CIMDictionaryConfigurationHandler import PrintWriter
from gov_pnnl_goss.gridappsd.configuration.DSSAllConfigurationHandler import DSSAllConfigurationHandler
from gov_pnnl_goss.gridappsd.dto.LogMessage import ProcessStatus
from gov_pnnl_goss.gridappsd.dto.YBusExportResponse import YBusExportResponse
from gov_pnnl_goss.gridappsd.utils.GridAppsDConstants import GridAppsDConstants
class VnomExportConfigurationHandler(ConfigurationHandler):
TYPENAME = "Vnom Export"
SIMULATIONID = "simulation_id"
DIRECTORY = "directory"
MODELID = "model_id"
ZFRACTION = "z_fraction"
IFRACTION = "i_fraction"
PFRACTION = "p_fraction"
SCHEDULENAME = "schedule_name"
LOADSCALINGFACTOR = "load_scaling_factor"
def __init__(self, log_manager: LogManager = None):
self.logger = log_manager
self.log = LogManager(VnomExportConfigurationHandler.__class__.__name__)
self.power_grid_model_data_manager = PowergridModelDataManager()
self.config_manager = ConfigurationManager() # config_manager
self.simulation_manager = SimulationManager()
def start(self):
if self.config_manager is not None:
self.config_manager.register_configuration_handler("Vnom Export", self)
else:
# TODO send log message and exception
self.log.warn("No Config manager available for " + self.__class__.__name__)
def generate_config(self, parameters: dict, out: io.FileIO, process_id, username):
simulation_id = parameters.get("simulation_id")
model_id = None
simulation_dir = None
if simulation_id:
simulation_context = self.simulation_manager.get_simulation_context_for_id(simulation_id)
parameters["i_fraction"] = str(simulation_context.get_request().get_simulation_config().get_model_creation_config().get_i_fraction())
parameters["z_fraction"] = str(simulation_context.get_request().get_simulation_config().get_model_creation_config().get_z_fraction())
parameters["p_fraction"] = str(simulation_context.get_request().get_simulation_config().get_model_creation_config().get_p_fraction())
parameters["load_scaling_factor"] = str(simulation_context.get_request().get_simulation_config().get_model_creation_config().get_load_scaling_factor())
parameters["schedule_name"] = simulation_context.get_request().get_simulation_config().get_model_creation_config().get_schedule_name()
parameters["model_id"] = simulation_context.get_request().get_power_system_config().get_line_name()
parameters["directory"] = simulation_context.get_simulation_dir()
parameters["simulation_start_time"] = simulation_context.get_request().get_simulation_config().get_start_time()
parameters["simulation_duration"] = simulation_context.get_request().get_simulation_config().get_duration()
simulation_dir = File(simulation_context.get_simulation_dir())
else:
model_id = GridAppsDConstants.get_string_property(parameters, "model_id", None)
simulation_id = process_id
if model_id is None:
raise Exception("Model Id or simulation Id not provided in request parameters.")
simulation_dir = File(self.config_manager.get_configuration_property(GridAppsDConstants.GRIDAPPSD_TEMP_PATH), "models/" + model_id)
parameters["i_fraction"] = GridAppsDConstants.get_double_property(parameters.get("i_fraction", 0))
parameters["z_fraction"] = GridAppsDConstants.get_double_property(parameters.get("z_fraction", 0))
parameters["p_fraction"] = GridAppsDConstants.get_double_property(parameters.get("p_fraction", 0))
parameters["load_scaling_factor"] = GridAppsDConstants.get_double_property(parameters.get("load_scaling_factor", 1))
parameters["schedule_name"] = GridAppsDConstants.get_string_property(parameters.get("schedule_name", ""))
parameters["model_id"] = model_id
parameters["directory"] = simulation_dir
command_file = File(simulation_dir, "opendsscmdInput.txt")
dss_base_file = File(simulation_dir, "model_base.dss")
for key in parameters:
self.log.debug(key + " = " + parameters.get(key))
self.logger.debug(ProcessStatus.RUNNING, simulation_id, "Generating DSS base file")
        base_print_writer = PrintWriter(io.StringIO())
base_configuration_handler = DSSAllConfigurationHandler(self.logger, self.simulation_manager, self.config_manager)
base_configuration_handler.generate_config(parameters, base_print_writer, simulation_id, username)
if not dss_base_file.exists():
raise Exception("Error: Could not create DSS base file to export Vnom matrix")
self.logger.debug(ProcessStatus.RUNNING, simulation_id, "Finished generating DSS base file")
self.logger.debug(ProcessStatus.RUNNING, simulation_id, "Generating commands file for opendsscmd")
with open(command_file, "w") as file_writer:
file_writer.write("redirect model_base.dss\n")
file_writer.write("batchedit transformer..* wdg=2 tap=1\n")
file_writer.write("batchedit regcontrol..* enabled=false\n")
file_writer.write("batchedit isource..* enabled=false\n")
file_writer.write("batchedit vsource..* pu=1.0\n")
file_writer.write("batchedit load..* enabled=false\n")
file_writer.write("batchedit generator..* enabled=false\n")
file_writer.write("batchedit pvsystem..* enabled=false\n")
file_writer.write("batchedit storage..* enabled=false\n")
file_writer.write("batchedit capacitor..* enabled=false\n")
file_writer.write("solve\n")
file_writer.write("export voltages base_voltages.csv\n")
self.logger.debug(ProcessStatus.RUNNING, simulation_id, "Finished generating commands file for opendsscmd")
self.logger.debug(ProcessStatus.RUNNING, simulation_id, "Generating Y Bus matrix")
process_service_builder = subprocess.Popen("opendsscmd " + command_file.getName(), cwd=simulation_dir, shell=True, stdout=subprocess.PIPE)
process_service_builder.wait(10)
response = YBusExportResponse()
vnom_path = File(os.path.abspath(simulation_dir), "base_voltages.csv")
with open(vnom_path, 'r') as f:
lines = f.readlines()
response.set_vnom(lines)
self.logger.debug(ProcessStatus.RUNNING, simulation_id, "Finished generating Vnom export")
out.write(response)
| [] |
2024-01-10 | pjm4github/DOE-code | GPT~gptJava2PythonEA_CIM.py | """
This module converts javacode to python code
"""
import codecs
import os
from datetime import datetime
from pathlib import Path
from openai import OpenAI
# Replace 'YOUR_API_KEY' (as an ENV variable) with your actual GPT-3 API key
import urllib.parse
import re
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
# api_key=api_key,
)
MODEL_NAME = "gpt-3.5-turbo-1106"
def remove_multiline_comments(code):
"""
Removes all multiline comments from the given code.
"""
# Create a regular expression that matches multiline comments.
comment_regex = re.compile(r'/\*(?:[^*]+|\*(?!/)|[^*])*\*/')
# Remove all multiline comments from the code.
return comment_regex.sub('', code)
def remove_block_comments(code):
"""
Removes all block comments from Java code.
block comments are start with //
Args:
code: The Java code to remove comments from.
Returns:
The Java code with all comments removed.
"""
# Create a regular expression to match block comments.
pattern = re.compile(r'/\*.*?\*/')
# Remove all block comments from the code.
return re.sub(pattern, '', code)
def convert_code(code_snippet):
"""
Convert the given code snippet using GPT-3.
"""
# Call the GPT-3 API to generate the converted code
try:
chat_completion = client.chat.completions.create(
messages=[
{
"role": "system",
"content": "Given the Java class, convert that code to python, using snake_case methods and local imports, include the original comments, and add python typing to the python class assuming that the Java class type "
"is also availible as a Python library. Keep the class name CamelCase."
},
{
"role": "user",
"content": code_snippet
}
],
model=MODEL_NAME,
# model="code-davinci-002",
# model="gpt-3.5-turbo",
# max_tokens=500, # Adjust as needed
# temperature=0.7 # Adjust the temperature for creativity
)
# Extract and return the generated code from the response
converted_code = chat_completion.choices[0].message.content
except Exception as e:
converted_code = ''
return converted_code
MAX_TOKENS = 10000 # Maximum number of tokens that can be used with the OPENAI model (model dependant)
if __name__ == "__main__":
# lots of hand-crafting here needed to work through each subdirectory under Base
directory_path = f"{os.path.expanduser('~')}/Documents/Git/GitHub/GOSS-GridAPPS-D-PYTHON/" \
f"CIM_STD_JAVA/TC57CIM/IEC61970/Base/Topology/"
output_path = f"{directory_path}/Converted/"
try:
starting_time = datetime.ctime(datetime.now())
for filename in os.listdir(directory_path):
if filename.endswith(".java"):
file_path = os.path.join(directory_path, filename)
if os.path.isfile(file_path):
print(f"#################################################\nOpening {filename} for conversion")
file_size = os.path.getsize(file_path)
with open(directory_path + filename, 'r') as file:
code = file.readlines()
# remove all imports here
REMOVE_IMPORTS = True
if REMOVE_IMPORTS:
clean_code = []
for line in code:
if not line.find('import') == 0:
clean_code.append(line)
else:
clean_code = code
# create a blob of code
code_string = '\n'.join(clean_code)
# remove comments
if file_size > MAX_TOKENS:
code_string = remove_block_comments(code_string)
code_string = remove_multiline_comments(code_string)
print(f"File: {filename}, Orig size: {file_size}, cleaned size: {len(code_string)} (bytes)")
# URL-encode the text
                    try:
                        code_string.encode('ascii')
                    except UnicodeEncodeError:
                        raise ValueError('code is not ASCII')
encoded_text = urllib.parse.quote(code_string)
converted_code = convert_code(encoded_text)
if converted_code:
# get rid of the leading and trailing python quoting
converted_code = converted_code.replace("```python", f"# Converted by an OPENAI API call using model: {MODEL_NAME} on {starting_time}")
converted_code = converted_code[:-3] if converted_code[-3:] == "```" else converted_code
converted_code = converted_code.replace("```", '"""')
new_filename = filename.replace('java', 'py')
output_filename = output_path + new_filename
print(f"Done with {new_filename}: {output_filename}")
Path(output_path).mkdir(parents=True, exist_ok=True)
with open(output_filename, 'w') as f:
f.write(converted_code)
# print(converted_code)
else:
print(f"{filename} conversion failed")
except FileNotFoundError:
print(f"Directory '{directory_path}' not found.")
except Exception as e:
print(f"An error occurred: {str(e)}")
| [
"Given the Java class, convert that code to python, using snake_case methods and local imports, include the original comments, and add python typing to the python class assuming that the Java class type is also availible as a Python library. Keep the class name CamelCase."
] |
2024-01-10 | pjm4github/DOE-code | GPT~gptC2Python.py | import re
import codecs
import os
import time
from openai import OpenAI
# Replace 'YOUR_API_KEY' (as an ENV variable) with your actual GPT-3 API key
import urllib.parse
import re
def remove_multiline_comments(code):
"""
Removes all multiline comments from the given code.
all block comments (enclosed by /* and */) from a C++ file are removed
"""
cleaned_code = re.sub(r'/\*.*?\*/', '', code, flags=re.DOTALL)
# Remove all multiline comments from the code.
return cleaned_code
def remove_single_line_comments(input_code):
"""
    Removes all single-line comments (starting with //) from the given code.
    Args:
        input_code: The source code to remove comments from.
    Returns:
        The code with all // comments removed.
"""
# Use regular expression to remove // comments
cleaned_code = re.sub(r'//.*', '', input_code)
    # Remove all // comments from the code.
return cleaned_code
class GptCodeConverter():
MODEL_CHOICE_1 = "gpt-3.5-turbo-1106"
MODEL_CHOICE_2 = "code-davinci-002",
MODEL_CHOICE_3 = "gpt-3.5-turbo",
# max_tokens=500, # Adjust as needed
# temperature=0.7 # Adjust the temperature for creativity
MAX_TOKENS = 10000 # Maximum number of tokens that can be used with the OPENAI model (model dependant)
def __init__(self, language="Java", model=MODEL_CHOICE_1):
self.client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
# api_key=api_key,
)
self.model_name = model
self.language = language
self.setup_instructions = f"Given this {language} code class convert it to python using snake_case methods names. Keep the class names in CamelCase."
self.add_function_instructions = f"Given this {language} function function convert it to python using snake_case function names."
self.add_class_instructions = f"Given this class convert that code to python using snake_case method names."
def convert_code(self, code_snippet, instructions):
"""
Convert the given code snippet using GPT-3.
"""
# Call the GPT-3 API to generate the converted code
try:
chat_completion = self.client.chat.completions.create(
messages=[
{
"role": "system",
"content": instructions
},
{
"role": "user",
"content": code_snippet
}
],
model=self.model_name,
)
# Extract and return the generated code from the response
converted_code = chat_completion.choices[0].message.content
except Exception as e:
print(e)
converted_code = ''
self.converted_code = converted_code
class CCodeParser:
def __init__(self, fn=None):
self.input_code_filename = fn
self.classes = []
self.functions = []
self.c_code = ""
self.blanked_code = ""
def load_file(self, filename=None):
if filename:
self.input_code_filename = filename
print(f"\n\n################################\nLOADING FILE {self.input_code_filename}")
with open(self.input_code_filename, 'r') as file:
self.c_code = file.read()
self.blanked_code = self.c_code
def un_load_file(self):
self.input_code_filename = None
self.classes = []
self.functions = []
self.c_code = ""
self.blanked_code = ""
def dump_classes(self, full=False):
for c in self.classes:
return_type = c["Return"] + " " if c["Return"] else ""
s = f'{return_type}{c["Class"]}::{c["Method"]}'
if full:
s +=f'({c["Arguments"]})\n{{{c["Body"]}}}\n'
print(s)
def dump_functions(self, full=False):
for g in self.functions:
if g["Function"] and g["Function"] not in ['if', 'for', 'while']: # hack
s = f'{g["Return"]} {g["Function"]}'
if full:
s += f'({g["Arguments"]})\n{{{g["Body"]}}}\n'
print(s)
def snarf_classes(self):
print("SCANNING for CLASSES...")
test = self.c_code
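        # Match C++ method headers of the form "ReturnType Class::method(args) {", then balance braces forward to find each body.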
pattern = r"""((?P<return_type>\w+)\s+)*(?P<class_name>\w+)::(?P<method_name>\w+)\((?P<arguments>[^)]*)\)\s*{"""
max_len = len(self.c_code)
# Find all matches of the pattern in the source code
matches = re.finditer(pattern, self.c_code)
for m in matches:
span = m.span()
# Now walk forward in the code and match the braces until the braces are balanced to find the end of the method body
# test_code = self.c_code[span[2]:]
brace_count = 1 # We start with 1 since that's already included in the span
method_end = span[1]
#############################
# CLASS SCANNER
while brace_count:
if method_end >= max_len:
print(f"something went wrong with the class scanner, skipping {m.group('class_name')}::{m.group('method_name')},")
break
test_char = self.c_code[method_end]
# need to qualify the characters to make sure that they are not escaped
if test_char == "{":
brace_count += 1
elif test_char == "}":
brace_count -= 1
method_end += 1
if method_end >= max_len:
continue
method_body = self.c_code[span[1]: method_end-1] # does not include the opening and closing braces
class_dict = {"Return": m.group('return_type'),
"Class": m.group('class_name'),
"Method": m.group('method_name'),
"Arguments": m.group('arguments'),
"Body": method_body,
"BodySpan": (span[1], method_end-1)}
self.classes.append(class_dict)
# # pattern = r"""((?P<return_type>\w+)\s+)*(?P<class_name>\w+)::(?P<method_name>\w+)\((?P<arguments>[^)]*)\)\s*{(?P<method_body>(?:[^{}]*\{[^{}]*\})*[^{}]*)}"""
# # pattern = r"""\s*(?P<return_type>\w+)\s+(?P<class_name>\w+)::(?P<method_name>\w+)\((?P<arguments>[^)]*)\)\s*{(?P<method_body>(?:[^{}]*|{(?:[^{}]*|{(?:[^{}]*|{[^{}]*})*})*})*})"""
# pattern = r"""((?P<return_type>\w+)\s+)*(?P<class_name>\w+)::(?P<method_name>\w+)\((?P<arguments>[^)]*)\)\s*{(?P<method_body>(?:[^{}]*|{(?:[^{}]*|{(?:[^{}]*|{[^{}]*})*})*})*)}"""
# p_compile = re.compile(pattern, re.MULTILINE)
# matches = p_compile.finditer(self.c_code)
# # matches = re.finditer(pattern, self.c_code, re.MULTILINE)
#
# for match in matches:
# # For each of the matches, capture_span holds the span of the body match for that class.
# # This is used to postprocess the file and remove the class body to produce only a skeleton version
# # of the code that will be sent to Open.AI for conversion into python.
# # The trick is to find the body closest to the point after the class declaration because some class bodies
# # will match everything (e.g. and empty class body)
# print(f"len={len(self.c_code)}, {match.span()}, CLASS: {match.group('class_name')}::{match.group('method_name')}")
# capture_span = None
#
# sb = re.finditer(re.escape(match.group('method_body').strip()), self.c_code, re.MULTILINE)
# body_spans = []
# for m in sb:
# body_spans.append(m.span())
#
# se = re.finditer(re.escape(match.group('method_name') + "(" + match.group('arguments') + ")"),
# self.c_code, re.MULTILINE)
# class_spans = []
# for m in se:
# class_spans.append(m.span())
# # Find the location of the body span that is closest to the end of the class_span
# if len(class_spans) == 1: # Only do this if there is one matching class otherwise it may be wrong.
# cse = class_spans[0][1]
#
# for body_span in body_spans:
# if body_span[0] > cse:
# capture_span = body_span
# break
# # Assemble the class structure
# class_dict = {"Return": match.group('return_type'),
# "Class": match.group('class_name'),
# "Method": match.group('method_name'),
# "Arguments": match.group('arguments'),
# "Body": match.group('method_body'),
# "BodySpan": capture_span}
# self.classes.append(class_dict)
# this is how to replace the code with spaces
print("... DONE SCANNING for CLASSES")
for c in self.classes:
span = c.get("BodySpan", None)
if span:
start_pos = span[0]
end_pos = span[1]
self.blanked_code = self.blanked_code[:start_pos] + ' ' * (end_pos - start_pos) + self.blanked_code[
end_pos:]
def snarf_function(self):
print("SCANNING for FUNCTIONS ... ")
test = self.c_code
pattern = r"""(?P<return_type>\w+)\s+(?P<function_name>[A-Za-z0-9_*]*)\((?P<arguments>[^)]*)\)\s*{(?P<function_body>(?:[^{}]*\{[^{}]*\})*[^{}]*)}"""
matches = re.finditer(pattern, test)
for match in matches:
if match.group('function_name') and match.group('function_name') in ['if', 'for', 'while']: # hack
break # skip over the if, for and while statemenst that are captured by the regexp pattern above
print(f"len={len(self.c_code)}, {match.span()}, FUNCTION: {match.group('function_name')}")
capture_span = None
sb = re.finditer(re.escape(match.group('function_body').strip()), self.c_code, re.MULTILINE)
body_spans = []
for m in sb:
body_spans.append(m.span())
se = re.finditer(re.escape(match.group('function_name') + "(" + match.group('arguments') + ")"),
self.c_code, re.MULTILINE)
function_spans = []
for m in se:
function_spans.append(m.span())
# Find the location of the body span that is closest to the end of the class_span
if len(function_spans) == 1: # Only do this if there is one matching class otherwise it may be wrong.
cse = function_spans[0][1]
for body_span in body_spans:
if body_span[0] > cse:
capture_span = body_span
break
self.functions.append({"Return": match.group('return_type'),
"Function": match.group('function_name'),
"Arguments": match.group('arguments'),
"Body": match.group('function_body'),
"BodySpan": capture_span})
print("... DONE SCANNING for FUNCTIONS")
# this is how to replace the code with spaces
for f in self.functions:
span = f.get("BodySpan", None)
if span:
start_pos = span[0]
end_pos = span[1]
self.blanked_code = self.blanked_code[:start_pos] + ' ' * (end_pos - start_pos) + self.blanked_code[end_pos:]
def parse(self):
print("Snarfing classes")
self.snarf_classes()
print("Snarfing functions")
self.snarf_function()
def parse_and_convert(parser, directory_path, filename, current_time):
converter = GptCodeConverter("CPP")
s = parser.blanked_code
# Get rid of all the white space that was inserted.
s = '\n'.join(line for line in s.splitlines() if line.strip())
print("Converting the base class")
encoded_text = urllib.parse.quote(s)
python_code = ""
converter.convert_code(encoded_text, converter.setup_instructions)
python_snip = converter.converted_code
if python_snip:
# get rid of the leading and trailing python quoting
converted_code = python_snip.replace("```python", f"# Converted by an OPENAI API call using model: {converter.model_name}")
converted_code = converted_code[:-3] if converted_code[-3:] == "```" else converted_code
python_code += "\n\n" + converted_code
# print(converted_code)
else:
print(f"{filename} blank conversion failed")
for g in parser.functions:
if g["Function"] is None :
continue
if isinstance(g["Function"], str):
if g["Function"].strip() == "":
print(" skipping an empty function")
continue
print(f'Converting a function: {g["Function"]}')
s = f'{g["Return"]} {g["Function"]}({g["Arguments"]})\n' \
f'{{{g["Body"]}}}\n'
# remove comments
if len(s) > GptCodeConverter.MAX_TOKENS:
s = remove_single_line_comments(s)
s = remove_multiline_comments(s)
encoded_text = urllib.parse.quote(s)
converter.convert_code(s, converter.add_function_instructions)
python_snip = converter.converted_code
if python_snip:
# get rid of the leading and trailing python quoting
converted_code = python_snip.replace("```python",
f"# Converted by an OPENAI API call using model: {converter.model_name}")
converted_code = converted_code[:-3] if converted_code[-3:] == "```" else converted_code
python_code += "\n\n" + converted_code
# print(converted_code)
else:
print(f"{filename}, {g['Function']} conversion failed")
for c in parser.classes:
return_type = c["Return"] + " " if c["Return"] else ""
s = f'{return_type}{c["Class"]}::{c["Method"]}({c["Arguments"]})\n' \
f'{{{c["Body"]}}}\n'
print(f'Converting a class: {c["Class"]}::{c["Method"]}')
# encoded_text = urllib.parse.quote(s)
converter.convert_code(s, converter.add_class_instructions)
python_snip = converter.converted_code
if python_snip:
# get rid of the leading and trailing python quoting
converted_code = python_snip.replace("```",
f"# Converted by an OPENAI API call using model: {converter.model_name} ")
converted_code = converted_code[:-3] if converted_code[-3:] == "```" else converted_code
python_code += "\n\n" + converted_code
# print(converted_code)
else:
print(f"{filename} {c['Class']}::{c['Method']} conversion failed")
file_extension = '.py'
base_filename = filename.split(".")[0]
# Create a unique filename by appending the timestamp to a base filename and file extension
output_filename = f"{directory_path}{base_filename}{current_time}{file_extension}"
with open(output_filename, 'w') as f:
f.write(python_code)
print(f"{output_filename} written")
def main(path, filename=None):
# directory_path = f"{os.path.expanduser('~')}/Documents/Git/GitHub/GOSS-GridAPPS-D-PYTHON/gov_pnnl_goss/gridlab/climate/"
#
# Get the current timestamp (seconds since the epoch)
current_time = int(time.time())
if filename:
parser = CCodeParser(path + filename)
parser.load_file()
parser.parse()
print("CLASSES-------------")
parser.dump_classes()
print("FUNCTIONS-----------")
parser.dump_functions()
print("\n")
parse_and_convert(parser, path, filename, current_time)
print(f"converted {len(parser.classes)}: classes and {len(parser.functions)}: functions")
print("Done")
# filename = "network.cpp" # Replace with your C code file
else:
parser = CCodeParser()
for filename in os.listdir(path):
if filename.endswith(".cpp"):
file_path = os.path.join(path, filename)
if os.path.isfile(file_path):
print(f"#################################################\nOpening {filename} for conversion")
file_size = os.path.getsize(file_path)
parser.load_file(path + filename)
# remove all imports here
REMOVE_IMPORTS = True
if REMOVE_IMPORTS:
clean_code = []
for line in parser.c_code.split('\n'):
if not line.find('#include') == 0:
clean_code.append(line)
else:
clean_code = parser.c_code.split('\n')
# create a blob of code
code_string = '\n'.join(clean_code)
# remove comments
if file_size > GptCodeConverter.MAX_TOKENS:
code_string = remove_single_line_comments(code_string)
code_string = remove_multiline_comments(code_string)
print(f"File: {filename}, Orig size: {file_size}, cleaned size: {len(code_string)} (bytes)")
# URL-encode the text
# try:
# code_string.encode('ascii')
# except UnicodeDecodeError:
# raise ValueError('code is not ASCII')
parser.c_code = code_string
parser.blanked_code = code_string
parser.parse()
print("\n")
print("CLASSES-------------")
parser.dump_classes()
print("\n")
print("FUNCTIONS-----------")
parser.dump_functions()
parse_and_convert(parser, path, filename, current_time)
print(f"converted {len(parser.classes)}: classes and {len(parser.functions)}: functions")
parser.un_load_file()
print("Done")
if __name__ == "__main__":
directory_path = f"{os.path.expanduser('~')}/Documents/Git/GitHub/GOSS-GridAPPS-D-PYTHON/gov_pnnl_goss/gridlab/gldcore/"
main(directory_path)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~rede_search_detector.py | from typing import List, Optional
import numpy as np
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.embeddings import Embeddings
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import roc_curve
from sklearn.mixture import GaussianMixture
def _normalize_vectors(vectors):
return vectors / np.linalg.norm(vectors, axis=1, keepdims=True)
class RedeSearchDetector:
"""
    This class is an implementation of REDE, a method for detecting knowledge-seeking turns in a few-shot setting.
    It contains training functions for your custom model and an inference function for detecting knowledge-seeking turns.
    You will need non-knowledge-seeking turn dialogues; it also helps to have a few knowledge-seeking turn dialogues.
    The method is an implementation of the paper below:
@article{jin2021towards,
title={Towards zero and few-shot knowledge-seeking turn detection in task-orientated dialogue systems},
author={Jin, Di and Gao, Shuyang and Kim, Seokhwan and Liu, Yang and Hakkani-Tur, Dilek},
journal={arXiv preprint arXiv:2109.08820},
year={2021}
}
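    A minimal usage sketch (the sentence lists are hypothetical placeholders; the default embedding requires an OpenAI API key):
        detector = RedeSearchDetector()
        detector.find_representation_transform(knowledge_seeking_sentences)
        detector.train_density_estimation(GaussianMixture(n_components=1), non_knowledge_seeking_sentences)
        detector.find_threshold(valid_knowledge_seeking_sentences, valid_non_knowledge_seeking_sentences)
        is_knowledge_seeking = detector.detect(["Where can I find a charging station?"])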
"""
def __init__(self,
threshold: Optional[float] = None,
embedding: Optional[Embeddings] = None):
"""
:param embedding: Encoder model for encoding sentences to vectors. Langchain Embeddings class. Default is OpenAIEmbeddings.
        :param threshold: Threshold for classifying knowledge-seeking turns. If the density score is higher than the threshold, the turn is classified as non-knowledge-seeking.
        Find this threshold using your own training data. The default is 0.5, and you must run find_threshold before using detect.
"""
self.embedding = embedding if embedding is not None else OpenAIEmbeddings()
self.threshold = threshold if threshold is not None else 0.5
self.mu = None
self.omega_matrix = None # Omega matrix for linear transformation.
self.gmm = None # Gaussian Mixture Model for classify knowledge-seeking turn.
self.norm = None # Norm for normalize to unit vector.
def find_representation_transform(self,
knowledge_seeking_sentences: List[str],
L: Optional[int] = None,
):
"""
:param knowledge_seeking_sentences: Knowledge-seeking turn sentences. List[str].
:param L: Number of dimensions of the transformed representation. If None, use whole dimension.
Default is None.
"""
# find mu
vectors = np.array(self.embedding.embed_documents(knowledge_seeking_sentences))
self.mu = np.mean(vectors, axis=0)
# get covariance matrix
sigma = np.cov(vectors.T)
# singular value decomposition
U, S, V = np.linalg.svd(sigma)
# find omega matrix
self.omega_matrix = U @ np.sqrt(np.linalg.inv(np.diag(S)))
if L is not None:
self.omega_matrix = self.omega_matrix[:, :L]
print("REDE representation transform done.")
def representation_formation(self, vectors: np.ndarray) -> np.ndarray:
"""
:param vectors: Vectors after encoding. np.ndarray.
:return: Transformed vectors. np.ndarray.
"""
return (vectors - self.mu) @ self.omega_matrix
def train_density_estimation(self,
gmm: GaussianMixture,
non_knowledge_seeking_sentences: List[str]):
"""
:param gmm: Gaussian Mixture Model for classify knowledge-seeking turn. GaussianMixture. n_components must be 1.
:param non_knowledge_seeking_sentences: Non-knowledge-seeking turn sentences. List[str].
"""
self.gmm = gmm
sentence_vectors = np.array(self.embedding.embed_documents(non_knowledge_seeking_sentences))
transformed_vectors = np.array(
[self.representation_formation(sentence_vector) for sentence_vector in sentence_vectors])
# normalize to unit vector
transformed_vectors = _normalize_vectors(transformed_vectors)
self.gmm.fit(transformed_vectors)
def find_threshold(self,
valid_knowledge_seeking_sentences: List[str],
valid_non_knowledge_seeking_sentences: List[str]):
"""
Find threshold using Youden's index from validation data predictions.
:param valid_knowledge_seeking_sentences: knowledge-seeking turn sentences for validation. List[str].
You can put same sentences that you used for find_representation_transform function.
:param valid_non_knowledge_seeking_sentences: non-knowledge-seeking turn sentences for validation. List[str].
"""
true_scores = self._get_density_score(valid_knowledge_seeking_sentences)
false_scores = self._get_density_score(valid_non_knowledge_seeking_sentences)
y_true = np.concatenate([np.ones_like(true_scores), np.zeros_like(false_scores)])
y_score = true_scores + false_scores
fpr, tpr, thresholds = roc_curve(y_true, y_score)
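        # Knowledge-seeking turns (label 1) get lower density scores, so the score direction is inverted relative to
        # scikit-learn's convention; maximizing (fpr - tpr) here equals maximizing Youden's J for the "score < threshold" rule.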
idx = np.argmax(fpr - tpr)
self.threshold = thresholds[idx]
precision, recall, f1 = self._calculate_metrics(y_true, y_score)
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1: {f1}")
return self.threshold
def detect(self, sentences: List[str]) -> List[bool]:
"""
:param sentences: Sentences to detect. List[str].
:return: True if the sentence is knowledge-seeking turn, else False. List[bool].
"""
scores = self._get_density_score(sentences)
result = [score < self.threshold for score in scores]
return result
def evaluate(self, test_knowledge_seeking_sentences: List[str],
test_non_knowledge_seeking_sentences: List[str]):
"""
Evaluate rede search detector using test dataset.
:param test_knowledge_seeking_sentences: knowledge-seeking turn sentences for test. List[str].
:param test_non_knowledge_seeking_sentences: non-knowledge-seeking turn sentences for test. List[str].
"""
true_scores = self._get_density_score(test_knowledge_seeking_sentences)
false_scores = self._get_density_score(test_non_knowledge_seeking_sentences)
y_true = np.concatenate([np.ones_like(true_scores), np.zeros_like(false_scores)])
y_score = true_scores + false_scores
precision, recall, f1 = self._calculate_metrics(y_true, y_score)
print(f"Precision: {precision}")
print(f"Recall: {recall}")
print(f"F1: {f1}")
return precision, recall, f1
def _get_density_score(self, sentences: List[str]) -> List[float]:
sentence_vectors = np.array(self.embedding.embed_documents(sentences))
transformed_vectors = np.array([self.representation_formation(np.array(v)) for v in sentence_vectors])
transformed_vectors = _normalize_vectors(transformed_vectors)
scores = self._score_vectors(transformed_vectors)
return scores
def _score_vectors(self, vectors):
return [self.gmm.score(vector.reshape(1, -1)) for vector in vectors]
def _calculate_metrics(self, y_true, y_score):
predictions = np.where(y_score < self.threshold, 1, 0)
precision = precision_score(y_true, predictions)
recall = recall_score(y_true, predictions)
f1 = f1_score(y_true, predictions)
return precision, recall, f1
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~test_file_cache.py | import os
import pathlib
from typing import List
import pytest
from langchain.schema import Document
from langchain_core.runnables import RunnablePassthrough
from RAGchain.DB import PickleDB
from RAGchain.schema import Passage
from RAGchain.utils.file_cache import FileCache
test_passages: List[Passage] = [
Passage(content="test1", filepath="test1"),
Passage(content="test2", filepath="test2"),
Passage(content="test3", filepath="test2")
]
test_documents: List[Document] = [
Document(page_content="ttt1211", metadata={"source": "test1"}),
Document(page_content="asdf", metadata={"source": "test2"}),
Document(page_content="hgh", metadata={"source": "test3"}),
Document(page_content="egrgfg", metadata={"source": "test4"}),
Document(page_content="hhhh", metadata={"source": "test4"}),
]
@pytest.fixture
def file_cache():
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent
pickle_path = os.path.join(root_dir, "resources", "pickle", "test_file_cache.pkl")
db = PickleDB(save_path=pickle_path)
db.create_or_load()
db.save(test_passages)
file_cache = FileCache(db)
yield file_cache
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_file_cache(file_cache):
result_documents = file_cache.delete_duplicate(test_documents)
assert len(result_documents) == 3
for doc in result_documents:
assert doc.metadata['source'] != 'test1' and doc.metadata['source'] != 'test2'
def test_file_cache_runnable(file_cache):
runnable = RunnablePassthrough() | file_cache
result_documents = runnable.invoke(test_documents)
assert len(result_documents) == 3
for doc in result_documents:
assert doc.metadata['source'] != 'test1' and doc.metadata['source'] != 'test2'
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~retrieval~vectordb_retrieval.py | from typing import List, Union
from uuid import UUID
from langchain.schema import Document
from langchain.schema.vectorstore import VectorStore
from RAGchain.retrieval.base import BaseRetrieval
from RAGchain.schema import Passage
from RAGchain.utils.vectorstore.base import SlimVectorStore
class VectorDBRetrieval(BaseRetrieval):
"""
VectorDBRetrieval is a retrieval class that uses VectorDB as a backend.
First, embed the passage content using an embedding model.
Then, store the embedded vector in VectorDB.
When retrieving, embed the query and search the most similar vectors in VectorDB.
Lastly, return the passages that have the most similar vectors.
"""
def __init__(self, vectordb: VectorStore):
"""
:param vectordb: VectorStore instance. You can use any langchain VectorStore class; you can also use SlimVectorStore for better storage efficiency.
"""
super().__init__()
self.vectordb = vectordb
def ingest(self, passages: List[Passage]):
if isinstance(self.vectordb, SlimVectorStore):
self.vectordb.add_passages(passages)
else:
self.vectordb.add_documents(
[Document(page_content=passage.content, metadata={'passage_id': str(passage.id)}) for passage in
passages])
def retrieve(self, query: str, top_k: int = 5) -> List[Passage]:
ids = self.retrieve_id(query, top_k)
passage_list = self.fetch_data(ids)
return passage_list
def retrieve_id(self, query: str, top_k: int = 5) -> List[Union[str, UUID]]:
docs = self.vectordb.similarity_search(query=query, k=top_k)
return [self.__str_to_uuid(doc.metadata.get('passage_id')) for doc in docs]
def retrieve_id_with_scores(self, query: str, top_k: int = 5) -> tuple[
List[Union[str, UUID]], List[float]]:
results = self.vectordb.similarity_search_with_score(query=query, k=top_k)
results = results[::-1]
docs = [result[0] for result in results]
scores = [result[1] for result in results]
return [self.__str_to_uuid(doc.metadata.get('passage_id')) for doc in docs], scores
def delete(self, ids: List[Union[str, UUID]]):
self.vectordb.delete([str(_id) for _id in ids])
@staticmethod
def __str_to_uuid(input_str: str) -> Union[str, UUID]:
try:
return UUID(input_str)
except (TypeError, ValueError):
return input_str
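# Example usage: a minimal sketch of wiring VectorDBRetrieval to a slim Chroma store.
# The collection name, the sample passages, and the choice of OpenAIEmbeddings are
# illustrative assumptions only; any langchain VectorStore and embedding model can be used.
if __name__ == '__main__':
    from langchain.embeddings import OpenAIEmbeddings
    from RAGchain.utils.vectorstore.chroma import ChromaSlim

    vectordb = ChromaSlim(
        collection_name='vectordb-retrieval-example',
        embedding_function=OpenAIEmbeddings(),
    )
    retrieval = VectorDBRetrieval(vectordb)
    retrieval.ingest([
        Passage(content='The capital of France is Paris.', filepath='./france.txt'),
        Passage(content='The capital of Korea is Seoul.', filepath='./korea.txt'),
    ])
    # retrieve_id embeds the query and returns the ids of the most similar passages.
    print(retrieval.retrieve_id('Where is the capital of France?', top_k=1))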
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~vectorstore~pinecone.py | from typing import List, Any, Optional
from langchain.vectorstores import Pinecone
from RAGchain.schema import Passage
from RAGchain.utils.vectorstore.base import SlimVectorStore
class PineconeSlim(Pinecone, SlimVectorStore):
"""
Pinecone vector store stores only passage_id and vector.
"""
def add_passages(self, passages: List[Passage],
namespace: Optional[str] = None,
batch_size: int = 32,
**kwargs: Any):
if namespace is None:
namespace = self._namespace
# Embed and make metadatas
vectors = []
for passage in passages:
embedding = self._embedding.embed_query(passage.content)
vectors.append({
'id': str(passage.id),
'values': embedding,
'metadata': {'passage_id': str(passage.id),
self._text_key: ""}
})
self._index.upsert(
vectors=vectors,
namespace=namespace,
batch_size=batch_size,
**kwargs
)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_mr_tydi.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import MrTydiEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'mr_tydi_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'mr_tydi_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def mr_tydi_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="babbage-002")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = MrTydiEvaluator(pipeline, evaluate_size=5)
evaluator.ingest(retrievals=[bm25_retrieval], db=db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_mr_tydi_evaluator(mr_tydi_evaluator):
result = mr_tydi_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'Do zebra finches have stripes?'
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~preprocess~text_splitter~test_token_splitter.py | import os
import pathlib
import pytest
from langchain.schema import Document
from RAGchain.preprocess.text_splitter import TokenSplitter
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
file_path = os.path.join(root_dir, "resources", "sample_test_document.txt")
with open(file_path) as f:
state_of_the_union = f.read()
TEST_DOCUMENT = Document(
page_content=state_of_the_union,
metadata={
'source': 'test_source',
'Data information': '맨까 새끼들 부들부들하구나',
'What is it?': 'THis is token splitter'
}
)
@pytest.fixture
def tiktoken():
tiktoken = TokenSplitter(tokenizer_name='tiktoken', chunk_size=1000, chunk_overlap=0)
yield tiktoken
@pytest.fixture
def spaCy():
spaCy = TokenSplitter(tokenizer_name='spaCy', chunk_size=1000, chunk_overlap=0)
yield spaCy
@pytest.fixture
def sentence_transformers():
sentence_transformers = TokenSplitter(tokenizer_name='SentenceTransformers', chunk_overlap=0)
yield sentence_transformers
@pytest.fixture
def NLTK():
NLTK = TokenSplitter(tokenizer_name='NLTK', chunk_size=1000)
yield NLTK
@pytest.fixture
def Hugging_Face():
Hugging_Face = TokenSplitter(tokenizer_name='huggingFace', chunk_size=100, chunk_overlap=0)
yield Hugging_Face
def test_token_splitter(tiktoken, spaCy, sentence_transformers, NLTK, Hugging_Face):
tiktoken_passages = tiktoken.split_document(TEST_DOCUMENT)
spaCy_passages = spaCy.split_document(TEST_DOCUMENT)
SentenceTransformers_passages = sentence_transformers.split_document(TEST_DOCUMENT)
NLTK_passages = NLTK.split_document(TEST_DOCUMENT)
huggingface_passages = Hugging_Face.split_document(TEST_DOCUMENT)
test_passages = [tiktoken_passages, spaCy_passages, SentenceTransformers_passages, NLTK_passages,
huggingface_passages]
for passage in test_passages:
assert len(passage) > 1
assert passage[0].next_passage_id == passage[1].id
assert passage[1].previous_passage_id == passage[0].id
assert passage[0].filepath == 'test_source'
assert passage[0].filepath == passage[1].filepath
assert passage[0].previous_passage_id is None
assert passage[-1].next_passage_id is None
assert len(passage[0].metadata_etc) == 2
assert passage[0].metadata_etc['Data information'] == '맨까 새끼들 부들부들하구나'
assert passage[0].metadata_etc['What is it?'] == 'THis is token splitter'
# Check if TEST_DOCUMENT content put in passages.
for passage in test_passages:
if passage == SentenceTransformers_passages:
assert passage[0].content[:10] in TEST_DOCUMENT.page_content.strip()[:10].lower()
assert passage[0].content[:10] in TEST_DOCUMENT.page_content.strip()[:10].lower()
else:
assert passage[0].content[:10] in TEST_DOCUMENT.page_content.strip()[:10]
assert passage[0].content[:10] in TEST_DOCUMENT.page_content.strip()[:10]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~pipeline~visconde.py | from typing import List
from langchain.llms import BaseLLM
from langchain.schema import StrOutputParser
from langchain.schema.runnable import RunnableLambda
from RAGchain.pipeline.base import BaseRunPipeline
from RAGchain.reranker import MonoT5Reranker
from RAGchain.retrieval.base import BaseRetrieval
from RAGchain.schema import Passage, RAGchainPromptTemplate, RetrievalResult
from RAGchain.utils.query_decompose import QueryDecomposition
class ViscondeRunPipeline(BaseRunPipeline):
strategyqa_prompt = RAGchainPromptTemplate.from_template("""For each example, use the documents to create an \"Answer\" and an \"Explanation\" to the \"Question\". Just answer yes or no.
Example 1:
[Document 1]:
Title: San Tropez (song).
Content: \"San Tropez\" is the fourth track from the album Meddle by the band Pink Floyd.
This song was one of several to be considered for the band's \"best of\" album, Echoes: The Best of Pink Floyd.
[Document 2]:
Title: French Riviera.
Content: The French Riviera (known in French as the Côte d'Azur [kot daˈzyʁ]; Occitan: Còsta d'Azur [
ˈkɔstɔ daˈzyɾ]; literal translation \"Azure Coast\") is the Mediterranean coastline of the southeast corner of
France. There is no official boundary, but it is usually considered to extend from Cassis, Toulon or Saint-Tropez
on the west to Menton at the France–Italy border in the east, where the Italian Riviera joins. The coast is
entirely within the Provence-Alpes-Côte d'Azur (Région Sud) region of France. The Principality of Monaco is a
semi-enclave within the region, surrounded on three sides by France and fronting the Mediterranean.
[Document 3]:
Title: Moon Jae-in.
Content: Moon also promised transparency in his presidency, moving the presidential residence from the palatial and
isolated Blue House to an existing government complex in downtown Seoul.
[Document 4]:
Title: Saint-Tropez.
Content: Saint-Tropez (US: /ˌsæn troʊˈpeɪ/ SAN-troh-PAY, French: [sɛ̃ tʁɔpe]; Occitan: Sant-Tropetz , pronounced [san(t) tʀuˈpes]) is a town on the French Riviera,
68 kilometres (42 miles) west of Nice and 100 kilometres (62 miles) east of Marseille in the Var department of
the Provence-Alpes-Côte d'Azur region of Occitania, Southern France.
Question: Did Pink Floyd have a song about the French Riviera?
Explanation: According to [Document 1], \"San Tropez\" is a song by Pink Floyd about
the French Riviera. This is further supported by [Document 4], which states that Saint-Tropez is a town on the French Riviera.
Therefore, the answer is yes
Answer: yes.
Example 2:
{passages}
Question: {question}
Answer:
""")
def __init__(self,
retrieval: BaseRetrieval,
llm: BaseLLM,
decompose: QueryDecomposition = None,
prompt: RAGchainPromptTemplate = None,
use_passage_count: int = 3,
):
"""
Initializes an instance of the ViscondeRunPipeline class.
:param retrieval: An instance of the Retrieval module used for retrieving passages.
:param llm: An instance of the LLM module used for answering questions. You can't use chat models for Visconde Pipeline.
:param decompose: An instance of the QueryDecomposition module used for decomposing query. Default is QueryDecomposition().
:param prompt: RAGchainPromptTemplate used for generating prompts based on passages and user query.
Default is ViscondeRunPipeline.strategyqa_prompt.
:param use_passage_count: The number of passages to be used for llm question answering. Default is 3.
"""
self.retrieval = retrieval
self.llm = llm
self.decompose = decompose if decompose is not None else QueryDecomposition(self.llm)
self.prompt = prompt if prompt is not None else self.strategyqa_prompt
self.reranker = MonoT5Reranker()
self.use_passage_count = use_passage_count
super().__init__()
def _make_runnable(self):
self.run = self.decompose | RunnableLambda(
lambda queries: sum(self.retrieval.batch(
queries, config={'configurable': {"retrieval_options": {"top_k": 50}}}
)) # sum can drop duplicate elements automatically
) | self.reranker | RunnableLambda(
lambda x: x.slice(end=self.use_passage_count).to_prompt_input()
) | self.prompt | self.llm | StrOutputParser()
def get_passages_and_run(self, questions: List[str], top_k: int = 50) -> tuple[
List[str], List[List[Passage]], List[List[float]]]:
runnable = self.decompose | RunnableLambda(
lambda queries: sum(self.retrieval.batch(
queries, config={'configurable': {"retrieval_options": {"top_k": top_k}}}
))
) | self.reranker | RunnableLambda(
lambda x: x.slice(end=self.use_passage_count)
) | {
"passages": RunnableLambda(lambda x: x.passages),
"scores": RunnableLambda(lambda x: x.scores),
"answer": RunnableLambda(
RetrievalResult.to_prompt_input) | self.prompt | self.llm | StrOutputParser()
}
results = runnable.batch(questions)
answers, passages, rel_scores = zip(
*[(result['answer'], result['passages'], result['scores']) for result in results])
return list(answers), list(passages), list(rel_scores)
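# Example usage: a minimal sketch that decomposes a query, retrieves with BM25, reranks
# with MonoT5, and answers with an OpenAI completion model. The BM25 save path and the
# model name are placeholders; it assumes passages were already ingested into the BM25
# index (e.g. via retrieval.ingest(...)), and MonoT5Reranker downloads a large checkpoint
# on first use.
if __name__ == '__main__':
    from langchain.llms.openai import OpenAI
    from RAGchain.retrieval import BM25Retrieval

    retrieval = BM25Retrieval(save_path='./bm25_visconde_example.pkl')
    pipeline = ViscondeRunPipeline(retrieval, OpenAI(model_name='babbage-002'))
    answers, passages, scores = pipeline.get_passages_and_run(
        ['Did Pink Floyd have a song about the French Riviera?'], top_k=10)
    print(answers[0])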
| [
"For each example, use the documents to create an \"Answer\" and an \"Explanation\" to the \"Question\". Just answer yes or no.\n\n Example 1:\n\n [Document 1]: \n Title: San Tropez (song). \n Content: \"San Tropez\" is the fourth track from the album Meddle by the band Pink Floyd. \n This song was one of several to be considered for the band's \"best of\" album, Echoes: The Best of Pink Floyd.\n\n [Document 2]: \n Title: French Riviera. \n Content: The French Riviera (known in French as the Côte d'Azur [kot daˈzyʁ]; Occitan: Còsta d'Azur [\n ˈkɔstɔ daˈzyɾ]; literal translation \"Azure Coast\") is the Mediterranean coastline of the southeast corner of \n France. There is no official boundary, but it is usually considered to extend from Cassis, Toulon or Saint-Tropez \n on the west to Menton at the France–Italy border in the east, where the Italian Riviera joins. The coast is \n entirely within the Provence-Alpes-Côte d'Azur (Région Sud) region of France. The Principality of Monaco is a \n semi-enclave within the region, surrounded on three sides by France and fronting the Mediterranean.\n\n [Document 3]: \n Title: Moon Jae-in. \n Content: Moon also promised transparency in his presidency, moving the presidential residence from the palatial and \n isolated Blue House to an existing government complex in downtown Seoul.\n\n [Document 4]: \n Title: Saint-Tropez. \n Content: Saint-Tropez (US: /ˌsæn troʊˈpeɪ/ SAN-troh-PAY, French: [sɛ̃ tʁɔpe]; Occitan: Sant-Tropetz , pronounced [san(t) tʀuˈpes]) is a town on the French Riviera, \n 68 kilometres (42 miles) west of Nice and 100 kilometres (62 miles) east of Marseille in the Var department of \n the Provence-Alpes-Côte d'Azur region of Occitania, Southern France.\n\n\n Question: Did Pink Floyd have a song about the French Riviera?\n Explanation: According to [Document 1], \"San Tropez\" is a song by Pink Floyd about \n the French Riviera. This is further supported by [Document 4], which states that Saint-Tropez is a town on the French Riviera. \n Therefore, the answer is yes\n Answer: yes.\n\n Example 2:\n \n {passages}\n \n Question: {question}\n Answer:\n "
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~time~simple_time.py | from typing import List, Optional
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage
class SimpleTimeReranker(BaseReranker):
"""Rerank passages by their content_datetime only. It is simple reranker for time-aware RAG."""
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
# sort input.passages and passages.scores at once by content_datetime
sorted_pairs = sorted(zip(input.passages, input.scores), key=lambda p: p[0].content_datetime, reverse=True)
sorted_passages, sorted_scores = zip(*sorted_pairs)
input.passages = list(sorted_passages)
input.scores = list(sorted_scores)
return input
def rerank(self, passages: List[Passage]) -> List[Passage]:
"""
Rerank passages by their content_datetime only.
:param passages: list of passages to be reranked.
"""
return sorted(passages, key=lambda p: p.content_datetime, reverse=True)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~rust_hwp_loader.py | from typing import List, Iterator
from langchain.schema import Document
from langchain.document_loaders.base import BaseLoader
class RustHwpLoader(BaseLoader):
"""
Load an HWP file using libhwp. It works on any OS.
Using load or lazy_load, you can get a list of Documents from an hwp file.
This loader loads all paragraphs and tables from the hwp file.
The first Document contains every paragraph from the hwp file, including the text inside each table.
After that, there is a separate Document for each table's paragraphs.
Unfortunately, you can't distinguish rows and columns in a table.
In the metadata, the filepath is stored at key 'source' and page_type is either 'text' or 'table'.
Other hwp loaders are generally recommended, but this loader is a great option on macOS and Linux,
since it needs neither an external hwp loader server nor the hwp program that is only available on Windows.
"""
def __init__(self, path: str):
"""
:param path: hwp file path
"""
try:
from libhwp import HWPReader
except ImportError:
raise ImportError("Please install libhwp."
"pip install libhwp")
self.file_path = path
self.hwp = HWPReader(self.file_path)
def lazy_load(self) -> Iterator[Document]:
paragraph = " ".join([str(paragraph) for paragraph in self.hwp.find_all('paragraph')])
yield Document(page_content=paragraph, metadata={"source": self.file_path, 'page_type': 'text'})
for table in self.hwp.find_all('table'):
table_contents = []
for cell in table.cells:
for paragraph in cell.paragraphs:
table_contents.append(str(paragraph))
yield Document(page_content=",".join(table_contents),
metadata={"source": self.file_path, 'page_type': 'table'})
def load(self) -> List[Document]:
return list(self.lazy_load())
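# Example usage: a minimal sketch that loads an hwp file and prints each Document's
# page_type. The file path is a placeholder and libhwp must be installed.
if __name__ == '__main__':
    loader = RustHwpLoader('./sample.hwp')
    for doc in loader.load():
        print(doc.metadata['page_type'], len(doc.page_content))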
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~nougat_pdf_loader.py | import tempfile
from pathlib import Path
from typing import List, Iterator
from urllib.parse import urljoin, urlencode
import requests
from langchain.document_loaders.pdf import BasePDFLoader
from langchain.schema import Document
from RAGchain.preprocess.loader.mathpix_markdown_loader import MathpixMarkdownLoader
class NougatPDFLoader(BasePDFLoader):
"""
Load PDF file using Nougat API server.
You can run a Nougat API server using the Dockerfile at https://github.com/facebookresearch/nougat
"""
def __init__(self, file_path: str, nougat_host: str):
super().__init__(file_path)
# check connection of nougat api server
response = requests.get(nougat_host)
if response.status_code != 200:
raise ValueError(f"Could not connect to Nougat server: {nougat_host}")
self.nougat_host = nougat_host
def load(self, split_section: bool = True, split_table: bool = True, *args, **kwargs) -> List[Document]:
"""
:param split_section: If True, split the document by section.
:param split_table: If True, split the document by table.
:param start: Start page number to load. Optional.
:param stop: Stop page number to load. Optional.
"""
return list(self.lazy_load(split_section=split_section, split_table=split_table, *args, **kwargs))
def lazy_load(self, split_section: bool = True, split_table: bool = True, *args, **kwargs) -> Iterator[Document]:
"""
:param split_section: If True, split the document by section.
:param split_table: If True, split the document by table.
:param start: Start page number to load. Optional.
:param stop: Stop page number to load. Optional.
"""
request_url = urljoin(self.nougat_host, "predict/") + '?' + urlencode(kwargs)
file = {
'file': open(self.file_path, 'rb')
}
response = requests.post(request_url, files=file)
if response.status_code != 200:
raise ValueError(f'Nougat API server returns {response.status_code} status code.')
result = response.text
result = result.replace('\\n', '\n')
result = result[1:-1] # remove first and last double quote
with tempfile.NamedTemporaryFile() as temp_path:
Path(temp_path.name).write_text(result)
loader = MathpixMarkdownLoader(temp_path.name)
for doc in loader.lazy_load(split_section=split_section, split_table=split_table):
yield doc
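# Example usage: a minimal sketch; it assumes a Nougat API server is already running at the
# given host, and the PDF path and page range are placeholders.
if __name__ == '__main__':
    loader = NougatPDFLoader('./sample.pdf', nougat_host='http://localhost:8503')
    documents = loader.load(split_section=True, split_table=True, start=1, stop=3)
    print(len(documents))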
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~test_evidence_extractor.py | import logging
from operator import itemgetter
import pytest
from langchain.llms.openai import OpenAI
from langchain_core.output_parsers import StrOutputParser
from RAGchain.schema import Passage, RetrievalResult, RAGchainPromptTemplate
from RAGchain.utils.evidence_extractor import EvidenceExtractor
logger = logging.getLogger(__name__)
TEST_PASSAGES = [
Passage(
content='Table TABREF19 and TABREF26 report zero-shot results on Europarl and Multi-UN evaluation sets, respectively. We compare our approaches with related approaches of pivoting, multilingual NMT (MNMT) BIBREF19, and cross-lingual transfer without pretraining BIBREF16. The results show that our approaches consistently outperform other approaches across languages and datasets, especially surpass pivoting, which is a strong baseline in the zero-shot scenario that multilingual NMT systems often fail to beat BIBREF19, BIBREF20, BIBREF23. Pivoting translates source to pivot then to target in two steps, causing inefficient translation process. Our approaches use one encoder-decoder model to translate between any zero-shot directions, which is more efficient than pivoting. Regarding the comparison between transfer approaches, our cross-lingual pretraining based transfer outperforms transfer method that does not use pretraining by a large margin.',
filepath='test_filepath'),
Passage(
content="Regarding comparison between the baselines in table TABREF19, we find that pivoting is the strongest baseline that has significant advantage over other two baselines. Cross-lingual transfer for languages without shared vocabularies BIBREF16 manifests the worst performance because of not using source$\\leftrightarrow $pivot parallel data, which is utilized as beneficial supervised signal for the other two baselines.",
filepath='test_filepath'),
Passage(
content='Our best approach of MLM+BRLM-SA achieves the significant superior performance to all baselines in the zero-shot directions, improving by 0.9-4.8 BLEU points over the strong pivoting. Meanwhile, in the supervised direction of pivot$\\rightarrow $target, our approaches performs even better than the original supervised Transformer thanks to the shared encoder trained on both large-scale monolingual data and parallel data between multiple languages.',
filepath='test_filepath'),
Passage(
content='MLM alone that does not use source$\\leftrightarrow $pivot parallel data performs much better than the cross-lingual transfer, and achieves comparable results to pivoting. When MLM is combined with TLM or the proposed BRLM, the performance is further improved. MLM+BRLM-SA performs the best, and is better than MLM+BRLM-HA indicating that soft alignment is helpful than hard alignment for the cross-lingual pretraining.',
filepath='test_filepath')
]
@pytest.fixture
def evidence_extractor():
yield EvidenceExtractor(OpenAI())
def test_evidence_extractor(evidence_extractor):
question = 'which multilingual approaches do they compare with?'
evidence = evidence_extractor.extract(question, TEST_PASSAGES)
logger.info(f'Evidence: {evidence}')
assert bool(evidence) is True
assert "We compare our approaches with related approaches of pivoting, multilingual NMT (MNMT) BIBREF19, and cross-lingual transfer without pretraining BIBREF16." in evidence
irrelevant_question = 'what is the purpose of RAGchain project?'
evidence = evidence_extractor.extract(irrelevant_question, TEST_PASSAGES)
logger.info(f'Irrelevant Evidence: {evidence}')
assert 'No Fragment' in evidence
def test_evidence_extractor_runnable(evidence_extractor):
question = 'which multilingual approaches do they compare with?'
retrieval_result = RetrievalResult(
query=question,
passages=TEST_PASSAGES,
scores=[1.0, 0.9, 0.8, 0.7],
)
prompt = RAGchainPromptTemplate.from_template("""
Answer the question using the given evidence.
Question: {question}
Evidence: {passages}
Answer:
""")
runnable = {
"question": itemgetter("question"),
"passages": itemgetter("passages") | evidence_extractor,
} | prompt | OpenAI() | StrOutputParser()
answer = runnable.invoke({"question": question, "passages": retrieval_result})
logger.info(f'Answer: {answer}')
assert bool(answer) is True
answers = runnable.batch([{"question": question, "passages": retrieval_result}])
assert isinstance(answers, list)
assert len(answers) == 1
logger.info(f'Answer: {answers[0]}')
assert bool(answers[0]) is True
| [
"\n Answer the question using the given evidence.\n Question: {question}\n Evidence: {passages}\n \n Answer:\n "
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~time~weighted_time.py | from datetime import datetime
from typing import List, Optional
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
class WeightedTimeReranker(BaseReranker):
"""
Rerank passages by their content_datetime and relevance score.
First, the relevance scores are normalized to the [0, 1] range.
Then the combined score is calculated with the following formula:
score = (1 - decay_rate) ** hours_passed + relevance_score
The larger the decay_rate, the more the scores of older passages are reduced.
"""
def __init__(self, decay_rate: float = 0.01):
"""
:param decay_rate: decay rate of the time weight. The larger the value, the faster older passages lose score.
"""
super().__init__()
self.decay_rate = decay_rate
def rerank(self, passages: List[Passage], scores: List[float]) -> List[Passage]:
"""
:param passages: list of passages to be reranked.
:param scores: list of relevance scores of passages.
"""
retrieval_result = RetrievalResult(query="", passages=passages, scores=scores)
return self.invoke(retrieval_result).passages
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
now = datetime.now()
scores = input.scores
passages = input.passages
# normalize scores
scaled_scores = [(score - min(scores)) / (max(scores) - min(scores)) for score in scores]
combined_scores = [self.__get_combined_score(passage, score=score, now=now)
for passage, score in zip(passages, scaled_scores)]
sorted_passages, sorted_scores = zip(*sorted(zip(passages, combined_scores), key=lambda x: x[1], reverse=True))
input.passages = list(sorted_passages)
input.scores = list(sorted_scores)
return input
def __get_combined_score(self, passage: Passage, score: float, now: datetime = datetime.now()):
passed_hours = (now - passage.content_datetime).total_seconds() / 3600
return ((1.0 - self.decay_rate) ** passed_hours) + score
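# Worked example of the combined score (a sketch with made-up passages and scores).
# With decay_rate=0.01, a passage edited 24 hours ago gets a time weight of
# 0.99 ** 24 ≈ 0.79 versus ≈ 1.0 for a fresh one, so it needs roughly 0.21 more
# normalized relevance to rank ahead of the fresh passage.
if __name__ == '__main__':
    from datetime import timedelta

    now = datetime.now()
    passages = [
        Passage(content='fresh passage', filepath='./a.txt', content_datetime=now),
        Passage(content='day-old passage', filepath='./b.txt',
                content_datetime=now - timedelta(hours=24)),
    ]
    reranker = WeightedTimeReranker(decay_rate=0.01)
    # relevance scores are min-max normalized to [0, 1] before being combined.
    print([p.content for p in reranker.rerank(passages, scores=[0.2, 0.9])])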
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~schema~passage.py | from datetime import datetime
from typing import Optional, Union, List, Dict
from uuid import UUID, uuid4
from langchain.load.serializable import Serializable
from langchain.schema import Document
from pydantic import Field, Extra
class Passage(Serializable):
"""Class for storing a passage and metadatas"""
id: Union[UUID, str] = Field(default_factory=uuid4)
"""Unique identifier for the passage. You can use string or UUID. By default, create new UUID for new passage."""
content: str
"""String text."""
filepath: str
"""Filepath of the passage."""
content_datetime: datetime = Field(default_factory=datetime.now)
"""Datetime when the passage content is created or edited. Everytime passge content changes, this value should be
updated."""
importance: int = Field(default=0)
"""Importance of the passage. The higher the value, the more important the passage is. It can be minus value.
The default is 0."""
previous_passage_id: Optional[Union[UUID, str]]
"""Previous passage's id. If this is the first passage, this value should be None."""
next_passage_id: Optional[Union[UUID, str]]
"""Next passage's id. If this is the last passage, this value should be None."""
metadata_etc: dict = Field(default_factory=dict)
"""Arbitrary metadata about the passage."""
# forbid to use another parameter
class Config:
extra = Extra.forbid
def to_document(self) -> Document:
metadata = self.metadata_etc.copy()
metadata['id'] = self.id
metadata['content'] = self.content
metadata['filepath'] = self.filepath
metadata['content_datetime'] = self.content_datetime
metadata['importance'] = self.importance
metadata['previous_passage_id'] = self.previous_passage_id
metadata['next_passage_id'] = self.next_passage_id
return Document(page_content=self.content, metadata=metadata)
def to_dict(self):
return {
"_id": self.id,
"content": self.content,
"filepath": self.filepath,
"content_datetime": self.content_datetime,
"importance": self.importance,
"previous_passage_id": self.previous_passage_id,
"next_passage_id": self.next_passage_id,
"metadata_etc": self.metadata_etc
}
def __eq__(self, other):
if isinstance(other, Passage):
return self.id == other.id
return False
def __hash__(self):
return hash(self.id)
def is_exactly_same(self, other):
return self.id == other.id and \
self.content == other.content and \
self.filepath == other.filepath and \
self.content_datetime == other.content_datetime and \
self.importance == other.importance and \
self.previous_passage_id == other.previous_passage_id and \
self.next_passage_id == other.next_passage_id and \
self.metadata_etc == other.metadata_etc
@staticmethod
def make_prompts(passages: List['Passage']) -> str:
return "\n".join([passage.content for passage in passages])
def copy(self, *args, **kwargs):
self_params = self.dict()
for key in list(kwargs.keys()):
self_params.pop(key)
return Passage(**self_params, **kwargs)
def reset_id(self):
self.id = uuid4()
return self
@classmethod
def from_documents(cls, documents: List[Document]) -> List['Passage']:
"""
Convert a list of documents to a list of passages.
metadata with the 'source' key is required. It is converted to the filepath field.
metadata with the 'content_datetime' key is optional. It is converted to the content_datetime field.
It can be a datetime.datetime object or a string in '%Y-%m-%d %H:%M:%S' format.
metadata with the 'importance' key is optional. It is converted to the importance field and must be an int.
:param documents: A list of documents.
"""
passages = []
ids = [uuid4() for _ in range(len(documents))]
for i, (split_document, uuid) in enumerate(zip(documents, ids)):
metadata_etc = split_document.metadata.copy()
filepath = metadata_etc.pop('source', None)
if filepath is None:
raise ValueError(f"source must be provided in metadata, but got {metadata_etc}")
previous_passage_id = ids[i - 1] if i > 0 else None
next_passage_id = ids[i + 1] if i < len(ids) - 1 else None
passage = cls(id=uuid,
content=split_document.page_content,
filepath=filepath,
previous_passage_id=previous_passage_id,
next_passage_id=next_passage_id,
metadata_etc=metadata_etc)
# put content_datetime
content_datetime = metadata_etc.pop('content_datetime', None)
if content_datetime is not None:
if isinstance(content_datetime, str):
content_datetime = datetime.strptime(content_datetime, '%Y-%m-%d %H:%M:%S')
if not isinstance(content_datetime, datetime):
raise TypeError(f"content_datetime must be datetime, but got {type(content_datetime)}")
passage.content_datetime = content_datetime
# put importance
importance = metadata_etc.pop('importance', None)
if importance is not None:
if not isinstance(importance, int):
raise TypeError(f"importance must be int, but got {type(importance)}")
passage.importance = importance
passages.append(passage)
print(f"Split into {len(passages)} passages")
return passages
@classmethod
def from_search(cls, search_results: List[Dict[str, str]]) -> List['Passage']:
"""
Convert a list of search results to a list of passages.
:param search_results: A list of search results, it requires 'title', 'link', 'snippet' keys.
"""
if len(search_results) == 0:
return []
passages = []
ids = [uuid4() for _ in range(len(search_results))]
for i, (search_results, uuid) in enumerate(zip(search_results, ids)):
metadata_etc = {"title": search_results["title"]}
filepath = search_results["link"]
previous_passage_id = ids[i - 1] if i > 0 else None
next_passage_id = ids[i + 1] if i < len(ids) - 1 else None
passage = cls(id=uuid,
content=search_results["snippet"],
filepath=filepath,
previous_passage_id=previous_passage_id,
next_passage_id=next_passage_id,
metadata_etc=metadata_etc)
passages.append(passage)
return passages
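# Example usage: a minimal sketch converting langchain Documents into doubly linked
# Passages. The source path, datetime string, and importance value are placeholders.
if __name__ == '__main__':
    docs = [
        Document(page_content='first chunk',
                 metadata={'source': './example.txt',
                           'content_datetime': '2024-01-01 00:00:00',
                           'importance': 1}),
        Document(page_content='second chunk', metadata={'source': './example.txt'}),
    ]
    passages = Passage.from_documents(docs)
    # passages are linked through previous_passage_id / next_passage_id.
    assert passages[0].next_passage_id == passages[1].id
    assert passages[1].previous_passage_id == passages[0].id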
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_msmarco.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import MSMARCOEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
logger = logging.getLogger(__name__)
v1_1_bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'msmarco_v1_1_evaluator.pkl')
v1_1_pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'msmarco_v1_1_evaluator.pkl')
v2_1_bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'msmarco_v2_1_evaluator.pkl')
v2_1_pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'msmarco_v2_1_evaluator.pkl')
@pytest.fixture
def msmarco_v1_1_evaluator():
bm25_retrieval = BM25Retrieval(save_path=v1_1_bm25_path)
db = PickleDB(v1_1_pickle_path)
llm = OpenAI(model_name="babbage-002")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = MSMARCOEvaluator(pipeline, evaluate_size=5,
version='v1.1')
evaluator.ingest([bm25_retrieval], db, ingest_size=20)
yield evaluator
if os.path.exists(v1_1_bm25_path):
os.remove(v1_1_bm25_path)
if os.path.exists(v1_1_pickle_path):
os.remove(v1_1_pickle_path)
@pytest.fixture
def msmarco_v2_1_evaluator():
bm25_retrieval = BM25Retrieval(save_path=v2_1_bm25_path)
db = PickleDB(v2_1_pickle_path)
llm = OpenAI(model_name="babbage-002")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = MSMARCOEvaluator(pipeline, evaluate_size=5,
version='v2.1')
evaluator.ingest([bm25_retrieval], db, ingest_size=20)
yield evaluator
if os.path.exists(v2_1_bm25_path):
os.remove(v2_1_bm25_path)
if os.path.exists(v2_1_pickle_path):
os.remove(v2_1_pickle_path)
def test_msmarco_evaluator(msmarco_v1_1_evaluator, msmarco_v2_1_evaluator):
result_v1_1 = msmarco_v1_1_evaluator.evaluate()
assert len(result_v1_1.each_results) == 5
assert result_v1_1.each_results.iloc[0]['question'] == 'does human hair stop squirrels'
assert result_v1_1.each_results.iloc[0]['answer_pred']
logger.info('The result of msmarco v1.1 dataset.')
for key, value in result_v1_1.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result_v1_1.results)}")
result_v2_1 = msmarco_v2_1_evaluator.evaluate()
assert len(result_v2_1.each_results) == 5
assert result_v2_1.each_results.iloc[0]['question'] == '. what is a corporation?'
assert result_v2_1.each_results.iloc[0]['answer_pred']
logger.info('The result of msmarco v2.1 dataset.')
for key, value in result_v2_1.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result_v2_1.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~preprocess~text_splitter~test_html_header_splitter.py | import copy
import pytest
from langchain.schema import Document
from RAGchain.preprocess.text_splitter import HTMLHeaderSplitter
TEST_DOCUMENT = Document(
page_content="""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>노동요</title>
</head>
<body>
<div>
<h1>학박사님을 아세유?</h1>
<p>안하긴뭘 안해~ 월요일부터 일찍 일어나는 사람 누구야~ 소리질러!</p>
<h2>학교가는 동규형</h2>
<div>
<p>
학 학 학 학 학 학<br>
학박사님을 아세요??<br>
학 학 학 학 학 학<br>
</p>
</div>
<h3> 근데 리뷰할때 동규형이 보면 어떡하지</h3>
</div>
<div>
<h1>리중딱</h1>
<h2>감스트</h2>
<div>
<p>
안하긴뭘안해~~ 반갑습니다~~ 이피엘에서 우승못하는팀 누구야? 소리질러~~!!<br>
리중딱 리중딱 신나는노래~ 나도한번 불러본다~~(박수) (박수) (박수) 짠리잔짠~~<br>
우리는 우승하기 싫~어~ 왜냐면 우승하기 싫은팀이니깐~ 20년 내~내~ 프리미어리그~ 우승도 못하는 우리팀이다.<br>
리중딱 리중딱 신나는노래 ~~~ 나도한번불러본다~<br>
리중딱 리중딱 신나는노래 ~~ 가슴치며 불러본다~<br>
리중딱 노래가사는~ 생활과 정보가 있는노래 중딱이~~와 함께라면 제~라드도함께 우승못한다.
</p>
</div>
<h3>근데 ragchain 쓰는 사람이 리버풀팬이면 어떡하지</h3>
<div>
<p>
난 몰라유 그딴거 잘 몰라유
</p>
</div>
</div>
<div>
<h1>맨까송</h1>
<h2>감빡이</h2>
<div>
<p>
맨까 새끼들 부들부들하구나<br>
억까를 해 봐도 우린 골 넣지<br>
니네가 아무리 맹구다 어쩐다고 놀려도<br>
아아~ 즐겁구나 명 절 이~(짜스!)<br>
맨까 새끼들 부들부들하구나<br>
살짝쿵 설렜니 아니 안 되지<br>
이겨도 지롤 져도 지롤 뭐만 하면 리그컵<br>
아~ 리그컵도 축 군 데~ (컴온!!)<br>
맨까 새끼들 부들부들하구나<br>
돌아온 미친 폼 누가 막을래?<br>
더 보기 리그 탈출 직전[다른가사2] 돌아와요 맨유 팬!<br>
아~ 기대된다 챔 스 가~ Siuuuuuuu!<br>
</p>
</div>
<h3>근데 ragchain 쓰는 사람이 맨유팬이면 어떡하지</h3>
<div>
<p>
열심히 하시잖아~, 그만큼 열심히 하신다는거지~
</p>
</div>
</div>
</div>
</body>
</html>
""",
metadata={
'source': 'test_source',
# Check whether metadata_etc keeps the multiple metadata entries from TEST_DOCUMENT.
'Data information': 'test for htmldownheader splitter',
'근본과 실력 둘다 있는 팀': '레알마드리드',
'근본만 충만한 팀': '리버풀',
'실력은 있으나 노근본인팀': '파리 생제르망',
'둘다 없는 팀': '토트넘 홋스퍼'
}
)
@pytest.fixture
def html_header_text_splitter():
html_header_text_splitter = HTMLHeaderSplitter()
yield html_header_text_splitter
def test_html_header_text_splitter(html_header_text_splitter):
passages = html_header_text_splitter.split_document(TEST_DOCUMENT)
assert len(passages) > 1
assert passages[0].next_passage_id == passages[1].id
assert passages[1].previous_passage_id == passages[0].id
assert passages[0].filepath == 'test_source'
assert passages[0].filepath == passages[1].filepath
assert passages[0].previous_passage_id is None
assert passages[-1].next_passage_id is None
# Check first passage whether it contains header information of fist layout(first div).
assert ('학박사님을 아세유? 학교가는 동규형 근데 리뷰할때 동규형이 보면 어떡하지') in passages[0].content
# Check splitter preserve other metadata in original document.
test_document_metadata = list(copy.deepcopy(TEST_DOCUMENT).metadata.items())
test_document_metadata.pop(0)
for element in test_document_metadata:
assert element in list(passages[1].metadata_etc.items())
# Check passages' metadata_etc
## metadata_etc must not contain the file path (except for the first part of the first div).
assert ('source', 'test_source') not in list(passages[1].metadata_etc.items())
assert ('source', 'test_source') not in list(passages[-1].metadata_etc.items())
# Check HTML header information put in metadata_etc right form.
assert ('Header 1', '학박사님을 아세유?') in list(passages[1].metadata_etc.items())
assert ('Header 1', '맨까송') in list(passages[-1].metadata_etc.items())
assert ('Header 2', '감빡이') in list(passages[-1].metadata_etc.items())
assert ('Header 3', '근데 ragchain 쓰는 사람이 맨유팬이면 어떡하지') in list(passages[-1].metadata_etc.items())
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~text_splitter~text_splitter.py | from typing import Optional, List
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from RAGchain.preprocess.text_splitter.base import BaseTextSplitter
from RAGchain.schema import Passage
class RecursiveTextSplitter(BaseTextSplitter):
"""
Split a document into passages by recursively splitting on a list of separators.
You can specify chunk_size and chunk_overlap (forwarded to the underlying splitter) to split the document into overlapping passages.
"""
def __init__(self, separators: Optional[List[str]] = None,
keep_separator: bool = True,
*args, **kwargs):
"""
:param separators: A list of strings to split on. Default is None.
:param keep_separator: Whether to keep the separator in the passage. Default is True.
:param kwargs: Additional arguments to pass to the langchain RecursiveCharacterTextSplitter.
"""
self.splitter = RecursiveCharacterTextSplitter(separators, keep_separator, **kwargs)
def split_document(self, document: Document) -> List[Passage]:
"""
Split a document.
"""
split_documents = self.splitter.split_documents([document])
passages = Passage.from_documents(split_documents)
return passages
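# Example usage: a minimal sketch; chunk_size and chunk_overlap are forwarded to
# langchain's RecursiveCharacterTextSplitter, and the sample text is a placeholder.
if __name__ == '__main__':
    splitter = RecursiveTextSplitter(chunk_size=200, chunk_overlap=50)
    document = Document(page_content='RAGchain splits long documents into passages. ' * 20,
                        metadata={'source': './example.txt'})
    passages = splitter.split_document(document)
    print(len(passages))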
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~test_auto.py | import logging
import os
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark import AutoEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
from test_base import TEST_PASSAGES, root_dir
bm25_path = os.path.join(root_dir, "resources", "bm25", "auto_evaluator.pkl")
pickle_path = os.path.join(root_dir, "resources", "pickle", "auto_evaluator.pkl")
logger = logging.getLogger(__name__)
@pytest.fixture
def auto_evaluator():
db = PickleDB(pickle_path)
db.create_or_load()
db.save(TEST_PASSAGES)
retrieval = BM25Retrieval(bm25_path)
retrieval.ingest(TEST_PASSAGES)
pipeline = BasicRunPipeline(retrieval=retrieval, llm=OpenAI())
yield AutoEvaluator(pipeline, questions=[
"Where is the capital of France?",
"Where is the largest city in Seoul?",
"What is common between Seoul and Paris?"
])
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_auto_evaluator(auto_evaluator):
result = auto_evaluator.evaluate()
for key, res in result.results.items():
assert res >= 0.0
logger.info(f"{key}: {res}")
assert len(result.each_results) == 3
assert len(result.use_metrics) == len(auto_evaluator.metrics)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~importance~weighted_importance.py | from typing import List, Optional
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
class WeightedImportanceReranker(BaseReranker):
"""
Rerank passages by their importance and relevance score.
First, the relevance scores and importance values are normalized to the [0, 1] range.
Then the combined score is calculated with the following formula:
score = (weight) * importance + (1 - weight) * relevance_score
The larger the weight, the more the importance affects the final score.
"""
def __init__(self, importance_weight: float = 0.5):
"""
:param importance_weight: weight of the importance term. The larger the value, the more the importance affects the final score.
"""
super().__init__()
self.importance_weight = importance_weight
def rerank(self, passages: List[Passage], scores: List[float]) -> List[Passage]:
"""
:param passages: list of passages to be reranked.
:param scores: list of relevance scores of passages.
"""
result = self.invoke(RetrievalResult(query='', passages=passages, scores=scores))
return result.passages
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
scores = input.scores
passages = input.passages
normalize_rel_scores = self.__normalize(scores)
normalize_importance = self.__normalize([passage.importance for passage in passages])
combined_scores = [self.__get_combined_score(rel_score, importance) for rel_score, importance in
zip(normalize_rel_scores, normalize_importance)]
sorted_passages, sorted_scores = zip(*sorted(zip(passages, combined_scores), key=lambda x: x[1], reverse=True))
input.passages = list(sorted_passages)
input.scores = list(sorted_scores)
return input
@staticmethod
def __normalize(scores: List[float]) -> List[float]:
"""
:param scores: list of scores to be normalized.
"""
return [(score - min(scores)) / (max(scores) - min(scores)) for score in scores]
def __get_combined_score(self, rel_score: float, importance: float):
return self.importance_weight * importance + (1 - self.importance_weight) * rel_score
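# Worked example of the combined score (a sketch with made-up passages and scores).
# With importance_weight=0.7, the pinned passage (normalized importance 1.0, normalized
# relevance 0.0) scores 0.7 * 1.0 + 0.3 * 0.0 = 0.7 and outranks the more relevant but
# unimportant passage, which scores 0.7 * 0.0 + 0.3 * 1.0 = 0.3.
if __name__ == '__main__':
    passages = [
        Passage(content='routine note', filepath='./a.txt', importance=0),
        Passage(content='pinned announcement', filepath='./b.txt', importance=5),
    ]
    reranker = WeightedImportanceReranker(importance_weight=0.7)
    print([p.content for p in reranker.rerank(passages, scores=[0.9, 0.3])])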
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~vectorstore~chroma.py | from typing import List
from langchain.vectorstores import Chroma
from RAGchain.schema import Passage
from RAGchain.utils.vectorstore.base import SlimVectorStore
class ChromaSlim(Chroma, SlimVectorStore):
"""
Chroma vector store stores only passage_id and vector.
"""
def add_passages(self, passages: List[Passage]):
embeddings = None
if self._embedding_function is not None:
contents = [passage.content for passage in passages]
embeddings = self._embedding_function.embed_documents(contents)
metadatas = [{"passage_id": str(passage.id)} for passage in passages]
self._collection.upsert(
embeddings=embeddings,
metadatas=metadatas,
ids=[str(passage.id) for passage in passages],
documents=["" for _ in range(len(passages))]
)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~pygaggle~monoT5.py | from typing import List, Optional
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
from .base import Query, Text
from .transformer import MonoT5
class MonoT5Reranker(BaseReranker):
"""
Rerank the passages using MonoT5 model.
The model will be downloaded from HuggingFace model hub.
"""
def __init__(self,
model_name: str = 'castorini/monot5-3b-msmarco-10k',
use_amp: bool = False,
token_false=None,
token_true=None,
*args, **kwargs):
self.reranker = MonoT5(pretrained_model_name_or_path=model_name, use_amp=use_amp, token_false=token_false,
token_true=token_true)
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
assert isinstance(input, RetrievalResult), f'input must be RetrievalResult, but {type(input)} is given.'
input_query = Query(text=input.query)
input_texts = list(map(self.__passage_to_text, input.passages))
reranked_texts: List[Text] = self.reranker.rerank(input_query, input_texts)
scores = list(map(lambda x: x.score, reranked_texts))
result_passage = list(map(self.__text_to_passage, reranked_texts))
input.passages = result_passage
input.scores = scores
return input
def rerank(self, query: str, passages: List[Passage]) -> List[Passage]:
retrieval_result = RetrievalResult(query=query, passages=passages, scores=[])
result = self.invoke(retrieval_result)
return result.passages
@staticmethod
def __passage_to_text(passage: Passage) -> Text:
return Text(text=passage.content,
metadata={
'id': passage.id,
'filepath': passage.filepath,
'content_datetime': passage.content_datetime,
'importance': passage.importance,
'previous_passage_id': passage.previous_passage_id,
'next_passage_id': passage.next_passage_id,
'metadata_etc': passage.metadata_etc
})
@staticmethod
def __text_to_passage(text: Text) -> Passage:
return Passage(
id=text.metadata['id'],
content=text.text,
filepath=text.metadata['filepath'],
content_datetime=text.metadata['content_datetime'],
importance=text.metadata['importance'],
previous_passage_id=text.metadata['previous_passage_id'],
next_passage_id=text.metadata['next_passage_id'],
metadata_etc=text.metadata['metadata_etc']
)
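# Example usage: a minimal sketch; the default castorini/monot5-3b-msmarco-10k checkpoint
# is large, and model_name can point to any other MonoT5 checkpoint if preferred. The
# sample query and passages are placeholders.
if __name__ == '__main__':
    reranker = MonoT5Reranker()
    passages = [
        Passage(content='Seoul is the capital of South Korea.', filepath='./korea.txt'),
        Passage(content='Paris is the capital of France.', filepath='./france.txt'),
    ]
    print([p.content for p in reranker.rerank('Where is the capital of France?', passages)])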
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~test_query_decompose.py | import logging
from typing import List
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.utils.query_decompose import QueryDecomposition
logger = logging.getLogger(__name__)
query = "Is Disneyland in Paris the largest Disneyland in the world?"
@pytest.fixture
def query_decompose():
yield QueryDecomposition(OpenAI(temperature=0.2))
def test_query_decompose(query_decompose):
result = query_decompose.decompose(query)
check_decompose(result)
def test_query_decompose_runnable(query_decompose):
result = query_decompose.invoke(query)
check_decompose(result)
def check_decompose(result: List[str]):
logger.info(f"result : {result}")
assert len(result) > 1
for res in result:
assert isinstance(res, str)
assert bool(res)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_natural_qa.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import NaturalQAEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'natural_qa_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'natural_qa_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def naturalqa_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="babbage-002")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = NaturalQAEvaluator(pipeline, evaluate_size=5)
evaluator.ingest([bm25_retrieval], db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_naturalqa_evaluator(naturalqa_evaluator):
result = naturalqa_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0]['question'] == '1 . what was the precursor to the present day internet'
assert result.each_results.iloc[0]['answer_pred']
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~bm25.py | from typing import List, Optional
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from rank_bm25 import BM25Okapi
from transformers import AutoTokenizer
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
class BM25Reranker(BaseReranker):
"""
BM25Reranker reranks passages based on BM25.
You can rerank the passages with their BM25 scores.
"""
def __init__(self, tokenizer_name: str = "gpt2", *args, **kwargs):
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
def rerank(self, query: str, passages: List[Passage]) -> List[Passage]:
retrieval_result = RetrievalResult(query=query, passages=passages, scores=[])
result = self.invoke(retrieval_result)
return result.passages
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
contents: List[str] = list(map(lambda x: x.content, input.passages))
# tokenize content for bm25 instance
tokenized_content = self.__tokenize(contents)
# tokenize query
tokenized_query = self.__tokenize([input.query])[0]
bm25 = BM25Okapi(tokenized_content)
scores = bm25.get_scores(tokenized_query)
sorted_passages, sorted_scores = zip(*sorted(zip(input.passages, scores), key=lambda x: x[1], reverse=True))
input.passages = list(sorted_passages)
input.scores = list(sorted_scores)
return input
def __tokenize(self, values: List[str]):
tokenized = self.tokenizer(values)
return tokenized.input_ids
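# Example usage: a minimal sketch; the gpt2 tokenizer is fetched from HuggingFace on
# first use, and the sample query and passages are placeholders.
if __name__ == '__main__':
    reranker = BM25Reranker()
    passages = [
        Passage(content='Seoul is the capital of South Korea.', filepath='./korea.txt'),
        Passage(content='Paris is the capital of France.', filepath='./france.txt'),
    ]
    print([p.content for p in reranker.rerank('capital of France', passages)])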
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~test_base.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.base import BaseEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.pipeline.base import BaseRunPipeline
from RAGchain.retrieval import BM25Retrieval
from RAGchain.schema import Passage
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent
bm25_path = os.path.join(root_dir, "resources", "bm25", "base_evaluator.pkl")
pickle_path = os.path.join(root_dir, "resources", "pickle", "base_evaluator.pkl")
logger = logging.getLogger(__name__)
TEST_PASSAGES = [
Passage(id='id-1',
content='The capital of Korea is Seoul. And it is well-known.',
filepath='./korea.txt'),
Passage(id='id-2',
content='The capital of France is Paris. And it is well-known.',
filepath='./france.txt'),
Passage(id='id-3',
content='The capital of Germany is Berlin. And it is well-known.',
filepath='./germany.txt'),
Passage(id='id-4',
content='The capital of Japan is Tokyo. And it is well-known.',
filepath='./japan.txt'),
Passage(id='id-5',
content='The capital of China is Beijing. And it is well-known.',
filepath='./china.txt'),
Passage(id='id-6',
content='The capital of Argentina is Buenos Aires.',
filepath='./argentina.txt'),
Passage(id='id-7',
content='As of 2021, the largest city in Korea is Seoul.',
filepath='./korea.txt'),
Passage(id='id-8',
content='As of 2021, the largest city in France is Paris.',
filepath='./france.txt'),
Passage(id='id-9',
content='As of 2021, the largest city in Germany is Berlin.',
filepath='./germany.txt'),
Passage(id='id-10',
content='As of 2021, the largest city in Japan is Tokyo.',
filepath='./japan.txt'),
Passage(id='id-11',
content='As of 2021, the largest city in China is Beijing.',
filepath='./china.txt')
]
class DummyEvaluator(BaseEvaluator):
def __init__(self, pipeline: BaseRunPipeline, metrics=None, run_all=True):
super().__init__(run_all=run_all, metrics=metrics)
self.pipeline = pipeline
def evaluate(self, **kwargs):
questions = [
"What is the capital of France?",
"What is the capital of Korea?",
"What is the capital of Japan?",
"What is the capital of China?",
"What is the capital of Germany?"
]
retrieval_gt = [
['id-2', 'id-8'],
['id-1', 'id-7'],
['id-4'],
['id-5', 'id-11'],
['id-3']
]
retrieval_gt_order = [
[1, 2],
[1, 2],
[1, 2],
[1, 2],
[1, 2]
]
answer_gt = [
['Paris'],
['Seoul'],
['Tokyo'],
['Beijing'],
['Berlin']
]
return self._calculate_metrics(questions, self.pipeline,
retrieval_gt=retrieval_gt,
retrieval_gt_order=retrieval_gt_order,
answer_gt=answer_gt,
**kwargs)
@pytest.fixture
def dummy_evaluator():
db = PickleDB(pickle_path)
db.create_or_load()
db.save(TEST_PASSAGES)
retrieval = BM25Retrieval(bm25_path)
retrieval.ingest(TEST_PASSAGES)
pipeline = BasicRunPipeline(retrieval=retrieval, llm=OpenAI())
yield DummyEvaluator(pipeline)
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
@pytest.fixture
def no_ragas_evaluator():
db = PickleDB(pickle_path)
db.create_or_load()
db.save(TEST_PASSAGES)
retrieval = BM25Retrieval(bm25_path)
retrieval.ingest(TEST_PASSAGES)
pipeline = BasicRunPipeline(retrieval=retrieval, llm=OpenAI())
# test that it can initialize without openai api key env
evaluator = DummyEvaluator(pipeline, metrics=['Recall', 'Precision', 'F1_score', 'BLEU'], run_all=False)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
# default top_k is 4
def test_base_evaluator(dummy_evaluator):
result = dummy_evaluator.evaluate()
for key, res in result.results.items():
assert res >= 0.0
logger.info(f"{key}: {res}")
assert len(result.each_results) == 5
assert result.each_results.iloc[0]['question'] == 'What is the capital of France?'
assert bool(result.each_results.iloc[0]['passage_contents'][0]) is True
assert result.each_results.iloc[0]['passage_contents'][0] == \
list(filter(lambda x: x.id == result.each_results.iloc[0]['passage_ids'][0], TEST_PASSAGES))[0].content
assert result.each_results.iloc[0]['F1_score'] > 0
assert len(result.use_metrics) == len(dummy_evaluator.metrics)
def test_no_ragas(no_ragas_evaluator):
# It just tests it can initialize without ragas metrics.
result = no_ragas_evaluator.evaluate()
for key, res in result.results.items():
assert res >= 0.0
logger.info(f"{key}: {res}")
assert len(result.each_results) == 5
assert len(result.use_metrics) == len(no_ragas_evaluator.metrics)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_ko_strategy_qa.py | import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import KoStrategyQAEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'ko_strategy_qa_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'ko_strategy_qa_evaluator.pkl')
@pytest.fixture
def ko_strategy_qa_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
pipeline = BasicRunPipeline(bm25_retrieval, OpenAI(model_name="babbage-002"))
evaluator = KoStrategyQAEvaluator(pipeline, evaluate_size=5,
metrics=['Recall', 'Precision', 'Hole', 'TopK_Accuracy', 'EM', 'F1_score',
'context_precision'])
evaluator.ingest([bm25_retrieval], db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_ko_strategy_qa_evaluator(ko_strategy_qa_evaluator):
with pytest.raises(ValueError):
ko_strategy_qa_evaluator.evaluate(validate_passages=True)
result = ko_strategy_qa_evaluator.evaluate(validate_passages=False)
assert len(result.each_results) == 5
assert result.each_results.iloc[0]['question'] == '토마토 껍질을 벗기려면 뜨거운 물과 찬물이 모두 필요하나요?'
assert result.each_results.iloc[0]['answer_pred']
assert len(result.use_metrics) == len(ko_strategy_qa_evaluator.metrics)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_triviaqa.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import TriviaQAEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'triviaqa_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'triviaqa_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def triviaqa_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="gpt-3.5-turbo-16k")
pipeline = BasicRunPipeline(bm25_retrieval, llm, retrieval_option={'top_k': 2})
evaluator = TriviaQAEvaluator(pipeline, evaluate_size=5)
evaluator.ingest(retrievals=[bm25_retrieval], db=db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_triviaqa_evaluator(triviaqa_evaluator):
result = triviaqa_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'Who was the man behind The Chipmunks?'
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~semantic_clustering.py | from typing import List
import pandas as pd
from langchain.schema.embeddings import Embeddings
from sklearn.cluster import AffinityPropagation, AgglomerativeClustering, Birch, KMeans, DBSCAN, MeanShift, OPTICS, \
SpectralClustering
from RAGchain.schema import Passage
from RAGchain.utils.util import text_modifier
class SemanticClustering:
"""
This class is used to cluster the passages based on their semantic information.
    First, each passage is embedded into a vector that represents its semantic information.
    Second, the embedding vectors are clustered with the selected clustering algorithm.
    There is no single clustering algorithm that is optimal for every case, so you can try several of them.
"""
def __init__(self, embedding_function: Embeddings,
clustering_algorithm: str = 'kmeans'):
self.embedding_function = embedding_function
self.clustering_algorithm = clustering_algorithm
def cluster(self, passages: List[Passage], **kwargs) -> List[List[Passage]]:
"""
clustering
:param passages: list of passages to be clustered.
:param kwargs: kwargs for clustering algorithm.
:return: 2-d list of clustered Passages. Each cluster is a list of passages.
"""
embeddings = self.embedding_function.embed_documents([passage.content for passage in passages])
clustering_algorithm_dict = {
'affinity_propagation': AffinityPropagation,
'agglomerative_clustering': AgglomerativeClustering,
'birch': Birch,
'dbscan': DBSCAN,
'kmeans': KMeans,
'mean_shift': MeanShift,
'optics': OPTICS,
'spectral_clustering': SpectralClustering,
}
clustering_algorithm_class = self.__select_clustering_algorithm(clustering_algorithm_dict)
clustering_algorithm = clustering_algorithm_class(**kwargs)
clustering_algorithm.fit(embeddings)
df = pd.DataFrame({
'id': [passage.id for passage in passages],
'cluster': clustering_algorithm.labels_.tolist(),
'passage': passages
})
return df.groupby('cluster')['passage'].apply(list).tolist()
def __select_clustering_algorithm(self, instance_dict: dict):
algorithm_names = list(instance_dict.keys())
for modified_name in text_modifier(self.clustering_algorithm):
if modified_name in algorithm_names:
return instance_dict[modified_name]
raise ValueError(f"Clustering algorithm {self.clustering_algorithm} is not supported. "
f"Please choose one of {algorithm_names}.")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~retrieval~test_hyde_retrieval.py | import os
import pytest
from langchain.llms.openai import OpenAI
import test_base_retrieval
from RAGchain.retrieval import HyDERetrieval, BM25Retrieval
@pytest.fixture
def hyde_retrieval():
test_prompt = "Please write a scientific paper passage to answer the question"
bm25_path = os.path.join(test_base_retrieval.root_dir, "resources", "bm25", "test_hyde_retrieval.pkl")
pickle_path = os.path.join(test_base_retrieval.root_dir, "resources", "pickle", "test_hyde_retrieval.pkl")
if not os.path.exists(os.path.dirname(bm25_path)):
os.makedirs(os.path.dirname(bm25_path))
if not os.path.exists(os.path.dirname(pickle_path)):
os.makedirs(os.path.dirname(pickle_path))
test_base_retrieval.ready_pickle_db(pickle_path)
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
hyde_retrieval = HyDERetrieval(bm25_retrieval, OpenAI(max_tokens=64), system_prompt=test_prompt)
yield hyde_retrieval
if os.path.exists(pickle_path):
os.remove(pickle_path)
if os.path.exists(bm25_path):
os.remove(bm25_path)
def test_hyde_retrieval(hyde_retrieval):
hyde_retrieval.ingest(test_base_retrieval.TEST_PASSAGES)
top_k = 4
retrieved_ids = hyde_retrieval.retrieve_id(query='What is visconde structure?', top_k=top_k)
test_base_retrieval.validate_ids(retrieved_ids, top_k)
retrieved_passages = hyde_retrieval.retrieve(query='What is visconde structure?', top_k=top_k)
test_base_retrieval.validate_passages(retrieved_passages, top_k)
retrieved_ids_2, scores = hyde_retrieval.retrieve_id_with_scores(query='What is visconde structure?',
top_k=top_k)
assert len(retrieved_ids_2) == len(scores)
assert max(scores) == scores[0]
assert min(scores) == scores[-1]
def test_hyde_retrieval_delete(hyde_retrieval):
hyde_retrieval.ingest(test_base_retrieval.SEARCH_TEST_PASSAGES)
hyde_retrieval.delete(['test_id_4_search', 'test_id_3_search'])
retrieved_passages = hyde_retrieval.retrieve(query='What is visconde structure?', top_k=4)
assert len(retrieved_passages) == 2
assert 'test_id_1_search' in [passage.id for passage in retrieved_passages]
assert 'test_id_2_search' in [passage.id for passage in retrieved_passages]
| [
"Please write a scientific paper passage to answer the question"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_dstc11_track5.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import DSTC11Track5Evaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
logger = logging.getLogger(__name__)
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'dstc_track5_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'dstc_track5_evaluator.pkl')
@pytest.fixture
def dstc11_track5_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="babbage-002")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = DSTC11Track5Evaluator(pipeline, evaluate_size=5)
evaluator.ingest([bm25_retrieval], db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_dstc11_track5_evaluator(dstc11_track5_evaluator):
result = dstc11_track5_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0]['question'] == ("U: I'm looking to stay at a 3 star hotel in the north. "
"S: Sorry, I have no results for that query. Would you like to "
"try a different area of town? U: Are there any moderate priced "
"hotels in the North? S: Yes I have two. Would you like me to book "
"one? U: I need a hotel to include free parking; does either have"
" that? S: Yes both of them have free parking. U: Which one would"
" you recommend? S: How about the Ashley hotel? U: Is the Ashley "
"hotel a 3 star hotel? S: the ashley is actually a 2 star hotel. "
"U: Does this hotel have rooms with a good view of the neighborhood?")
assert result.each_results.iloc[0]['answer_pred']
logger.info('The result of DSTC-11-Track-5 dataset.')
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~tart~tart.py | from typing import List, Optional
import torch
import torch.nn.functional as F
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
from .modeling_enc_t5 import EncT5ForSequenceClassification
from .tokenization_enc_t5 import EncT5Tokenizer
class TARTReranker(BaseReranker):
"""
TARTReranker is a reranker based on TART (https://github.com/facebookresearch/tart).
You can rerank the passages with the instruction using TARTReranker.
"""
def __init__(self, instruction: str):
"""
The default model is facebook/tart-full-flan-t5-xl.
:param instruction: The instruction for reranking.
"""
self.instruction = instruction
model_name = "facebook/tart-full-flan-t5-xl"
self.model = EncT5ForSequenceClassification.from_pretrained(model_name)
self.tokenizer = EncT5Tokenizer.from_pretrained(model_name)
def rerank(self, query: str, passages: List[Passage]) -> List[Passage]:
retrieval_result = RetrievalResult(query=query, passages=passages, scores=[])
reranked_result = self.invoke(retrieval_result)
return reranked_result.passages
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
passages = input.passages
contents: List[str] = [passage.content for passage in passages]
instruction_queries: List[str] = ['{0} [SEP] {1}'.format(self.instruction, input.query) for _ in
range(len(contents))]
features = self.tokenizer(instruction_queries, contents, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
scores = self.model(**features).logits
normalized_scores = [float(score[1]) for score in F.softmax(scores, dim=1)]
sorted_passages, sorted_scores = zip(
*sorted(zip(passages, normalized_scores), key=lambda x: x[1], reverse=True))
input.passages = list(sorted_passages)
input.scores = list(sorted_scores)
return input
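if __name__ == "__main__":
    # Illustrative usage sketch (assumes the facebook/tart-full-flan-t5-xl checkpoint
    # can be downloaded; the passages below are toy examples).
    sample_passages = [
        Passage(id="1", content="Paris is the capital of France.", filepath="demo"),
        Passage(id="2", content="The Nile is a river in Africa.", filepath="demo"),
    ]
    reranker = TARTReranker(instruction="Retrieve a passage that answers the given question.")
    reranked = reranker.rerank("What is the capital of France?", sample_passages)
    print([passage.id for passage in reranked])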
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~pdf_link_loader.py | import tempfile
from typing import List
import requests
from langchain.document_loaders import PDFMinerLoader
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
class PdfLinkLoader(BaseLoader):
"""
Load PDF from a link
"""
def __init__(self, link: str, *args, **kwargs):
if not self.valid_url(link):
raise ValueError(f"Invalid url: {link}")
self.link = link
def load(self) -> List[Document]:
with tempfile.NamedTemporaryFile() as f:
f.write(requests.get(self.link).content)
f.seek(0)
loader = PDFMinerLoader(f.name)
return loader.load()
@staticmethod
def valid_url(url):
return url.startswith("http://") or url.startswith("https://")
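if __name__ == "__main__":
    # Illustrative usage sketch; the URL below is a placeholder for any publicly reachable PDF.
    loader = PdfLinkLoader("https://example.com/sample.pdf")
    documents = loader.load()
    print(documents[0].page_content[:200])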
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_startegy_qa.py | import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import StrategyQAEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'strategy_qa_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'strategy_qa_evaluator.pkl')
@pytest.fixture
def strategy_qa_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
pipeline = BasicRunPipeline(bm25_retrieval, OpenAI(model_name='babbage-002'))
evaluator = StrategyQAEvaluator(pipeline, evaluate_size=5,
metrics=['Recall', 'Precision', 'Hole', 'TopK_Accuracy', 'EM', 'F1_score',
'context_recall', 'context_precision'])
evaluator.ingest([bm25_retrieval], db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_strategy_qa_evaluator(strategy_qa_evaluator):
with pytest.raises(ValueError):
strategy_qa_evaluator.evaluate(validate_passages=True)
result = strategy_qa_evaluator.evaluate(validate_passages=False)
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'Are more people today related to Genghis Khan than Julius Caesar?'
assert result.each_results.iloc[0]['answer_pred']
# you can't use context_recall when validate_passages is False
assert len(result.use_metrics) == len(strategy_qa_evaluator.metrics) - 1
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~reranker~test_base_reranker.py | import os
import pathlib
import pickle
from uuid import UUID
from langchain_core.runnables import RunnableLambda
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent
with open(os.path.join(root_dir, "resources", "sample_passages.pkl"), 'rb') as r:
TEST_PASSAGES = pickle.load(r)
query = "What is query decomposition?"
def test_load_passage():
assert len(TEST_PASSAGES) > 0
for passage in TEST_PASSAGES:
assert isinstance(passage, Passage)
assert isinstance(passage.id, UUID) or isinstance(passage.id, str)
def base_runnable_test(reranker: BaseReranker):
runnable = reranker | RunnableLambda(lambda x: x.to_dict())
result = runnable.invoke(RetrievalResult(query=query, passages=TEST_PASSAGES, scores=[]))
assert isinstance(result['query'], str)
assert isinstance(result['passages'], list)
assert isinstance(result['scores'], list)
assert len(result['passages']) == len(TEST_PASSAGES)
assert result['passages'][0] != TEST_PASSAGES[0] or result['passages'][-1] != TEST_PASSAGES[-1]
assert len(result['scores']) == len(result['passages'])
assert isinstance(result['passages'][0], Passage)
assert isinstance(result['scores'][0], float)
for i in range(1, len(result['passages'])):
assert result['scores'][i - 1] >= result['scores'][i]
results = runnable.batch([
RetrievalResult(query=query, passages=TEST_PASSAGES[:10], scores=[]),
RetrievalResult(query=query, passages=TEST_PASSAGES[10:25], scores=[])
])
assert isinstance(results, list)
assert len(results) == 2
assert isinstance(results[0]['passages'], list)
assert isinstance(results[1]['passages'][0], Passage)
assert len(results[0]['passages']) == 10
assert len(results[1]['passages']) == 15
assert len(results[0]['scores']) == 10
assert len(results[1]['scores']) == 15
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~mathpix_markdown_loader.py | import os
import re
from typing import List, Iterator
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
class MathpixMarkdownLoader(BaseLoader):
"""
Load mathpix markdown file.
mathpix markdown is .mmd file which is a markdown file for science papers.
This class supports to split the file into sections and tables of science papers.
"""
def __init__(self, filepath: str):
"""
:param filepath: The path of mathpix markdown file. The file should be .mmd file.
"""
if not os.path.exists(filepath):
raise ValueError(f"File {filepath} does not exist.")
self.filepath = filepath
def load(self, split_section: bool = True, split_table: bool = True) -> List[Document]:
"""
:param split_section: If True, split the file into sections. Default is True.
:param split_table: If True, split the file into tables. Default is True.
:return: List of Document. If split_section and split_table are True, the list contains multiple Documents.
The order of each section and table are the same as the order of the file.
"""
return list(self.lazy_load(split_section=split_section, split_table=split_table))
def lazy_load(self, split_section: bool = True, split_table: bool = True) -> Iterator[Document]:
"""
:param split_section: If True, split the file into sections. Default is True.
:param split_table: If True, split the file into tables. Default is True.
:return: Iterator of Document. If split_section and split_table are True, return contains multiple Documents.
The order of each section and table are the same as the order of the file.
"""
with open(self.filepath, 'r') as f:
content = f.read()
if not split_section and not split_table:
yield Document(page_content=content, metadata={"source": self.filepath, "content_type": "text"})
else:
split_sections: List[str] = [content]
if split_section:
split_sections = self.split_section(content)
if not split_table:
for section in split_sections:
yield Document(page_content=section, metadata={"source": self.filepath, "content_type": "text"})
if split_table:
for document in split_sections:
contents: List[str] = self.split_table(document)
for content in contents:
page_type = "table" if content.startswith('\\\\begin{table}') else "text"
yield Document(page_content=content,
metadata={"source": self.filepath, "content_type": page_type})
@staticmethod
def split_section(content: str) -> List[str]:
"""
Split section from mathpix markdown content by '#'.
"""
split_text = re.split('(#+ )', content)
split_text.pop(0)
result = [split_text[i] + split_text[i + 1] for i in range(0, len(split_text), 2)]
return result
@staticmethod
def split_table(content: str) -> List[str]:
"""
Split table from mathpix markdown content.
:param content: mathpix markdown content
        :return: Alternating list: even indices (0-based) are the text without tables, odd indices are the tables.
"""
pattern = re.compile(r'\\\\begin{table}.*?\\\\end{table}', re.DOTALL)
matches = re.findall(pattern, content)
texts_without_tables = re.split(pattern, content)
result = []
for i in range(len(texts_without_tables)):
result.append(texts_without_tables[i])
if i < len(matches):
result.append(matches[i])
return result
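if __name__ == "__main__":
    # Illustrative usage sketch; 'paper.mmd' is a placeholder path to an existing Mathpix markdown file.
    loader = MathpixMarkdownLoader("paper.mmd")
    for doc in loader.lazy_load(split_section=True, split_table=True):
        print(doc.metadata["content_type"], len(doc.page_content))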
| [
"text"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~retrieval~test_hybrid_retrieval.py | import logging
import os
import shutil
import chromadb
import pytest
from langchain.vectorstores import Chroma
import test_base_retrieval
from RAGchain.DB import PickleDB
from RAGchain.retrieval import BM25Retrieval, VectorDBRetrieval, HybridRetrieval
from RAGchain.utils.embed import EmbeddingFactory
logger = logging.getLogger(__file__)
@pytest.fixture(scope='module')
def hybrid_retrieval():
bm25_path = os.path.join(test_base_retrieval.root_dir, "resources", "bm25", "test_hybrid_retrieval.pkl")
pickle_path = os.path.join(test_base_retrieval.root_dir, "resources", "pickle", "test_hybrid_retrieval.pkl")
chroma_path = os.path.join(test_base_retrieval.root_dir, "resources", "test_hybrid_retrieval_chroma")
if not os.path.exists(chroma_path):
os.makedirs(chroma_path)
if not os.path.exists(os.path.dirname(bm25_path)):
os.makedirs(os.path.dirname(bm25_path))
if not os.path.exists(os.path.dirname(pickle_path)):
os.makedirs(os.path.dirname(pickle_path))
test_base_retrieval.ready_pickle_db(pickle_path)
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
vectordb_retrieval = VectorDBRetrieval(vectordb=Chroma(
client=chromadb.PersistentClient(path=chroma_path),
collection_name='test_hybrid_retrieval',
embedding_function=EmbeddingFactory('openai').get()
))
hybrid_retrieval = HybridRetrieval(retrievals=[bm25_retrieval, vectordb_retrieval], weights=[0.3, 0.7], p=50)
yield hybrid_retrieval
if os.path.exists(pickle_path):
os.remove(pickle_path)
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(chroma_path):
shutil.rmtree(chroma_path)
@pytest.fixture
def hybrid_retrieval_for_delete():
bm25_path = os.path.join(test_base_retrieval.root_dir, "resources", "bm25", "test_hybrid_retrieval_for_delete.pkl")
pickle_path = os.path.join(test_base_retrieval.root_dir, "resources", "pickle",
"test_hybrid_retrieval_for_delete.pkl")
chroma_path = os.path.join(test_base_retrieval.root_dir, "resources", "test_hybrid_retrieval_for_delete_chroma")
if not os.path.exists(chroma_path):
os.makedirs(chroma_path)
if not os.path.exists(os.path.dirname(bm25_path)):
os.makedirs(os.path.dirname(bm25_path))
if not os.path.exists(os.path.dirname(pickle_path)):
os.makedirs(os.path.dirname(pickle_path))
db = PickleDB(save_path=pickle_path)
db.create_or_load()
db.save(test_base_retrieval.SEARCH_TEST_PASSAGES)
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
vectordb_retrieval = VectorDBRetrieval(vectordb=Chroma(
client=chromadb.PersistentClient(path=chroma_path),
collection_name='test_hybrid_retrieval',
embedding_function=EmbeddingFactory('openai').get()
))
hybrid_retrieval = HybridRetrieval(retrievals=[bm25_retrieval, vectordb_retrieval], weights=[0.3, 0.7], p=50)
hybrid_retrieval.ingest(test_base_retrieval.SEARCH_TEST_PASSAGES)
yield hybrid_retrieval
if os.path.exists(pickle_path):
os.remove(pickle_path)
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(chroma_path):
shutil.rmtree(chroma_path)
def test_hybrid_retrieval(hybrid_retrieval):
hybrid_retrieval.ingest(test_base_retrieval.TEST_PASSAGES)
top_k = 6
retrieved_ids = hybrid_retrieval.retrieve_id(query='What is visconde structure?', top_k=top_k)
test_base_retrieval.validate_ids(retrieved_ids, top_k)
retrieved_passages = hybrid_retrieval.retrieve(query='What is visconde structure?', top_k=top_k)
test_base_retrieval.validate_passages(retrieved_passages, top_k)
retrieved_ids_2, scores = hybrid_retrieval.retrieve_id_with_scores(query='What is visconde structure?',
top_k=top_k)
logger.info(f'scores: {scores}')
assert retrieved_ids == retrieved_ids_2
assert len(retrieved_ids_2) == len(scores)
assert max(scores) == scores[0]
assert min(scores) == scores[-1]
def test_hybrid_retrieval_rrf(hybrid_retrieval):
hybrid_retrieval.method = 'rrf'
test_hybrid_retrieval(hybrid_retrieval)
def test_hybrid_retrieval_delete(hybrid_retrieval_for_delete):
hybrid_retrieval_for_delete.delete(['test_id_4_search', 'test_id_3_search'])
retrieved_passages = hybrid_retrieval_for_delete.retrieve(query='What is visconde structure?', top_k=4)
assert len(retrieved_passages) == 2
assert 'test_id_1_search' in [passage.id for passage in retrieved_passages]
assert 'test_id_2_search' in [passage.id for passage in retrieved_passages]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~text_splitter~html_header_splitter.py | import copy
from typing import Optional, List, Tuple
from langchain.schema import Document
from langchain.text_splitter import HTMLHeaderTextSplitter
from RAGchain.preprocess.text_splitter.base import BaseTextSplitter
from RAGchain.schema import Passage
class HTMLHeaderSplitter(BaseTextSplitter):
"""
The HTMLHeaderSplitter class in the RAGchain library is a text splitter that splits documents based on HTML headers.
This class inherits from the BaseTextSplitter class and uses the HTMLHeaderTextSplitter.
"""
def __init__(
self,
            headers_to_split_on: Optional[List[Tuple[str, str]]] = None,
return_each_element: bool = False,
):
"""
:param headers_to_split_on: list of tuples of headers we want to track mapped to (arbitrary) keys for metadata.
Allowed header values: h1, h2, h3, h4, h5, h6
Default is [("h1", "Header 1"), ("h2", "Header 2"), ("h3", "Header 3"),]
        e.g. [("h1", "Header 1"), ("h2", "Header 2")].
:param return_each_element: Return each element with associated headers. Default is False.
"""
# Set headers_to_split_on default variable.
if headers_to_split_on is None:
headers_to_split_on = [
("h1", "Header 1"),
("h2", "Header 2"),
("h3", "Header 3")
]
self.html_header_splitter = HTMLHeaderTextSplitter(headers_to_split_on, return_each_element)
def split_document(self, document: Document) -> List[Passage]:
doc_copy = copy.deepcopy(document)
split_documents = self.html_header_splitter.split_text(document.page_content)
for doc in split_documents:
doc.metadata.update(doc_copy.metadata)
passages = Passage.from_documents(split_documents)
return passages
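if __name__ == "__main__":
    # Illustrative usage sketch: splitting a small in-memory HTML document by its headers.
    html = "<h1>Intro</h1><p>First part.</p><h2>Details</h2><p>Second part.</p>"
    splitter = HTMLHeaderSplitter()
    passages = splitter.split_document(Document(page_content=html, metadata={"source": "demo.html"}))
    print(len(passages), [passage.content for passage in passages])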
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~dataset_loader.py | from typing import List
import pandas as pd
from huggingface_hub import hf_hub_download
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
class KoStrategyQALoader(BaseLoader):
"""
KoStrategyQA dataset loader
    The dataset is downloaded from Hugging Face over the internet.
"""
def __init__(self, *args, **kwargs):
paragraph_path = hf_hub_download(repo_id="NomaDamas/Ko-StrategyQA",
filename="ko-strategy-qa_paragraphs.parquet",
repo_type="dataset")
self.paragraph_df = pd.read_parquet(paragraph_path)
def load(self) -> List[Document]:
def make_document(row):
return Document(page_content=row["ko-content"], metadata={"id": row["key"]})
self.paragraph_df["document"] = self.paragraph_df.apply(make_document, axis=1)
return self.paragraph_df["document"].tolist()
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~compressor~test_llm_lingua_compressor.py | import logging
import pytest
from langchain.llms.openai import OpenAI
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain.schema import StrOutputParser
from RAGchain.utils.compressor.llm_lingua import LLMLinguaCompressor
logger = logging.getLogger(__name__)
@pytest.fixture
def llm_lingua_compressor():
compressor = LLMLinguaCompressor()
yield compressor
def test_llm_lingua_compressor(llm_lingua_compressor):
prompt = PromptTemplate.from_template("Hello, I am a {role}.")
runnable = prompt | llm_lingua_compressor | OpenAI() | StrOutputParser()
answer = runnable.invoke({"role": "student"})
assert bool(answer.strip()) is True
chat_prompt = ChatPromptTemplate.from_messages([
("system", "As a helpful assistant, follow the instructions below."),
("user", "Hello, I am a {role}."), # user input
])
runnable = chat_prompt | llm_lingua_compressor | OpenAI() | StrOutputParser()
answer = runnable.invoke({"role": "student"})
assert bool(answer) is True
for s in runnable.stream({"role": "student"}):
logger.debug(s)
| [
"[('system', 'As a helpful assistant, follow the instructions below.'), ('user', 'Hello, I am a {role}.')]",
"Hello, I am a {role}.",
"As a helpful assistant, follow the instructions below."
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~upr~upr.py | from typing import List, Optional
import torch
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from transformers import T5ForConditionalGeneration, T5Tokenizer
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage, RetrievalResult
class UPRReranker(BaseReranker):
"""
UPRReranker is a reranker based on UPR (https://github.com/DevSinghSachan/unsupervised-passage-reranking).
    The language model scores how likely the question is given each passage, and the passages are reranked by that likelihood.
"""
def __init__(self,
model_name: str = "t5-large",
prefix_prompt: str = "Passage: ",
suffix_prompt: str = "Please write a question based on this passage.",
use_bf16: bool = False,
use_gpu: bool = False,
shard_size: int = 16):
"""
:param model_name: The name of the model. The default model is t5-large.
:param prefix_prompt: The prefix prompt for the language model that generates question for reranking. Default is "Passage: ".
:param suffix_prompt: The suffix prompt for the language model that generates question for reranking. Default is "Please write a question based on this passage.".
:param use_bf16: Whether to use bfloat16 for the model. Default is False.
:param use_gpu: Whether to use GPU for the model. Default is False.
:param shard_size: The shard size for the model. The larger the shard size, the faster the reranking speed. But it will consume more memory and compute power. Default is 16.
"""
self.prefix_prompt = prefix_prompt
self.suffix_prompt = suffix_prompt
self.model = T5ForConditionalGeneration.from_pretrained(model_name,
torch_dtype=torch.bfloat16 if use_bf16 else torch.float32)
self.tokenizer = T5Tokenizer.from_pretrained(model_name)
self.use_gpu = use_gpu
self.shard_size = shard_size
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
input_contexts = [f"{passage.filepath} {passage.content}" for passage in input.passages]
indexes, scores = self.calculate_likelihood(input.query, input_contexts)
reranked_passages = [input.passages[idx] for idx in indexes]
input.passages = reranked_passages
input.scores = scores
return input
def rerank(self, query: str, passages: List[Passage]) -> List[Passage]:
result = self.invoke(RetrievalResult(query=query, passages=passages, scores=[]))
return result.passages
def calculate_likelihood(self, question: str, contexts: List[str]) -> tuple[List[int], List[float]]:
prompts = [f"{self.prefix_prompt} {context} {self.suffix_prompt}" for context in contexts]
# tokenize contexts and instruction prompts
context_tokens = self.tokenizer(prompts,
padding='longest',
max_length=512,
pad_to_multiple_of=8,
truncation=True,
return_tensors='pt')
context_tensor, context_attention_mask = context_tokens.input_ids, context_tokens.attention_mask
if self.use_gpu:
context_tensor, context_attention_mask = context_tensor.cuda(), context_attention_mask.cuda()
# tokenize question
question_tokens = self.tokenizer([question],
max_length=128,
truncation=True,
return_tensors='pt')
question_tensor = question_tokens.input_ids
if self.use_gpu:
question_tensor = question_tensor.cuda()
question_tensor = torch.repeat_interleave(question_tensor, len(contexts), dim=0)
sharded_nll_list = []
# calculate log likelihood
for i in range(0, len(context_tensor), self.shard_size):
encoder_tensor_view = context_tensor[i: i + self.shard_size]
attention_mask_view = context_attention_mask[i: i + self.shard_size]
decoder_tensor_view = question_tensor[i: i + self.shard_size]
with torch.no_grad():
logits = self.model(input_ids=encoder_tensor_view,
attention_mask=attention_mask_view,
labels=decoder_tensor_view).logits
log_softmax = torch.nn.functional.log_softmax(logits, dim=-1)
nll = -log_softmax.gather(2, decoder_tensor_view.unsqueeze(2)).squeeze(2)
avg_nll = torch.sum(nll, dim=1)
sharded_nll_list.append(avg_nll)
topk_scores, indexes = torch.topk(-torch.cat(sharded_nll_list), k=len(context_tensor))
return indexes.tolist(), topk_scores.tolist()
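if __name__ == "__main__":
    # Illustrative usage sketch on CPU (the t5-large weights are downloaded on first use;
    # a small shard_size keeps memory usage low).
    sample_passages = [
        Passage(id="1", content="The Eiffel Tower is located in Paris.", filepath="demo"),
        Passage(id="2", content="Mount Everest is the highest mountain on Earth.", filepath="demo"),
    ]
    reranker = UPRReranker(use_gpu=False, shard_size=4)
    reranked = reranker.rerank("Where is the Eiffel Tower?", sample_passages)
    print([passage.id for passage in reranked])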
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~websearch~test_bing_search.py | from langchain_core.runnables import RunnableLambda
from RAGchain.utils.websearch import BingSearch
def test_bing_search():
search = BingSearch()
passages = search.get_search_data("뉴진스 민지의 생일은?", num_results=2)
assert len(passages) == 2
def test_bing_search_runnable():
search = BingSearch()
runnable = search | RunnableLambda(lambda x: x.to_dict())
result = runnable.invoke("뉴진스 민지의 생일은?", config={"configurable": {"web_search_options": {"num_results": 2}}})
assert isinstance(result['query'], str)
assert result['query'] == "뉴진스 민지의 생일은?"
assert isinstance(result['passages'], list)
assert len(result['passages']) == 2
assert isinstance(result['scores'], list)
assert result['scores'] == [1.0, 0.5]
def test_bing_search_runnable_batch():
search = BingSearch()
runnable = search | RunnableLambda(lambda x: x.to_dict())
results = runnable.batch([
"뉴진스 민지의 생일은?",
"에스파 카리나의 생일은?",
], config={"configurable": {"web_search_options": {"num_results": 2}}})
assert len(results) == 2
assert isinstance(results[0]['query'], str)
assert results[0]['query'] == "뉴진스 민지의 생일은?"
assert isinstance(results[0]['passages'], list)
assert len(results[0]['passages']) == 2
assert isinstance(results[0]['scores'], list)
assert len(results[0]['scores']) == 2
assert results[1]['query'] == "에스파 카리나의 생일은?"
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~file_loader.py | import os
from typing import List, Iterator
from langchain.document_loaders import TextLoader, PDFMinerLoader, CSVLoader
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from tqdm import tqdm
class FileLoader(BaseLoader):
"""
Loads documents from a directory.
You can load .txt, .pdf, .csv, .xlsx, .hwp files.
"""
def __init__(self, target_dir: str, hwp_host_url: str, *args, **kwargs):
"""
:param target_dir: directory path to load documents from
:param hwp_host_url: hwp-converter-api host url
"""
# add more extensions when if you want to add more extensions loader
self.hwp_host_url = hwp_host_url
self.ingestable_extensions = ['.txt', '.pdf', '.csv', '.xlsx', '.hwp']
if not os.path.exists(target_dir):
raise ValueError(f"Target directory {target_dir} does not exist.")
self.target_dir = target_dir
def load(self, filter_ext: List[str] = None) -> List[Document]:
"""
Load all files in the target directory.
:param filter_ext: If not None, only files with the given extensions will be loaded. filter_ext elements must contain the dot (.) prefix.
"""
docs = list(self.lazy_load(filter_ext=filter_ext))
if len(docs) <= 0:
print(f"Could not find any new documents in {self.target_dir}")
else:
print(f"Loaded {len(docs)} documents from {self.target_dir}")
return docs
def lazy_load(self, filter_ext: List[str] = None) -> Iterator[Document]:
"""
Lazily load all files in the target directory.
:param filter_ext: If not None, only files with the given extensions will be loaded. filter_ext elements must contain the dot (.) prefix.
"""
valid_ext = self.ingestable_extensions if filter_ext is None else filter_ext
for (path, dir, files) in tqdm(os.walk(self.target_dir)):
for file_name in files:
ext = os.path.splitext(file_name)[-1].lower() # this function contain dot (.) prefix
if filter_ext is not None and ext not in filter_ext:
continue
full_file_path = os.path.join(path, file_name)
if ext in valid_ext:
yield self._load_single_document(full_file_path)
else:
print(f"Not Support file type {ext} yet.")
def _load_single_document(self, file_path: str) -> Document:
from RAGchain.preprocess.loader import ExcelLoader, HwpLoader
# Loads a single document from a file path
if file_path.endswith(".txt"):
loader = TextLoader(file_path, encoding="utf8")
elif file_path.endswith(".pdf"):
loader = PDFMinerLoader(file_path)
elif file_path.endswith(".csv"):
loader = CSVLoader(file_path)
elif file_path.endswith(".xlsx"):
loader = ExcelLoader(file_path)
elif file_path.endswith(".hwp"):
loader = HwpLoader(file_path, hwp_host_url=self.hwp_host_url)
return loader.load()[0]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~reranker~test_weighted_importance_reranker.py | import pytest
from langchain_core.runnables import RunnableLambda
from RAGchain.reranker.importance import WeightedImportanceReranker
from RAGchain.schema import Passage, RetrievalResult
TEST_PASSAGES = [
Passage(id=str(i), content=str(i), filepath='test', importance=i) for i in range(5)
]
SCORES = [i ** 2 for i in range(5, 0, -1)]
@pytest.fixture
def weighted_importance_reranker():
reranker = WeightedImportanceReranker(importance_weight=0.8)
yield reranker
def test_weighted_importance_reranker(weighted_importance_reranker):
reranked_passages = weighted_importance_reranker.rerank(TEST_PASSAGES, SCORES)
assert isinstance(reranked_passages[0], Passage)
solution = [4, 3, 2, 1, 0]
for passage, idx in zip(reranked_passages, solution):
assert passage.id == str(idx)
def test_weighted_importance_reranker_runnable(weighted_importance_reranker):
runnable = weighted_importance_reranker | RunnableLambda(lambda x: x.to_dict())
result = runnable.invoke(RetrievalResult(query="query", passages=TEST_PASSAGES, scores=SCORES))
assert isinstance(result['passages'], list)
assert isinstance(result['scores'], list)
assert isinstance(result['passages'][0], Passage)
assert isinstance(result['scores'][0], float)
assert len(result['passages']) == len(TEST_PASSAGES)
assert len(result['scores']) == len(result['passages'])
solution = [4, 3, 2, 1, 0]
for passage, idx in zip(result['passages'], solution):
assert passage.id == str(idx)
for i in range(1, len(result['scores'])):
assert result['scores'][i - 1] >= result['scores'][i]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~compressor~llm_lingua.py | from typing import Type, Union, List, Optional, Any
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValueConcrete, ChatPromptValue
from langchain.schema.language_model import LanguageModelInput
from langchain.schema.messages import AnyMessage
from langchain.schema.runnable import RunnableConfig, Runnable
from langchain.schema.runnable.utils import Input, Output
class LLMLinguaCompressor(Runnable[LanguageModelInput, str]):
"""
    Compress a given prompt using LLMLingua.
    It uses a small model such as Llama-2-7b to calculate the perplexity of the given prompt,
    and uses that information to compress the prompt and reduce token usage.
"""
def __init__(self,
model_name: str = "NousResearch/Llama-2-7b-hf",
device_map: str = "cuda",
model_config: dict = {},
open_api_config: dict = {},
**kwargs: Any):
"""
Initialize the LLMLinguaCompressor class.
:param model_name: The name of the model to be used for compression.
:param device_map: The device to be used for compression.
:param model_config: The model configuration to be used for compression.
:param open_api_config: The open api configuration to be used for compression.
:param kwargs: The keyword arguments to be used for compression. parameter of compress_prompt method.
"""
super().__init__()
try:
from llmlingua import PromptCompressor
except ImportError:
raise ImportError("Please install llmlingua first.")
self.compressor = PromptCompressor(model_name=model_name,
device_map=device_map,
model_config=model_config,
open_api_config=open_api_config)
self.compress_option = kwargs
def invoke(self, input: Input, config: Optional[RunnableConfig] = None,
**kwargs: Any) -> Output:
prompt_value = ''
if isinstance(input, str):
prompt_value = input
elif isinstance(input, StringPromptValue):
prompt_value = input.text
elif isinstance(input, ChatPromptValueConcrete) or isinstance(input, ChatPromptValue):
for message in input.messages:
prompt_value += f'{message.type} : {message.content}\n'
elif isinstance(input[0], AnyMessage):
for message in input:
prompt_value += f'{message.type} : {message.content}\n'
else:
raise TypeError(f"Invalid input type: {type(input)}")
result = self.compressor.compress_prompt(context=[prompt_value],
**self.compress_option)
return result['compressed_prompt']
@property
def InputType(self) -> Type[Input]:
"""Get the input type for this runnable."""
return Union[
str,
Union[StringPromptValue, ChatPromptValueConcrete],
List[AnyMessage],
]
@property
def OutputType(self) -> Type[Output]:
return str
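if __name__ == "__main__":
    # Illustrative usage sketch (assumes the llmlingua package is installed, a CUDA device
    # is available, and the default Llama-2-7b weights can be downloaded).
    compressor = LLMLinguaCompressor()
    long_prompt = "Answer the question using the context below. " + "This is a context sentence. " * 50
    print(compressor.invoke(long_prompt))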
| [
"INPUT"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~win32_hwp_loader.py | import os
import re
import tempfile
import zipfile
from typing import List, Iterator
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
class Win32HwpLoader(BaseLoader):
"""
    Load an HWP file using pywin32. It works only on Windows.
    Using load or lazy_load, you can get a list of Documents from an hwp or hwpx file.
    This loader loads all paragraphs and tables from the hwp or hwpx file.
    The first Document contains all paragraphs, excluding the text inside each table.
    It is followed by a separate Document for each table, with all table contents converted to html format,
    so you can recover rows, columns, or any complicated table structure.
    The metadata contains the filepath at key 'source' and a page_type, which is 'text' or 'table'.
    This loader is a great option for loading complicated tables from hwp or hwpx files,
    but it is only available on Windows, so choose another hwp loader if you want to run on mac or linux.
"""
def __init__(self, path: str):
"""
:param path: hwp file path
"""
self.file_path = path
def lazy_load(self) -> Iterator[Document]:
text, tables = self.preprocessor()
yield Document(page_content=" ".join(text), metadata={"source": self.file_path,
'page_type': 'text'})
for table in tables:
yield Document(page_content=table, metadata={"source": self.file_path, 'page_type': 'table'})
def load(self) -> List[Document]:
return list(self.lazy_load())
def preprocessor(self) -> tuple[List, List]:
text = list()
table = list()
hwpx_temp_file = None
if self.file_path.endswith('.hwp'):
hwpx_temp_file = tempfile.NamedTemporaryFile(suffix='.hwpx', mode='w', delete=False)
self.convert_hwp_to_hwpx(self.file_path, hwpx_temp_file.name)
hwpx_file = hwpx_temp_file.name
elif self.file_path.endswith('.hwpx'):
hwpx_file = self.file_path
else:
raise ValueError("The file extension must be .hwp or .hwpx")
with tempfile.TemporaryDirectory() as target_path:
with zipfile.ZipFile(hwpx_file, 'r') as zf:
zf.extractall(path=target_path)
if hwpx_temp_file is not None:
hwpx_temp_file.close()
os.unlink(hwpx_temp_file.name)
text_extract_pattern = r'</?(?!(?:em|strong)\b)[a-z](?:[^>"\']|"[^"]*"|\'[^\']*\')*>'
for i, xml in enumerate(self.__splitter(os.path.join(target_path, "Contents", "section0.xml"))):
if i % 2 == 0:
text.append(re.sub(text_extract_pattern, '', xml)) # just text
elif i % 2 == 1:
table.append('<hp:tbl' + xml) # table
text[0] = text[0].strip("""['<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>""")
table = list(map(self.__xml_to_html, table))
return text, table
@staticmethod
def convert_hwp_to_hwpx(input_filepath, output_filepath):
try:
import win32com.client as win32
except ImportError:
raise ImportError("Please install pywin32."
"pip install pywin32")
hwp = win32.gencache.EnsureDispatch("HWPFrame.HwpObject")
hwp.RegisterModule("FilePathCheckDLL", "FilePathCheckerModule")
hwp.HParameterSet.HTableCreation.TableProperties.TreatAsChar = 1
hwp.Open(input_filepath)
hwp.SaveAs(output_filepath, "HWPX")
hwp.Quit()
@staticmethod
def __splitter(path):
with open(path, 'r', encoding='utf-8') as file:
xml_content = file.read()
separate = re.split(r'<hp:tbl|</hp:tbl>', xml_content)
return separate
@staticmethod
def __xml_to_html(xml):
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError("Please install bs4."
"pip install bs4")
bs = BeautifulSoup(xml, 'html.parser')
flag, line = 0, 0
result_txt = """<table
border="1"
width="50%"
height="200"
cellspacing="5">\n"""
for tag in bs.find_all('hp:tr'):
result_txt += "\t <tr>\n"
for tag2 in tag.find_all('hp:tc'):
for tag3 in tag2.find_all('hp:cellspan'):
for tag4 in tag2.find_all('hp:sublist'):
result_txt += '\t \t <td>'
for tag5 in tag4.find_all('hp:t'):
if tag3.attrs['colspan'] != "1" and tag3.attrs['rowspan'] == "1" and flag == 0:
result_txt = result_txt[:-1] + ' colspan ="{}">'.format(
tag3.attrs['colspan']) + tag5.get_text()
flag = 1
elif tag3.attrs['colspan'] == "1" and tag3.attrs[
'rowspan'] != "1" and flag == 0 and line == 0:
result_txt = result_txt[:-1] + ' rowspan ="{}">'.format(
tag3.attrs['rowspan']) + tag5.get_text()
flag = 1
line = 2
elif tag3.attrs['colspan'] != "1" and tag3.attrs['rowspan'] != "1" and flag == 0:
result_txt = result_txt[:-1] + ' colspan ="{}" rowspan ="{}">'.format(
tag3.attrs['colspan'], tag3.attrs['rowspan']) + tag5.get_text()
flag = 1
else:
result_txt += tag5.get_text()
flag = 0
result_txt += '</td>\n'
line = 0
result_txt += '\t </tr>\n'
result_txt += '</table>'
return result_txt
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~text_splitter~markdown_header_splitter.py | import copy
from typing import List, Optional
from langchain.schema import Document
from langchain.text_splitter import MarkdownHeaderTextSplitter
from RAGchain.preprocess.text_splitter.base import BaseTextSplitter
from RAGchain.schema import Passage
class MarkDownHeaderSplitter(BaseTextSplitter):
"""
    The `MarkDownHeaderSplitter` splits a document into passages based on the document's header information, using a list of header separators.
    Its behavior is close to Langchain's MarkdownHeaderTextSplitter: it splits based on headers.
"""
def __init__(self, headers_to_split_on: Optional[List[tuple[str, str]]] = None, return_each_line: bool = False):
"""
:param headers_to_split_on: A list of tuples which appended to create split standard.
ex)
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3"),
]
"""
# Set default value headers_to_split_on.
if headers_to_split_on is None:
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3")
]
self.markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on, return_each_line)
def split_document(self, document: Document) -> List[Passage]:
doc_copy = copy.deepcopy(document)
split_documents = self.markdown_splitter.split_text(document.page_content)
for doc in split_documents:
doc.metadata.update(doc_copy.metadata)
passages = Passage.from_documents(split_documents)
return passages
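if __name__ == "__main__":
    # Illustrative usage sketch: splitting a small in-memory markdown document by its headers.
    markdown = "# Intro\nFirst part.\n## Details\nSecond part."
    splitter = MarkDownHeaderSplitter()
    passages = splitter.split_document(Document(page_content=markdown, metadata={"source": "demo.md"}))
    print(len(passages), [passage.content for passage in passages])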
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~preprocess~text_splitter~test_text_splitter.py | from datetime import datetime
import pytest
from langchain.schema import Document
from RAGchain.preprocess.text_splitter import RecursiveTextSplitter
TEST_DOCUMENT = Document(
page_content="""
To fix the issue of not being able to import your module when running 'pytest', you can try the following solutions:
Add empty __init__.py files to all subdirectories under the src/ directory. This will cause pytest to import
everything using package/module names that start with directory names under src/. [0] Modify the PYTHONPATH
environment variable to include the root directory of your project. This can be done by running the command export
PYTHONPATH=/path/to/your/project in Linux/Unix systems. By adding the root directory to PYTHONPATH, Python will be
able to find your modules from the test directory. You can then run pytest using PYTHONPATH=. pytest. [0] Use the
--import-mode command-line flag in pytest to control how test modules are imported. The default mode is prepend,
which inserts the directory path containing each module at the beginning of sys.path. You can try using the append
mode instead, which appends the directory containing each module to the end of sys.path. This can be useful if you
want to run test modules against installed versions of a package. For example, if you have a package under test and a
separate test package, using --import-mode=append will allow pytest to pick up the installed version of the package
under test. [2] Make sure that there is no __init__.py file in the folder containing your tests. Having an
__init__.py file in the tests folder can cause import issues with pytest. If you have an __init__.py file in the
tests folder, try removing it and see if that solves the problem. [6] [7] Run pytest using the python -m pytest
command instead of just pytest. This will add the current directory to sys.path, which might resolve import issues. [
7] Here is a summary of the steps:
Add empty __init__.py files to all subdirectories under the src/ directory.
Modify the PYTHONPATH environment variable to include the root directory of your project.
Run pytest using PYTHONPATH=. pytest.
Use the --import-mode command-line flag in pytest to control how test modules are imported.
Make sure there is no __init__.py file in the tests folder.
Run pytest using the python -m pytest command.
These solutions should help resolve the import issues you are facing when running pytest.
""",
metadata={
'source': 'test_source',
'data_info': 'mancity',
}
)
TEST_DOCUMENT_2 = Document(
page_content="test",
metadata={
'source': 'test_source',
'content_datetime': '2021-01-01 00:00:00',
'importance': 1
})
TEST_DOCUMENT_3 = Document(
page_content="test",
metadata={
'source': 'test_source',
'content_datetime': datetime(2021, 1, 1),
}
)
@pytest.fixture
def recursive_text_splitter():
recursive_text_splitter = RecursiveTextSplitter(chunk_size=500, chunk_overlap=50)
yield recursive_text_splitter
def test_recursive_text_splitter(recursive_text_splitter):
passages = recursive_text_splitter.split_document(TEST_DOCUMENT)
assert len(passages) > 1
assert passages[0].next_passage_id == passages[1].id
assert passages[1].previous_passage_id == passages[0].id
assert passages[0].filepath == 'test_source'
assert passages[0].filepath == passages[1].filepath
assert passages[0].previous_passage_id is None
assert passages[-1].next_passage_id is None
assert TEST_DOCUMENT.page_content.strip()[:10] == passages[0].content[:10]
assert TEST_DOCUMENT.page_content.strip()[-10:] == passages[-1].content[-10:]
assert passages[0].metadata_etc['data_info'] == 'mancity'
assert len(passages[0].metadata_etc) == 1
def test_splitter_passage_conversion(recursive_text_splitter):
passages_2 = recursive_text_splitter.split_document(TEST_DOCUMENT_2)
assert passages_2[0].content_datetime == datetime(2021, 1, 1, 0, 0, 0)
assert passages_2[0].importance == 1
passages_3 = recursive_text_splitter.split_document(TEST_DOCUMENT_3)
assert passages_3[0].content_datetime == datetime(2021, 1, 1, 0, 0, 0)
assert passages_3[0].importance == 0
| [
"2021-01-01 00:00:00"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~schema~db_origin.py | from langchain.load.serializable import Serializable
class DBOrigin(Serializable):
"""Class for storing a db_type and db_path: dict"""
db_type: str
db_path: dict
def to_dict(self) -> dict:
return {
"db_type": self.db_type,
"db_path": self.db_path
}
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~importance~simple_importance.py | from typing import List, Optional
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.reranker.base import BaseReranker
from RAGchain.schema import Passage
class SimpleImportanceReranker(BaseReranker):
"""Rerank passages by their importance only. It is simple reranker for importance-aware RAG"""
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
sorted_pairs = sorted(zip(input.passages, input.scores), key=lambda p: p[0].importance, reverse=True)
sorted_passages, sorted_scores = zip(*sorted_pairs)
input.passages = list(sorted_passages)
input.scores = list(sorted_scores)
return input
def rerank(self, passages: List[Passage]) -> List[Passage]:
"""
        Rerank passages by their importance only.
:param passages: list of passages to be reranked.
"""
return sorted(passages, key=lambda p: p.importance, reverse=True)
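if __name__ == "__main__":
    # Illustrative usage sketch: passages with higher importance are ranked first.
    sample_passages = [Passage(id=str(i), content=f"passage {i}", filepath="demo", importance=i)
                       for i in range(3)]
    reranked = SimpleImportanceReranker().rerank(sample_passages)
    print([passage.id for passage in reranked])  # expected order: ['2', '1', '0']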
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~embed~embeddingfactory.py | import os
from enum import Enum
from RAGchain.utils.util import text_modifier
class EmbeddingType(Enum):
OPENAI = 'openai'
KOSIMCSE = 'kosimcse'
KO_SROBERTA_MULTITASK = 'ko-sroberta-multitask'
MULTILINGUAL_E5 = 'multilingual-e5'
CONTRIEVER = 'contriever'
class EmbeddingFactory:
"""
EmbeddingFactory is a factory class that returns the embedding class according to the embedding type.
You can create embedding class easily by using this class.
"""
def __init__(self, embed_type: str, device_type: str = 'cuda'):
"""
:param embed_type: Embedding type. You can choose one of the following types.
- openai: OpenAI GPT-3
- kosimcse: KoSimCSE
- ko_sroberta_multitask: Ko-SRoBERTa-MultiTask
- multilingual_e5: Multilingual-E5
- contriever: mContriever
:param device_type: Device type. You can choose one of the following types.
- cuda: GPU
- cpu: CPU
- mps: MPS
"""
if embed_type in text_modifier('openai'):
self.embed_type = EmbeddingType.OPENAI
elif embed_type in text_modifier('kosimcse',
modify_words=['KoSimCSE', 'KoSimcse', 'koSimCSE', 'kosimCSE']):
self.embed_type = EmbeddingType.KOSIMCSE
elif embed_type in text_modifier('ko_sroberta_multitask'):
self.embed_type = EmbeddingType.KO_SROBERTA_MULTITASK
elif embed_type in text_modifier('multilingual_e5'):
self.embed_type = EmbeddingType.MULTILINGUAL_E5
elif embed_type in text_modifier('contriever'):
self.embed_type = EmbeddingType.CONTRIEVER
else:
raise ValueError(f"Unknown embedding type: {embed_type}")
if device_type in text_modifier('cpu'):
self.device_type = 'cpu'
elif device_type in text_modifier('mps'):
self.device_type = 'mps'
else:
self.device_type = 'cuda'
def get(self):
"""
Returns the embedding class according to the embedding type.
"""
if self.embed_type == EmbeddingType.OPENAI:
openai_token = os.getenv("OPENAI_API_KEY")
if openai_token is None:
raise ValueError("OPENAI_API_KEY is empty.")
try:
from langchain.embeddings import OpenAIEmbeddings
except ImportError:
raise ModuleNotFoundError(
"Could not import OpenAIEmbeddings library. Please install OpenAI library."
"pip install openai"
)
return OpenAIEmbeddings(openai_api_key=openai_token)
elif self.embed_type == EmbeddingType.KOSIMCSE:
return self.__set_huggingface_embeddings("BM-K/KoSimCSE-roberta-multitask",
{"device": self.device_type})
elif self.embed_type == EmbeddingType.KO_SROBERTA_MULTITASK:
return self.__set_huggingface_embeddings("jhgan/ko-sroberta-multitask",
{"device": self.device_type})
elif self.embed_type == EmbeddingType.MULTILINGUAL_E5:
return self.__set_huggingface_embeddings("intfloat/multilingual-e5-large",
{"device": self.device_type})
elif self.embed_type == EmbeddingType.CONTRIEVER:
return self.__set_huggingface_embeddings("facebook/mcontriever-msmarco",
{"device": self.device_type})
else:
raise ValueError(f"Unknown embedding type: {self.embed_type}")
@staticmethod
def __set_huggingface_embeddings(model_name: str, model_kwargs: dict):
try:
from langchain.embeddings import HuggingFaceEmbeddings
except ImportError:
raise ModuleNotFoundError(
"Could not import HuggingFaceEmbeddings library. Please install HuggingFace library."
"pip install sentence_transformers"
)
os.environ['TOKENIZERS_PARALLELISM'] = 'true'
return HuggingFaceEmbeddings(model_name=model_name,
model_kwargs=model_kwargs)
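if __name__ == "__main__":
    # Illustrative usage sketch (the 'openai' option assumes OPENAI_API_KEY is set;
    # the HuggingFace options download model weights on first use).
    embeddings = EmbeddingFactory(embed_type='ko_sroberta_multitask', device_type='cpu').get()
    vector = embeddings.embed_query("What is semantic search?")
    print(len(vector))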
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_eli5.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import Eli5Evaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'eli5_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'eli5_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def eli5_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="gpt-3.5-turbo-16k")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = Eli5Evaluator(pipeline, evaluate_size=5)
evaluator.ingest(retrievals=[bm25_retrieval], db=db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_eli5_evaluator(eli5_evaluator):
result = eli5_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'in football whats the point of wasting the first two plays with a rush - up the middle ' \
'- not regular rush plays i get those'
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~hwp_loader.py | import asyncio
import logging
from http.client import HTTPException
from typing import List, Iterator
import aiohttp
from langchain.schema import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class HwpLoader(BaseLoader):
"""Load Hwp files.
Hwp to text using hwp-converter-api.
You can use hwp-converter-api at https://github.com/NomaDamas/hwp-converter-api
"""
def __init__(
self,
path: str,
hwp_host_url: str,
retry_connection: int = 4
):
"""
:param path: Path to the file. You must use .hwp file. .hwpx file is not supported.
:param hwp_host_url: URL of hwp-converter-api.
:param retry_connection: Number of retries to connect to hwp-converter-api. Default is 4.
"""
self.path = path
self.hwp_host_url = hwp_host_url
assert retry_connection >= 1
self.retry_connection = retry_connection
def load(self) -> List[Document]:
"""Load a document."""
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
"""
Load a document lazily. This method uses asyncio requests.
"""
response = asyncio.run(self.async_request())
yield Document(page_content=response, metadata={"source": self.path})
async def async_request(self):
for _ in range(self.retry_connection):
async with aiohttp.ClientSession() as session:
async with session.post(self.hwp_host_url, data={'file': open(self.path, 'rb')}) as response:
if response.status == 200:
return await response.text()
raise HTTPException(
f"Check the url of your file; returned status code {response.status}"
)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~websearch~bing_search.py | from typing import List
from langchain.utilities import BingSearchAPIWrapper
from RAGchain.utils.websearch import BaseWebSearch
from RAGchain.schema import Passage
class BingSearch(BaseWebSearch):
"""
Langchain's 'BingSearchAPIWrapper' returns a List[Dict[str, str]] as the return value.
This BingSearch class wraps this return value in a Passage.
First, you need to set up the proper API keys and environment variables:
create a Bing Search resource in the Azure portal (https://portal.azure.com/#home),
then set BING_SUBSCRIPTION_KEY to its subscription key and BING_SEARCH_URL to the Bing Search API endpoint.
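Below is a minimal usage sketch; it assumes the environment variables above are already set, and the query is only an example.
:example:
>>> from RAGchain.utils.websearch.bing_search import BingSearch
>>> search = BingSearch()
>>> passages = search.get_search_data("What is RAGchain?", num_results=3)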
"""
def __init__(self):
self.search = BingSearchAPIWrapper()
def get_search_data(self, query, num_results=5) -> List[Passage]:
search_results = self.search.results(query, num_results)
passages = Passage.from_search(search_results)
return passages
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~retrieval~hyde.py | import logging
from typing import List, Union
from uuid import UUID
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.prompts import PromptTemplate, ChatPromptTemplate
from langchain.schema import StrOutputParser
from langchain.schema.language_model import BaseLanguageModel
from RAGchain.retrieval.base import BaseRetrieval
from RAGchain.schema import Passage
logger = logging.getLogger(__name__)
class HyDERetrieval(BaseRetrieval):
"""
HyDE Retrieval, which is inspired by "Precise Zero-shot Dense Retrieval without Relevance Labels" (https://arxiv.org/pdf/2212.10496.pdf).
At retrieval time, the LLM first generates a hypothetical passage that answers the query.
Passages are then retrieved using that hypothetical passage as the query.
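Below is a minimal usage sketch; the BM25 index path and the LLM choice are illustrative, and an OPENAI_API_KEY is assumed to be set.
:example:
>>> from RAGchain.retrieval import BM25Retrieval
>>> from RAGchain.retrieval.hyde import HyDERetrieval
>>> from langchain.llms.openai import OpenAI
>>> base_retrieval = BM25Retrieval(save_path="./bm25.pkl")
>>> retrieval = HyDERetrieval(base_retrieval, OpenAI())
>>> passages = retrieval.retrieve("What is HyDE retrieval?", top_k=3)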
"""
BASIC_SYSTEM_PROMPT = "Please write a passage to answer the question"
def __init__(self, retrieval: BaseRetrieval, llm: BaseLanguageModel,
system_prompt: str = None):
"""
:param retrieval: retrieval instance to use
:param llm: llm to use for hypothetical passage generation. HyDE Retrieval supports both chat and completion LLMs.
:param system_prompt: system prompt to use when generating hypothetical passage
"""
super().__init__()
self.retrieval = retrieval
self.llm = llm
self.system_prompt = self.BASIC_SYSTEM_PROMPT if system_prompt is None else system_prompt
prompt = self.__make_prompt()
self.runnable = prompt | self.llm | StrOutputParser()
def retrieve(self, query: str, top_k: int = 5, *args, **kwargs) -> \
List[Passage]:
ids = self.retrieve_id(query, top_k, *args, **kwargs)
result = self.retrieval.fetch_data(ids)
return result
def ingest(self, passages: List[Passage]):
self.retrieval.ingest(passages)
def retrieve_id(self, query: str, top_k: int = 5, *args, **kwargs) -> List[
Union[str, UUID]]:
ids, scores = self.retrieve_id_with_scores(query, top_k, *args, **kwargs)
return ids
def retrieve_id_with_scores(self, query: str, top_k: int = 5, *args, **kwargs) -> \
tuple[List[Union[str, UUID]], List[float]]:
hyde_answer = self.runnable.invoke({"question": query})
# logging
logger.info(f"HyDE answer : {hyde_answer}")
return self.retrieval.retrieve_id_with_scores(query=hyde_answer.strip(), top_k=top_k, *args, **kwargs)
def delete(self, ids: List[Union[str, UUID]]):
self.retrieval.delete(ids)
def __make_prompt(self):
if isinstance(self.llm, BaseLLM):
return PromptTemplate.from_template(
self.system_prompt + "\nQuestion: {question}\nPassage:"
)
elif isinstance(self.llm, BaseChatModel):
return ChatPromptTemplate.from_messages([
("system", self.system_prompt),
("human", "Question: {Question}"),
("ai", "Passage: ")
])
else:
raise NotImplementedError("Only support LLM or ChatModel")
| [
"human",
"\nQuestion: {question}\nPassage:",
"Passage: ",
"Please write a passage to answer the question",
"Question: {Question}"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~text_splitter~code_splitter.py | from typing import List
from langchain.schema import Document
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
Language,
)
from RAGchain.preprocess.text_splitter.base import BaseTextSplitter
from RAGchain.schema import Passage
class CodeSplitter(BaseTextSplitter):
"""
The CodeSplitter class in the RAGchain library is a text splitter that splits documents
using the language-specific separators defined in langchain's Language enum. This class inherits from the
BaseTextSplitter class and uses the from_language method of langchain's RecursiveCharacterTextSplitter
to perform the splitting.
CodeSplitter supports CPP, GO, JAVA, KOTLIN, JS, TS, PHP, PROTO, PYTHON, RST, RUBY, RUST,
SCALA, SWIFT, MARKDOWN, LATEX, HTML, SOL, CSHARP.
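Below is a minimal usage sketch; the sample code string is illustrative.
:example:
>>> from langchain.schema import Document
>>> from RAGchain.preprocess.text_splitter.code_splitter import CodeSplitter
>>> splitter = CodeSplitter(language_name='PYTHON', chunk_size=50, chunk_overlap=0)
>>> passages = splitter.split_document(Document(page_content="def hello(): return 'world'"))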
"""
def __init__(
self,
language_name: str = 'PYTHON',
chunk_size: int = 50,
chunk_overlap: int = 0,
**kwargs
):
"""
:param language_name: A kind of language to split. Default is PYTHON.
(CPP, GO, JAVA, KOTLIN, JS, TS, PHP, PROTO, PYTHON, RST, RUBY, RUST, SCALA, SWIFT, MARKDOWN, LATEX, HTML, SOL, CSHARP)
:param chunk_size: Maximum size of chunks to return. Default is 50.
:param chunk_overlap: Overlap in characters between chunks. Default is 0.
:param kwargs: Additional arguments to pass to the langchain RecursiveCharacterTextSplitter.
"""
self.code_splitter = RecursiveCharacterTextSplitter.from_language(
language=Language[language_name], chunk_size=chunk_size, chunk_overlap=chunk_overlap, **kwargs
)
def split_document(self, document: Document) -> List[Passage]:
split_documents = self.code_splitter.split_documents([document])
passages = Passage.from_documents(split_documents)
return passages
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~pipeline~test_search_pipeline.py | import pytest
from langchain.llms.openai import OpenAI
from RAGchain.pipeline import GoogleSearchRunPipeline
@pytest.fixture
def google_search_run_pipeline():
llm = OpenAI(model_name="babbage-002")
pipeline = GoogleSearchRunPipeline(llm)
yield pipeline
def test_google_search_run_pipeline(google_search_run_pipeline):
answer = google_search_run_pipeline.run.invoke("What is the capital of France?")
assert bool(answer)
answers = google_search_run_pipeline.run.batch(["What is the capital of France?",
"What is the capital of Germany?"])
assert len(answers) == 2
answers, passages, scores = google_search_run_pipeline.get_passages_and_run(["What is the capital of France?",
"What is the capital of Germany?"],
top_k=2)
assert len(answers) == len(passages) == len(scores) == 2
assert len(passages[0]) == len(scores[0]) == 2
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~file_cache.py | from typing import List, Optional
from langchain.schema import Document
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables.utils import Input, Output
from RAGchain.DB.base import BaseDB
class FileCache(Runnable[List[Document], List[Document]]):
"""
This class is used to delete duplicate documents from given DB.
You can use this after you load your file to Document using file loader.
It will automatically check duplicate documents using source metadata and return non-duplicate documents.
:example:
>>> from RAGchain.utils.file_cache import FileCache
>>> from RAGchain.DB import PickleDB
>>> from langchain.document_loaders import TextLoader
>>>
>>> db = PickleDB(save_path='./pickle_db.pkl')
>>> file_cache = FileCache(db)
>>> documents = TextLoader('./data.txt').load()
>>> documents = file_cache.delete_duplicate(documents)
"""
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
return self.delete_duplicate(input)
def __init__(self, db: BaseDB):
self.db = db
self.db.create_or_load()
def delete_duplicate(self, documents: List[Document]) -> List[Document]:
for document in documents.copy():
result = self.db.search(filepath=[document.metadata['source']])
if len(result) > 0:
documents.remove(document)
return documents
@property
def InputType(self) -> type:
return List[Document]
@property
def OutputType(self) -> type:
return List[Document]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~websearch~test_google_search.py | from langchain_core.runnables import RunnableLambda
from RAGchain.utils.websearch import GoogleSearch
def test_google_search():
search = GoogleSearch()
passages = search.get_search_data("뉴진스 민지의 생일은?", num_results=2)
assert len(passages) == 2
def test_google_search_runnable():
search = GoogleSearch()
runnable = search | RunnableLambda(lambda x: x.to_dict())
result = runnable.invoke("뉴진스 민지의 생일은?", config={"configurable": {"web_search_options": {"num_results": 2}}})
assert isinstance(result['query'], str)
assert result['query'] == "뉴진스 민지의 생일은?"
assert isinstance(result['passages'], list)
assert len(result['passages']) == 2
assert isinstance(result['scores'], list)
assert result['scores'] == [1.0, 0.5]
def test_google_search_runnable_batch():
search = GoogleSearch()
runnable = search | RunnableLambda(lambda x: x.to_dict())
results = runnable.batch([
"뉴진스 민지의 생일은?",
"에스파 카리나의 생일은?",
], config={"configurable": {"web_search_options": {"num_results": 2}}})
assert len(results) == 2
assert isinstance(results[0]['query'], str)
assert results[0]['query'] == "뉴진스 민지의 생일은?"
assert isinstance(results[0]['passages'], list)
assert len(results[0]['passages']) == 2
assert isinstance(results[0]['scores'], list)
assert len(results[0]['scores']) == 2
assert results[1]['query'] == "에스파 카리나의 생일은?"
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~query_decompose.py | """
This code is inspired by Visconde paper and its github repo.
@inproceedings{10.1007/978-3-031-28238-6_44,
author = {Pereira, Jayr and Fidalgo, Robson and Lotufo, Roberto and Nogueira, Rodrigo},
title = {Visconde: Multi-Document QA With GPT-3 And Neural Reranking},
year = {2023},
isbn = {978-3-031-28237-9},
publisher = {Springer-Verlag},
address = {Berlin, Heidelberg},
url = {https://doi.org/10.1007/978-3-031-28238-6_44},
doi = {10.1007/978-3-031-28238-6_44},
booktitle = {Advances in Information Retrieval: 45th European Conference on Information Retrieval, ECIR 2023, Dublin, Ireland, April 2–6, 2023, Proceedings, Part II},
pages = {534–543},
numpages = {10},
location = {Dublin, Ireland}
}
"""
from typing import List, Optional
from langchain.llms import BaseLLM
from langchain.prompts import PromptTemplate
from langchain.schema import StrOutputParser
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables.utils import Input, Output
class QueryDecomposition(Runnable[str, List[str]]):
"""
Query Decomposition class.
You can decompose a multi-hop question into multiple single-hop questions using an LLM.
The default decomposition prompt comes from the Visconde paper and uses few-shot examples from the StrategyQA dataset.
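Below is a minimal usage sketch; the model name is illustrative and an OPENAI_API_KEY is assumed to be set.
:example:
>>> from langchain.llms.openai import OpenAI
>>> from RAGchain.utils.query_decompose import QueryDecomposition
>>> decomposer = QueryDecomposition(OpenAI(model_name="babbage-002"))
>>> sub_questions = decomposer.decompose("Is the atomic number of oxygen higher than the number of Spice Girls?")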
"""
decompose_prompt = PromptTemplate.from_template("""Decompose a question in self-contained sub-questions. Use \"The question needs no decomposition\" when no decomposition is needed.
Example 1:
Question: Is Hamlet more common on IMDB than Comedy of Errors?
Decompositions:
1: How many listings of Hamlet are there on IMDB?
2: How many listing of Comedy of Errors is there on IMDB?
Example 2:
Question: Are birds important to badminton?
Decompositions:
The question needs no decomposition
Example 3:
Question: Is it legal for a licensed child driving Mercedes-Benz to be employed in US?
Decompositions:
1: What is the minimum driving age in the US?
2: What is the minimum age for someone to be employed in the US?
Example 4:
Question: Are all cucumbers the same texture?
Decompositions:
The question needs no decomposition
Example 5:
Question: Hydrogen's atomic number squared exceeds number of Spice Girls?
Decompositions:
1: What is the atomic number of hydrogen?
2: How many Spice Girls are there?
Example 6:
Question: {question}
Decompositions:"
""")
def __init__(self, llm: BaseLLM):
"""
:param llm: BaseLLM, language model to use. Query Decomposition does not support chat models; only completion LLMs are supported.
"""
self.llm = llm
def decompose(self, query: str) -> List[str]:
"""
Decompose a query into smaller sub-questions.
:param query: str, query to decompose.
:return: List[str], list of decomposed queries. Returns the input query if it cannot be decomposed.
"""
runnable = self.decompose_prompt | self.llm | StrOutputParser()
answer = runnable.invoke({"question": query})
if answer.lower().strip() == "the question needs no decomposition.":
return [query]
try:
questions = [l for l in answer.splitlines() if l != ""]
questions = [q.split(':')[1].strip() for q in questions]
if not isinstance(questions, list) or len(questions) <= 0 or not isinstance(questions[0], str) or bool(
questions[0]) is False:
return [query]
return questions
except:
return [query]
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
return self.decompose(input)
| [
"Decompose a question in self-contained sub-questions. Use \"The question needs no decomposition\" when no decomposition is needed.\n \n Example 1:\n \n Question: Is Hamlet more common on IMDB than Comedy of Errors?\n Decompositions: \n 1: How many listings of Hamlet are there on IMDB?\n 2: How many listing of Comedy of Errors is there on IMDB?\n \n Example 2:\n \n Question: Are birds important to badminton?\n \n Decompositions:\n The question needs no decomposition\n \n Example 3:\n \n Question: Is it legal for a licensed child driving Mercedes-Benz to be employed in US?\n \n Decompositions:\n 1: What is the minimum driving age in the US?\n 2: What is the minimum age for someone to be employed in the US?\n \n Example 4:\n \n Question: Are all cucumbers the same texture?\n \n Decompositions:\n The question needs no decomposition\n \n Example 5:\n \n Question: Hydrogen's atomic number squared exceeds number of Spice Girls?\n \n Decompositions:\n 1: What is the atomic number of hydrogen?\n 2: How many Spice Girls are there?\n \n Example 6:\n \n Question: {question}\n \n Decompositions:\"\n "
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~pipeline~test_basic_pipeline.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from langchain.memory.chat_message_histories import ChatMessageHistory
from langchain.prompts import MessagesPlaceholder
from langchain.schema.runnable.history import RunnableWithMessageHistory
from RAGchain.DB import MongoDB
from RAGchain.pipeline.basic import BasicIngestPipeline, BasicRunPipeline
from RAGchain.preprocess.loader import FileLoader
from RAGchain.retrieval import BM25Retrieval
from RAGchain.schema.prompt import RAGchainChatPromptTemplate
log = logging.getLogger(__name__)
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent
file_dir = os.path.join(root_dir, "resources", "ingest_files")
bm25_path = os.path.join(root_dir, "resources", "bm25", "bm25_basic_pipeline.pkl")
mongodb_collection_name = "test_basic_pipeline"
mongodb_config = {
"mongo_url": os.getenv('MONGO_URL'),
"db_name": os.getenv('MONGO_DB_NAME'),
"collection_name": mongodb_collection_name
}
@pytest.fixture
def basic_run_pipeline():
if not os.path.exists(file_dir):
os.makedirs(file_dir)
ingest_pipeline = BasicIngestPipeline(
file_loader=FileLoader(file_dir, os.getenv('HWP_CONVERTER_HOST')),
db=MongoDB(**mongodb_config),
retrieval=BM25Retrieval(bm25_path)
)
ingest_pipeline.run.invoke(None)
pipeline = BasicRunPipeline(
retrieval=BM25Retrieval(bm25_path),
llm=OpenAI()
)
yield pipeline
teardown_all(mongodb_config, bm25_path)
@pytest.fixture
def basic_run_pipeline_chat_history():
if not os.path.exists(file_dir):
os.makedirs(file_dir)
ingest_pipeline = BasicIngestPipeline(
file_loader=FileLoader(file_dir, os.getenv('HWP_CONVERTER_HOST')),
db=MongoDB(**mongodb_config),
retrieval=BM25Retrieval(bm25_path)
)
ingest_pipeline.run.invoke(None)
chat_history_prompt = RAGchainChatPromptTemplate.from_messages([
("system", "Answer user's question based on given passages."),
MessagesPlaceholder(variable_name="history"),
("user", "Passages: {passages}"),
("ai", "What is your question? I will answer based on given passages."),
("user", "Question: {question}"),
("ai", "Answer:")
])
pipeline = BasicRunPipeline(
retrieval=BM25Retrieval(bm25_path),
llm=OpenAI(),
prompt=chat_history_prompt
)
yield pipeline
teardown_all(mongodb_config, bm25_path)
def test_basic_pipeline(basic_run_pipeline):
assert os.path.exists(bm25_path)
query = "What is the purpose of RAGchain project? And what inspired it?"
log.info(f"query: {query}")
answer = basic_run_pipeline.run.invoke(query)
assert bool(answer) is True
log.info(f"answer: {answer}")
queries = ["What is the purpose of KoPrivateGPT project?",
"What inspired KoPrivateGPT project?",
"How can I install KoPrivateGPT project?"]
answers, passages, scores = basic_run_pipeline.get_passages_and_run(queries, top_k=4)
assert len(answers) == len(queries)
assert len(passages) == len(queries)
assert len(scores) == len(queries)
for query, answer, passage, score in zip(queries, answers, passages, scores):
assert bool(answer) is True
log.info(f"question: {query}\nanswer: {answer}")
assert len(passage) == 4
log.info(f"score: {score}")
def test_chat_history(basic_run_pipeline_chat_history):
chat_history = ChatMessageHistory()
chain_with_history = RunnableWithMessageHistory(
basic_run_pipeline_chat_history.run,
lambda session_id: chat_history,
input_messages_key="question",
history_messages_key="history"
)
answer = chain_with_history.invoke({"question": "How can I install this project?"},
config={"configurable": {"session_id": "test_session"}})
assert bool(answer)
log.info(f"answer: {answer}")
answer = chain_with_history.invoke({"question": "Is there other things to do?"},
config={"configurable": {"session_id": "test_session"}})
assert bool(answer)
log.info(f"answer: {answer}")
def teardown_all(mongo_config, path):
# teardown mongo db
mongo_db = MongoDB(**mongo_config)
mongo_db.create_or_load()
assert mongo_db.collection_name == mongodb_collection_name
mongo_db.collection.drop()
assert mongodb_collection_name not in mongo_db.db.list_collection_names()
# teardown bm25
if os.path.exists(path):
os.remove(path)
| [
"Question: {question}",
"What is your question? I will answer based on given passages.",
"Answer:",
"Answer user's question based on given passages.",
"Passages: {passages}"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_qasper.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import QasperEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'qasper_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'qasper_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def qasper_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="babbage-002")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = QasperEvaluator(pipeline, evaluate_size=2)
evaluator.ingest([bm25_retrieval], db)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_qasper_evaluator(qasper_evaluator):
result = qasper_evaluator.evaluate()
assert len(result.use_metrics) == len(qasper_evaluator.metrics)
def same_id(row):
same_prefix = (
row.passage_ids[0][:10] == row.passage_ids[1][:10]
== row.passage_ids[2][:10] == row.passage_ids[3][:10]
)
return same_prefix
same_id_count = result.each_results.apply(same_id, axis=1).sum()
assert len(result.each_results) == 2
assert result.each_results.iloc[0, 0] == 'What evaluation metric is used?'
assert same_id_count == len(result.each_results)
for key, value in result.results.items():
logger.info(f"{key}: {value}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~text_splitter~token_splitter.py | from typing import List
from langchain.schema import Document
from langchain.text_splitter import (CharacterTextSplitter, TokenTextSplitter,
SentenceTransformersTokenTextSplitter)
from transformers import AutoTokenizer
from RAGchain.preprocess.text_splitter.base import BaseTextSplitter
from RAGchain.schema import Passage
from RAGchain.utils.util import text_modifier
class TokenSplitter(BaseTextSplitter):
"""
The `TokenSplitter` is used to split a document into passages by token count.
It breaks the text of a document into smaller chunks using the selected tokenization method.
The class supports tokenization with 'tiktoken', 'spaCy', 'SentenceTransformers', 'NLTK', and 'huggingFace'.
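Below is a minimal usage sketch; the sample document content is illustrative.
:example:
>>> from langchain.schema import Document
>>> from RAGchain.preprocess.text_splitter.token_splitter import TokenSplitter
>>> splitter = TokenSplitter(tokenizer_name='tiktoken', chunk_size=100, chunk_overlap=0)
>>> passages = splitter.split_document(Document(page_content="A long document that will be split by token count."))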
"""
def __init__(self, tokenizer_name: str = 'tiktoken', chunk_size: int = 100, chunk_overlap: int = 0,
pretrained_model_name: str = "gpt2", **kwargs):
"""
:param tokenizer_name: Name of the tokenizer to use. You can choose one of
(tiktoken, spaCy, SentenceTransformers, NLTK, huggingFace)
:param chunk_size: Maximum size of chunks to return. Default is 100.
:param chunk_overlap: Overlap in characters between chunks. Default is 0.
:param pretrained_model_name: Name of the huggingface pretrained tokenizer to use with the huggingFace token splitter.
You can pass any tokenizer name available on the Hugging Face Hub. Default is "gpt2".
Refer to the list of pretrained models at this link. (https://huggingface.co/models)
:param kwargs: Additional arguments.
"""
# Create token splitter according to chosen_tokenizer.
if 'tiktoken' in text_modifier(tokenizer_name):
self.splitter = TokenTextSplitter.from_tiktoken_encoder(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
elif 'spaCy' in text_modifier(tokenizer_name):
from langchain.text_splitter import SpacyTextSplitter
self.splitter = SpacyTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
elif 'SentenceTransformers' in text_modifier(tokenizer_name):
self.splitter = SentenceTransformersTokenTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
elif 'NLTK' in text_modifier(tokenizer_name):
from langchain.text_splitter import NLTKTextSplitter
self.splitter = NLTKTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
elif 'huggingFace' in text_modifier(tokenizer_name):
tokenizers = AutoTokenizer.from_pretrained(pretrained_model_name)
self.splitter = CharacterTextSplitter.from_huggingface_tokenizer(
tokenizers, chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
else:
raise ValueError("Ooops! You input invalid tokenizer name." + " Your input: " + tokenizer_name)
def split_document(self, document: Document) -> List[Passage]:
"""
Split a document.
"""
split_documents = self.splitter.split_documents([document])
passages = Passage.from_documents(split_documents)
return passages
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~pipeline~test_visconde_pipeline.py | import logging
import os
import pathlib
import pickle
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.pipeline import ViscondeRunPipeline
from RAGchain.retrieval import BM25Retrieval
logger = logging.getLogger(__name__)
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent
bm25_path = os.path.join(root_dir, "resources", "bm25", "bm25_visconde_pipeline.pkl")
pickle_path = os.path.join(root_dir, "resources", "pickle", "pickle_visconde_pipeline.pkl")
with open(os.path.join(root_dir, "resources", "sample_passages.pkl"), 'rb') as r:
TEST_PASSAGES = pickle.load(r)
@pytest.fixture
def visconde_run_pipeline():
# ingest files
db = PickleDB(save_path=pickle_path)
db.create_or_load()
db.save(TEST_PASSAGES)
retrieval = BM25Retrieval(save_path=bm25_path)
retrieval.ingest(TEST_PASSAGES)
pipeline = ViscondeRunPipeline(retrieval, OpenAI(model_name="babbage-002", temperature=0.1), use_passage_count=4)
yield pipeline
# teardown bm25
if os.path.exists(bm25_path):
os.remove(bm25_path)
# teardown pickle
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_visconde_run_pipeline(visconde_run_pipeline):
answer = visconde_run_pipeline.run.invoke("Is reranker and retriever have same role?")
logger.info(f"Answer: {answer}")
assert bool(answer)
def test_visconde_run_pipeline_get_passages_and_run(visconde_run_pipeline):
answers, passages, scores = visconde_run_pipeline.get_passages_and_run(["Is reranker and retriever have same role?",
"What is reranker role?"], top_k=70)
logger.info(f"Answer 1: {answers[0]}")
logger.info(f"Answer 2: {answers[1]}")
assert len(answers) == len(passages) == len(scores) == 2
assert len(passages[0]) == len(scores[0]) == 4
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~retrieval~test_vectordb_retrieval.py | import os
import shutil
from datetime import datetime
import chromadb
import pytest
from langchain.vectorstores import Chroma
import test_base_retrieval
from RAGchain.DB import PickleDB
from RAGchain.retrieval import VectorDBRetrieval
from RAGchain.utils.embed import EmbeddingFactory
from RAGchain.utils.vectorstore import ChromaSlim
@pytest.fixture(scope='module')
def vectordb_retrieval():
pickle_path = os.path.join(test_base_retrieval.root_dir, "resources", "pickle", "test_vectordb_retrieval.pkl")
if not os.path.exists(os.path.dirname(pickle_path)):
os.makedirs(os.path.dirname(pickle_path))
test_base_retrieval.ready_pickle_db(pickle_path)
chroma_path = os.path.join(test_base_retrieval.root_dir, "resources", "test_vectordb_retrieval_chroma")
if not os.path.exists(chroma_path):
os.makedirs(chroma_path)
chroma = Chroma(client=chromadb.PersistentClient(path=chroma_path),
collection_name='test_vectordb_retrieval',
embedding_function=EmbeddingFactory('openai').get())
retrieval = VectorDBRetrieval(vectordb=chroma)
yield retrieval
# teardown
if os.path.exists(pickle_path):
os.remove(pickle_path)
if os.path.exists(chroma_path):
shutil.rmtree(chroma_path)
@pytest.fixture(scope='module')
def vectordb_retrieval_for_delete():
pickle_path = os.path.join(test_base_retrieval.root_dir, "resources", "pickle",
"test_vectordb_retrieval_for_delete.pkl")
if not os.path.exists(os.path.dirname(pickle_path)):
os.makedirs(os.path.dirname(pickle_path))
db = PickleDB(save_path=pickle_path)
db.create_or_load()
db.save(test_base_retrieval.SEARCH_TEST_PASSAGES)
chroma_path = os.path.join(test_base_retrieval.root_dir, "resources", "test_vectordb_retrieval_for_delete_chroma")
if not os.path.exists(chroma_path):
os.makedirs(chroma_path)
chroma = ChromaSlim(
client=chromadb.PersistentClient(path=chroma_path),
collection_name='test_vectordb_retrieval_for_delete',
embedding_function=EmbeddingFactory('openai').get()
)
retrieval = VectorDBRetrieval(vectordb=chroma)
retrieval.ingest(test_base_retrieval.SEARCH_TEST_PASSAGES)
yield retrieval
# teardown
if os.path.exists(pickle_path):
os.remove(pickle_path)
if os.path.exists(chroma_path):
shutil.rmtree(chroma_path)
@pytest.fixture(scope='module')
def slim_vectordb_retrieval():
pickle_path = os.path.join(test_base_retrieval.root_dir, "resources", "pickle", "test_slim_vectordb_retrieval.pkl")
if not os.path.exists(os.path.dirname(pickle_path)):
os.makedirs(os.path.dirname(pickle_path))
test_base_retrieval.ready_pickle_db(pickle_path)
chroma_path = os.path.join(test_base_retrieval.root_dir, "resources", "test_slim_vectordb_retrieval_chroma")
if not os.path.exists(chroma_path):
os.makedirs(chroma_path)
chroma = ChromaSlim(
client=chromadb.PersistentClient(path=chroma_path),
collection_name='test_slim_vectordb_retrieval',
embedding_function=EmbeddingFactory('openai').get()
)
retrieval = VectorDBRetrieval(vectordb=chroma)
yield retrieval
# teardown
if os.path.exists(pickle_path):
os.remove(pickle_path)
if os.path.exists(chroma_path):
shutil.rmtree(chroma_path)
def test_vectordb_retrieval(vectordb_retrieval):
vectordb_retrieval_test(vectordb_retrieval)
def test_vectordb_retrieval_slim(slim_vectordb_retrieval):
vectordb_retrieval_test(slim_vectordb_retrieval)
def vectordb_retrieval_test(retrieval: VectorDBRetrieval):
retrieval.ingest(test_base_retrieval.TEST_PASSAGES)
top_k = 6
retrieved_ids = retrieval.retrieve_id(query='What is visconde structure?', top_k=top_k)
test_base_retrieval.validate_ids(retrieved_ids, top_k)
retrieved_passages = retrieval.retrieve(query='What is visconde structure?', top_k=top_k)
test_base_retrieval.validate_passages(retrieved_passages, top_k)
retrieved_ids_2, scores = retrieval.retrieve_id_with_scores(query='What is visconde structure?',
top_k=top_k)
assert len(retrieved_ids_2) == len(scores)
assert max(scores) == scores[0]
assert min(scores) == scores[-1]
retrieval.ingest(test_base_retrieval.SEARCH_TEST_PASSAGES)
retrieved_passages = retrieval.retrieve_with_filter(
query='What is visconde structure?',
top_k=top_k,
content=['This is test number 1', 'This is test number 3'],
content_datetime_range=[(datetime(2020, 12, 1), datetime(2021, 1, 31))],
importance=[1, 2]
)
assert len(retrieved_passages) == 1
assert 'test_id_3_search' == retrieved_passages[0].id
def test_vectordb_retrieval_delete(vectordb_retrieval_for_delete):
vectordb_retrieval_for_delete.delete(['test_id_4_search', 'test_id_3_search'])
retrieved_passages = vectordb_retrieval_for_delete.retrieve(query='What is visconde structure?', top_k=4)
assert len(retrieved_passages) == 2
assert 'test_id_1_search' in [passage.id for passage in retrieved_passages]
assert 'test_id_2_search' in [passage.id for passage in retrieved_passages]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_nfcorpus.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import NFCorpusEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'nfcorpus_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'nfcorpus_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def nfcorpus_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="gpt-3.5-turbo-16k")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = NFCorpusEvaluator(pipeline, evaluate_size=5)
evaluator.ingest(retrievals=[bm25_retrieval], db=db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_nfcorpus_evaluator(nfcorpus_evaluator):
result = nfcorpus_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'deafness'
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~utils~evidence_extractor.py | from typing import List, Optional, Union, Any, Type
from langchain.chat_models.base import BaseChatModel
from langchain.llms import BaseLLM
from langchain.schema import StrOutputParser
from langchain.schema.language_model import BaseLanguageModel
from langchain_core.runnables import Runnable, RunnableConfig, RunnableLambda
from langchain_core.runnables.utils import Input, Output
from RAGchain.schema import Passage, RetrievalResult, RAGchainPromptTemplate, RAGchainChatPromptTemplate
# This prompt is originated from RETA-LLM
BASIC_SYSTEM_PROMPT = """From the given document, please select and output the relevant document fragments which are related to the query.
Note that the output must be fragments of the original document, not a summary of the document.
If there is no fragment related to the query in the document, please output 'No Fragment'.
"""
class EvidenceExtractor(Runnable[RetrievalResult, str]):
"""
EvidenceExtractor is a class that extracts relevant evidence based on a given question and a list of passages.
:example:
>>> from RAGchain.utils.evidence_extractor import EvidenceExtractor
>>> from RAGchain.schema import Passage
>>> from langchain.llms.openai import OpenAI
>>>
>>> passages = [
... Passage(content="Lorem ipsum dolor sit amet"),
... Passage(content="Consectetur adipiscing elit"),
... Passage(content="Sed do eiusmod tempor incididunt ut labore et dolore magna aliqua")
... ]
>>>
>>> question = "What is Lorem ipsum?"
>>> extractor = EvidenceExtractor(OpenAI())
>>> result = extractor.extract(question, passages)
>>> print(result)
"""
def __init__(self, llm: BaseLanguageModel, system_prompt: str = None):
"""
Initialize the EvidenceExtractor class.
:param llm: The language model to be used for evidence extraction. You can use both Chat and Completion models.
:param system_prompt: The system prompt to be used. If not provided, the default system prompt will be used.
"""
self.llm = llm
self.system_prompt = system_prompt if system_prompt is not None else BASIC_SYSTEM_PROMPT
def extract(self, question: str, passages: List[Passage]) -> str:
"""
The extract method extracts relevant document evidence based on a question and a list of passages.
:param question: The question for which relevant document fragments need to be extracted.
:param passages: A list of Passage objects that contain the content of the documents.
:return: The extracted relevant document fragments.
"""
runnable = self.__get_prompt() | self.llm | StrOutputParser()
answer = runnable.invoke({
"question": question,
"passages": Passage.make_prompts(passages),
})
return answer
def invoke(self, input: Input, config: Optional[RunnableConfig] = None) -> Output:
runnable = RunnableLambda(lambda x: x.to_prompt_input()) | self.__get_prompt() | self.llm | StrOutputParser()
return runnable.invoke(input, config)
def batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
runnable = RunnableLambda(lambda x: x.to_prompt_input()) | self.__get_prompt() | self.llm | StrOutputParser()
return runnable.batch(inputs, config, **kwargs)
@property
def InputType(self) -> Type[Input]:
return RetrievalResult
@property
def OutputType(self) -> Type[str]:
return str
def __get_prompt(self):
if isinstance(self.llm, BaseLLM):
return RAGchainPromptTemplate.from_template(
self.system_prompt +
"Document content: {passages}\n\nquery: {question}]\n\nrelevant document fragments:"
)
elif isinstance(self.llm, BaseChatModel):
return RAGchainChatPromptTemplate.from_messages([
("system", self.system_prompt),
("human", "Document content: {passages}\n\nquery: {question}"),
("ai", "relevant document fragments: ")
])
else:
raise NotImplementedError("Only support LLM or ChatModel")
| [
"Document content: {passages}\n\nquery: {question}",
"Document content: {passages}\n\nquery: {question}]\n\nrelevant document fragments:",
"human",
"From the given document, please select and output the relevant document fragments which are related to the query.\nNote that the output must be fragments of the original document, not a summary of the document. \nIf there is no fragment related to the query in the document, please output 'No Fragment'.\n",
"relevant document fragments: "
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~rem_loader.py | from datetime import datetime
from typing import List, Iterator, Optional
import pytz
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from RAGchain.utils.util import FileChecker
class RemLoader(BaseLoader):
"""
Load rem storage file from rem sqlite database.
You can set time range to load.
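Below is a minimal usage sketch; the database path and time range are illustrative.
:example:
>>> from datetime import datetime
>>> from RAGchain.preprocess.loader.rem_loader import RemLoader
>>> loader = RemLoader('./rem.sqlite3', time_range=[datetime(2023, 11, 1), datetime(2023, 11, 2)])
>>> documents = loader.load()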
"""
def __init__(self, path: str, time_range: Optional[List[datetime]] = None):
"""
:param path: rem sqlite database file path
:param time_range: time range to load. If None, load all data. We recommend setting a time range,
since loading all data at once can be slow. Default is None.
"""
self.path = path
if not FileChecker(self.path).check_type(file_type='.sqlite3').is_exist():
raise ValueError(f"{self.path} is not sqlite3 file or do not exist.")
import sqlite3
self.conn = sqlite3.connect(path)
self.time_range = time_range if time_range is not None else [datetime(1970, 1, 1), datetime.now()]
self.__preprocess_time_range()
assert len(self.time_range) == 2, "time_range must be list of datetime with length 2"
def lazy_load(self) -> Iterator[Document]:
query = f"""
SELECT allText.text, frames.timestamp
FROM allText
JOIN frames ON allText.frameId = frames.id
WHERE frames.timestamp BETWEEN '{self.time_range[0]}' AND '{self.time_range[1]}'
"""
cur = self.conn.cursor()
cur.execute(query)
for row in cur.fetchall():
yield Document(page_content=row[0],
metadata={
"source": self.path,
"content_datetime": datetime.strptime(row[1], '%Y-%m-%dT%H:%M:%S.%f'),
})
def load(self) -> List[Document]:
return list(self.lazy_load())
def __preprocess_time_range(self):
for i, time in enumerate(self.time_range):
alter_time = time.astimezone(pytz.UTC)
self.time_range[i] = alter_time.strftime('%Y-%m-%dT%H:%M:%S.%f')
| [
"%Y-%m-%dT%H:%M:%S.%f"
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_asqa.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import ASQAEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'asqa_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'asqa_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def asqa_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="gpt-3.5-turbo-16k")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = ASQAEvaluator(pipeline, evaluate_size=5)
evaluator.ingest(retrievals=[bm25_retrieval], db=db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_asqa_evaluator(asqa_evaluator):
result = asqa_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'Who has the highest goals in world football?'
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~pipeline~test_rerank_pipeline.py | import logging
import os
import pathlib
import pickle
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.pipeline import RerankRunPipeline
from RAGchain.reranker import TARTReranker
from RAGchain.retrieval import BM25Retrieval
logger = logging.getLogger(__name__)
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent
bm25_path = os.path.join(root_dir, "resources", "bm25", "bm25_rerank_pipeline.pkl")
pickle_path = os.path.join(root_dir, "resources", "pickle", "pickle_rerank_pipeline.pkl")
with open(os.path.join(root_dir, "resources", "sample_passages.pkl"), 'rb') as r:
TEST_PASSAGES = pickle.load(r)
@pytest.fixture
def rerank_run_pipeline():
# ingest files
db = PickleDB(save_path=pickle_path)
db.create_or_load()
db.save(TEST_PASSAGES)
retrieval = BM25Retrieval(save_path=bm25_path)
retrieval.ingest(TEST_PASSAGES)
reranker = TARTReranker("Find passage to answer given question")
pipeline = RerankRunPipeline(retrieval, reranker, OpenAI(model_name="babbage-002"), use_passage_count=4)
yield pipeline
# teardown bm25
if os.path.exists(bm25_path):
os.remove(bm25_path)
# teardown pickle
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_rerank_run_pipeline(rerank_run_pipeline):
answer, passages, scores = rerank_run_pipeline.get_passages_and_run(["What is reranker role?",
"What is the purpose of reranker?"])
logger.info(f"Answer: {answer[0]}")
assert bool(answer[0])
assert len(answer) == len(passages) == len(scores) == 2
assert len(passages[0]) == len(scores[0]) == 4
for i in range(1, len(scores[0])):
assert scores[0][i - 1] >= scores[0][i]
result = rerank_run_pipeline.run.invoke("What is reranker role?",
config={"configurable": {"retrieval_options": {"top_k": 3}}})
logger.info(f"Answer: {result}")
assert bool(result)
assert isinstance(result, str)
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~pipeline~rerank.py | from typing import List, Optional, Union
from langchain.schema import StrOutputParser
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.runnable import RunnableLambda
from RAGchain.pipeline.base import BaseRunPipeline
from RAGchain.reranker.base import BaseReranker
from RAGchain.retrieval.base import BaseRetrieval
from RAGchain.schema import Passage, RAGchainChatPromptTemplate, RAGchainPromptTemplate, RetrievalResult
class RerankRunPipeline(BaseRunPipeline):
"""
Rerank pipeline is for question answering with retrieved passages using reranker.
First, the retrieval module retrieves passages for reranking.
Then, the reranker reranks them and the top use_passage_count passages are used for the LLM prompt.
:example:
>>> from RAGchain.pipeline.rerank import RerankRunPipeline
>>> from RAGchain.retrieval import BM25Retrieval
>>> from RAGchain.reranker import MonoT5Reranker
>>> from langchain.llms.openai import OpenAI
>>> retrieval = BM25Retrieval(save_path="./bm25.pkl")
>>> reranker = MonoT5Reranker()
>>> llm = OpenAI()
>>> pipeline = RerankRunPipeline(retrieval, reranker, llm)
>>> answer, passages, rel_scores = pipeline.get_passages_and_run(["What is the purpose of this framework based on the document?"])
>>> print(answer[0])
"""
def __init__(self, retrieval: BaseRetrieval, reranker: BaseReranker, llm: BaseLanguageModel,
prompt: Optional[Union[RAGchainPromptTemplate, RAGchainChatPromptTemplate]] = None,
use_passage_count: int = 5):
"""
Initializes an instance of the RerankRunPipeline class.
:param retrieval: An instance of the Retrieval module used for retrieving passages.
:param reranker: An instance of the Reranker module used for reranking passages.
:param llm: An instance of the Langchain LLM module used for generating answers.
:param use_passage_count: The number of passages to use for the LLM prompt after reranking. Default is 5.
"""
self.retrieval = retrieval
self.reranker = reranker
self.llm = llm
self.prompt = self._get_default_prompt(llm, prompt)
self.use_passage_count = use_passage_count
super().__init__()
def _make_runnable(self):
self.run = self.retrieval | self.reranker | RunnableLambda(
lambda x: x.slice(
end=self.use_passage_count).to_prompt_input()) | self.prompt | self.llm | StrOutputParser()
def get_passages_and_run(self, questions: List[str], top_k: int = 5) -> tuple[
List[str], List[List[Passage]], List[List[float]]]:
runnable = self.retrieval | self.reranker | RunnableLambda(lambda x: x.slice(end=self.use_passage_count)) | {
"passages": RunnableLambda(lambda x: x.passages),
"scores": RunnableLambda(lambda x: x.scores),
"answers": RunnableLambda(RetrievalResult.to_prompt_input) | self.prompt | self.llm | StrOutputParser()
}
result = runnable.batch(questions, config={"configurable": {"retrieval_options": {"top_k": top_k}}})
answers, passages, rel_scores = zip(
*[(answer['answers'], answer['passages'], answer['scores']) for answer in result])
return answers, passages, rel_scores
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~utils~embed~test_embeddingfactory.py | import os
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from RAGchain.utils.embed import EmbeddingFactory
def test_embedding_factory():
openai_embedding = EmbeddingFactory(embed_type='openai').get()
assert isinstance(openai_embedding, OpenAIEmbeddings)
assert openai_embedding.openai_api_key == os.getenv('OPENAI_API_KEY')
kosimcse_embedding = EmbeddingFactory(embed_type='kosimcse').get()
assert isinstance(kosimcse_embedding, HuggingFaceEmbeddings)
assert kosimcse_embedding.model_name == "BM-K/KoSimCSE-roberta-multitask"
ko_sroberta_multitask_embedding = EmbeddingFactory(embed_type='ko_sroberta_multitask').get()
assert isinstance(ko_sroberta_multitask_embedding, HuggingFaceEmbeddings)
assert ko_sroberta_multitask_embedding.model_name == "jhgan/ko-sroberta-multitask"
multilingual_e5_embedding = EmbeddingFactory(embed_type='multilingual_e5').get()
assert isinstance(multilingual_e5_embedding, HuggingFaceEmbeddings)
assert multilingual_e5_embedding.model_name == "intfloat/multilingual-e5-large"
contriever_embedding = EmbeddingFactory(embed_type='contriever').get()
assert isinstance(contriever_embedding, HuggingFaceEmbeddings)
assert contriever_embedding.model_name == "facebook/mcontriever-msmarco"
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~reranker~llm~rank_gpt.py | """
This code is from the RankGPT repo and has been slightly modified for integration.
Please go to https://github.com/sunnweiwei/RankGPT if you need more information.
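Below is a minimal usage sketch; the model name and API key are placeholders, and `item` follows the
RankGPT input format: {'query': str, 'hits': [{'content': str, ...}, ...]}.
>>> item = {'query': 'What is HyDE?', 'hits': [{'content': 'passage one'}, {'content': 'passage two'}]}
>>> reranked = sliding_windows(item, rank_start=0, rank_end=2, window_size=2, step=1, model_name='gpt-3.5-turbo', api_key='YOUR_OPENAI_API_KEY')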
"""
import copy
import time
import openai
import tiktoken
class SafeOpenai:
def __init__(self, keys=None, start_id=None, proxy=None, api_base: str = None):
if isinstance(keys, str):
keys = [keys]
if keys is None:
raise "Please provide OpenAI Key."
self.key = keys
self.key_id = start_id or 0
self.key_id = self.key_id % len(self.key)
openai.proxy = proxy
openai.api_key = self.key[self.key_id % len(self.key)]
self.api_key = self.key[self.key_id % len(self.key)]
if api_base is not None:
openai.api_base = api_base
def chat(self, *args, return_text=False, reduce_length=False, **kwargs):
while True:
try:
model = args[0] if len(args) > 0 else kwargs["model"]
completion = openai.ChatCompletion.create(*args, **kwargs, timeout=30)
break
except Exception as e:
print(str(e))
if "This model's maximum context length is" in str(e):
print('reduce_length')
return 'ERROR::reduce_length'
self.key_id = (self.key_id + 1) % len(self.key)
openai.api_key = self.key[self.key_id]
time.sleep(0.1)
if return_text:
completion = completion['choices'][0]['message']['content']
return completion
def text(self, *args, return_text=False, reduce_length=False, **kwargs):
while True:
try:
completion = openai.Completion.create(*args, **kwargs)
break
except Exception as e:
print(e)
if "This model's maximum context length is" in str(e):
print('reduce_length')
return 'ERROR::reduce_length'
self.key_id = (self.key_id + 1) % len(self.key)
openai.api_key = self.key[self.key_id]
time.sleep(0.1)
if return_text:
completion = completion['choices'][0]['text']
return completion
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301"):
"""Returns the number of tokens used by a list of messages."""
if model == "gpt-3.5-turbo":
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0301")
elif model == "gpt-4":
return num_tokens_from_messages(messages, model="gpt-4-0314")
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
tokens_per_name = -1 # if there's a name, the role is omitted
elif model == "gpt-4-0314":
tokens_per_message = 3
tokens_per_name = 1
else:
tokens_per_message, tokens_per_name = 0, 0
try:
encoding = tiktoken.encoding_for_model(model)
except:
encoding = tiktoken.get_encoding("cl100k_base")
num_tokens = 0
if isinstance(messages, list):
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
else:
num_tokens += len(encoding.encode(messages))
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def max_tokens(model):
if 'gpt-4' in model:
return 8192
else:
return 4096
def get_prefix_prompt(query, num):
return [{'role': 'system',
'content': "You are RankGPT, an intelligent assistant that can rank passages based on their relevancy to the query."},
{'role': 'user',
'content': f"I will provide you with {num} passages, each indicated by number identifier []. \nRank the passages based on their relevance to query: {query}."},
{'role': 'assistant', 'content': 'Okay, please provide the passages.'}]
def get_post_prompt(query, num):
return f"Search Query: {query}. \nRank the {num} passages above based on their relevance to the search query. The passages should be listed in descending order using identifiers. The most relevant passages should be listed first. The output format should be [] > [], e.g., [1] > [2]. Only response the ranking results, do not say any word or explain."
def create_permutation_instruction(item=None, rank_start=0, rank_end=100, model_name='gpt-3.5-turbo'):
query = item['query']
num = len(item['hits'][rank_start: rank_end])
max_length = 300
while True:
messages = get_prefix_prompt(query, num)
rank = 0
for hit in item['hits'][rank_start: rank_end]:
rank += 1
content = hit['content']
content = content.replace('Title: Content: ', '')
content = content.strip()
# For Japanese should cut by character: content = content[:int(max_length)]
content = ' '.join(content.split()[:int(max_length)])
messages.append({'role': 'user', 'content': f"[{rank}] {content}"})
messages.append({'role': 'assistant', 'content': f'Received passage [{rank}].'})
messages.append({'role': 'user', 'content': get_post_prompt(query, num)})
if num_tokens_from_messages(messages, model_name) <= max_tokens(model_name) - 200:
break
else:
max_length -= 1
return messages
def run_llm(messages, api_key=None, api_base: str = None, model_name="gpt-3.5-turbo"):
agent = SafeOpenai(api_key, api_base=api_base)
response = agent.chat(model=model_name, messages=messages, temperature=0, return_text=True)
return response
def clean_response(response: str):
new_response = ''
for c in response:
if not c.isdigit():
new_response += ' '
else:
new_response += c
new_response = new_response.strip()
return new_response
def remove_duplicate(response):
new_response = []
for c in response:
if c not in new_response:
new_response.append(c)
return new_response
def receive_permutation(item, permutation, rank_start=0, rank_end=100):
response = clean_response(permutation)
response = [int(x) - 1 for x in response.split()]
response = remove_duplicate(response)
cut_range = copy.deepcopy(item['hits'][rank_start: rank_end])
original_rank = [tt for tt in range(len(cut_range))]
response = [ss for ss in response if ss in original_rank]
response = response + [tt for tt in original_rank if tt not in response]
for j, x in enumerate(response):
item['hits'][j + rank_start] = copy.deepcopy(cut_range[x])
if 'rank' in item['hits'][j + rank_start]:
item['hits'][j + rank_start]['rank'] = cut_range[j]['rank']
if 'score' in item['hits'][j + rank_start]:
item['hits'][j + rank_start]['score'] = cut_range[j]['score']
return item
def permutation_pipeline(item=None, rank_start=0, rank_end=100, model_name='gpt-3.5-turbo', api_key=None,
api_base=None):
messages = create_permutation_instruction(item=item, rank_start=rank_start, rank_end=rank_end,
model_name=model_name) # chan
permutation = run_llm(messages, api_key=api_key, model_name=model_name, api_base=api_base)
item = receive_permutation(item, permutation, rank_start=rank_start, rank_end=rank_end)
return item
def sliding_windows(item=None, rank_start=0, rank_end=100, window_size=20, step=10, model_name='gpt-3.5-turbo',
api_key=None, api_base=None):
item = copy.deepcopy(item)
end_pos = rank_end
start_pos = rank_end - window_size
while start_pos >= rank_start:
start_pos = max(start_pos, rank_start)
item = permutation_pipeline(item, start_pos, end_pos, model_name=model_name, api_key=api_key, api_base=api_base)
end_pos = end_pos - step
start_pos = start_pos - step
return item
| [
"Received passage [PLACEHOLDER].",
"True",
"You are RankGPT, an intelligent assistant that can rank passages based on their relevancy to the query.",
"[PLACEHOLDER] PLACEHOLDER",
"I will provide you with PLACEHOLDER passages, each indicated by number identifier []. \nRank the passages based on their relevance to query: PLACEHOLDER.",
"Okay, please provide the passages."
] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~pipeline~basic.py | from typing import List, Optional, Union
from langchain.document_loaders.base import BaseLoader
from langchain.schema import StrOutputParser
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.runnable import RunnableLambda
from RAGchain.DB.base import BaseDB
from RAGchain.pipeline.base import BaseIngestPipeline, BaseRunPipeline
from RAGchain.preprocess.text_splitter import RecursiveTextSplitter
from RAGchain.preprocess.text_splitter.base import BaseTextSplitter
from RAGchain.retrieval.base import BaseRetrieval
from RAGchain.schema import Passage, RAGchainPromptTemplate, RAGchainChatPromptTemplate, RetrievalResult
class BasicIngestPipeline(BaseIngestPipeline):
"""
Basic ingest pipeline class.
This class handles the ingestion process of documents into a database and retrieval system.
First, it loads files from a directory using the file loader.
Second, it splits each document into passages using the text splitter.
Third, it saves the passages to a database.
Fourth, it ingests the passages into the retrieval module.
:example:
>>> from RAGchain.pipeline.basic import BasicIngestPipeline
>>> from RAGchain.DB import PickleDB
>>> from RAGchain.retrieval import BM25Retrieval
>>> from RAGchain.preprocess.loader import FileLoader
>>> file_loader = FileLoader(target_dir="./data")
>>> db = PickleDB("./db")
>>> retrieval = BM25Retrieval(save_path="./bm25.pkl")
>>> pipeline = BasicIngestPipeline(file_loader=file_loader, db=db, retrieval=retrieval)
>>> pipeline.run.invoke(None)
"""
def __init__(self,
file_loader: BaseLoader,
db: BaseDB,
retrieval: BaseRetrieval,
text_splitter: BaseTextSplitter = RecursiveTextSplitter(chunk_size=500, chunk_overlap=50),
ignore_existed_file: bool = True):
"""
Initialize BasicIngestPipeline.
:param file_loader: File loader to load documents. You can use any file loader from langchain and RAGchain.
:param db: Database to save passages.
:param retrieval: Retrieval module to ingest passages.
:param text_splitter: Text splitter to split a document into passages. Default is RecursiveTextSplitter.
:param ignore_existed_file: If True, ignore existed file in a database. Default is True.
"""
self.file_loader = file_loader
self.text_splitter = text_splitter
self.db = db
self.retrieval = retrieval
self.ignore_existed_file = ignore_existed_file
super().__init__()
def _make_runnable(self):
self.run = (RunnableLambda(self.file_loader.load) |
self.text_splitter |
self.db | self.retrieval.as_ingest())
class BasicRunPipeline(BaseRunPipeline):
"""
Basic run pipeline class.
This class handles the run process of document question answering.
First, it retrieves passages from the retrieval module.
Second, it runs the LLM to generate an answer.
Finally, you can get the answer and the retrieved passages as return values.
:example:
>>> from RAGchain.pipeline.basic import BasicRunPipeline
>>> from RAGchain.retrieval import BM25Retrieval
>>> from langchain.llms.openai import OpenAI
>>> retrieval = BM25Retrieval(save_path="./bm25.pkl")
>>> pipeline = BasicRunPipeline(retrieval=retrieval, llm=OpenAI())
>>> answer, passages, rel_scores = pipeline.get_passages_and_run(questions=["Where is the capital of Korea?"])
>>> # Run with Langchain LCEL
>>> answer = pipeline.run.invoke("Where is the capital of Korea?")
"""
def __init__(self, retrieval: BaseRetrieval, llm: BaseLanguageModel,
prompt: Optional[Union[RAGchainPromptTemplate, RAGchainChatPromptTemplate]] = None):
self.retrieval = retrieval
self.llm = llm
self.prompt = self._get_default_prompt(llm, prompt)
super().__init__()
def _make_runnable(self):
self.run = self.retrieval | RunnableLambda(
RetrievalResult.to_prompt_input) | self.prompt | self.llm | StrOutputParser()
def get_passages_and_run(self, questions: List[str], top_k: int = 5) -> tuple[
List[str], List[List[Passage]], List[List[float]]]:
runnable = self.retrieval | {
"answer": RunnableLambda(lambda x: x.to_prompt_input()) | self.prompt | self.llm | StrOutputParser(),
"passages": RunnableLambda(lambda x: x.passages),
"scores": RunnableLambda(lambda x: x.scores),
}
answers = runnable.batch(questions, config={"configurable": {"retrieval_options": {"top_k": top_k}}})
final_answers, final_passages, final_scores = (
map(list, zip(*[(answer['answer'], answer['passages'], answer['scores']) for answer in answers])))
return final_answers, final_passages, final_scores
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~deepdoctection_loader.py | import re
from typing import List, Iterator, Dict, Any
from urllib.parse import urljoin, urlencode
import requests
from langchain.document_loaders.pdf import BasePDFLoader
from langchain.schema import Document
class DeepdoctectionPDFLoader(BasePDFLoader):
"""
Load PDF file using NomaDamas' Deepdoctection API server.
You can use Deepdoctection API server using Dockerfile at https://github.com/NomaDamas/deepdoctection-api-server
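Below is a minimal usage sketch; the PDF path and host URL are illustrative and assume a running deepdoctection-api-server.
:example:
>>> from RAGchain.preprocess.loader.deepdoctection_loader import DeepdoctectionPDFLoader
>>> loader = DeepdoctectionPDFLoader('./sample.pdf', deepdoctection_host='http://localhost:8000')
>>> documents = loader.load()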
"""
def __init__(self, file_path: str, deepdoctection_host: str):
super().__init__(file_path)
response = requests.get(deepdoctection_host)
if response.status_code != 200:
raise ValueError(f"Could not connect to Deepdoctection server: {deepdoctection_host}")
self.deepdoctection_host = deepdoctection_host
def load(self, *args, **kwargs) -> List[Document]:
"""
        Load the PDF file using the Deepdoctection API server and return a list of Documents.
"""
return list(self.lazy_load(*args, **kwargs))
def lazy_load(self, *args, **kwargs) -> Iterator[Document]:
"""
        Lazily load the PDF file using the Deepdoctection API server, yielding Documents one by one.
"""
request_url = urljoin(self.deepdoctection_host, "extract/") + '?' + urlencode(kwargs)
with open(self.file_path, 'rb') as file:
file_upload = {'file': file}
response = requests.post(request_url, files=file_upload)
if response.status_code != 200:
raise ValueError(f'Deepdoctection API server returns {response.status_code} status code.')
result = response.json()
extracted_pages = self.extract_pages(result)
for extracted_page in extracted_pages:
if 'table' in extracted_page:
yield Document(page_content=extracted_page['table'],
metadata={'source': self.file_path, 'page_number': extracted_page['page_number'],
'page_type': 'table'})
else:
page_content = 'title:' + extracted_page['title'] + '\n' + 'text:' + extracted_page['text']
metadata = {'source': self.file_path, 'page_number': extracted_page['page_number'], 'page_type': 'text'}
yield Document(page_content=page_content, metadata=metadata)
def extract_pages(self, result: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
extracted_pages = []
last_title = None
for i, item in enumerate(result):
titles = item['title']
text = item['text']
page_number = item['page_number']
table = item['table']
# If there is a table, extract the table and add it to the extracted pages
for tbl in table:
extracted_pages.append({'table': tbl, 'page_number': page_number})
# Find the positions of each title in the text
positions = [(title, pos) for title in titles for pos in self.find_positions(text, title)]
positions.sort(key=lambda x: x[1])
# If there are no titles in this page, use the last title from the previous page
if not titles:
if last_title:
extracted_page = {'title': last_title, 'text': text.strip(),
'page_number': page_number}
extracted_pages.append(extracted_page)
else:
extracted_page = {'title': '', 'text': text.strip(),
'page_number': page_number}
extracted_pages.append(extracted_page)
else:
# If there is a last title, create a new document with the last title and the text
# before the first title of the current page
if last_title is not None:
extracted_pages.append({
'title': last_title,
'text': text[:positions[0][1]].strip(),
'page_number': page_number
})
# Create a new extracted page for each title in the current page
for j in range(len(positions)):
title, start = positions[j]
if j == len(positions) - 1:
end = len(text)
else:
end = positions[j + 1][1]
txt = text[start:end].replace(title, '', 1).strip()
extracted_page = {'title': title, 'text': txt,
'page_number': page_number}
extracted_pages.append(extracted_page)
                # Update last_title to the last title of the current page if there are titles,
                # otherwise keep the last title
                if positions:
                    last_title = positions[-1][0]
return extracted_pages
@staticmethod
def find_positions(text, substring):
positions = [match.start() for match in re.finditer(re.escape(substring), text)]
return positions
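# A minimal usage sketch (assumes a Deepdoctection API server is already running at the
# hypothetical address below and that "./sample.pdf" exists; extra keyword arguments passed
# to load()/lazy_load() are forwarded to the server as query parameters):
#
# loader = DeepdoctectionPDFLoader(file_path="./sample.pdf",
#                                  deepdoctection_host="http://localhost:8000")
# for doc in loader.lazy_load():
#     print(doc.metadata['page_number'], doc.metadata['page_type'], doc.page_content[:80])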
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | RAGchain~preprocess~loader~excel_loader.py | import csv
import tempfile
from typing import List, Optional, Iterator
import openpyxl
from langchain.document_loaders import CSVLoader
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
class ExcelLoader(BaseLoader):
"""
Load a document from an Excel file.
"""
    def __init__(self, path: str, sheet_name: Optional[str] = None, *args, **kwargs):
        self.path = path
        wb = openpyxl.load_workbook(self.path)
        # load all worksheets if sheet_name is None, otherwise only the requested sheet
        self.worksheets = wb.worksheets if sheet_name is None else [wb[sheet_name]]
def load(self) -> List[Document]:
return list(self.lazy_load())
    def lazy_load(self) -> Iterator[Document]:
        csv_filepaths = self.__xlsx_to_csv()
        for filepath, sheet_name in zip(csv_filepaths, [ws.title for ws in self.worksheets]):
temp_loader = CSVLoader(filepath)
document = temp_loader.load()[0]
document.metadata['source'] = self.path
document.metadata['sheet_name'] = sheet_name
yield document
    def __xlsx_to_csv(self) -> List[str]:
        temp_file_name = []
        # Iterate over the worksheets in the workbook
        for ws in self.worksheets:
# Create a new temporary file and write the contents of the worksheet to it
with tempfile.NamedTemporaryFile(mode='w+', newline='', suffix='.csv', delete=False) as f:
c = csv.writer(f)
for r in ws.rows:
c.writerow([cell.value for cell in r])
temp_file_name.append(f.name)
# all Sheets are saved to temporary file {temp_file_name}
return temp_file_name
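# A minimal usage sketch (the file path and sheet name below are assumptions):
#
# loader = ExcelLoader("./report.xlsx", sheet_name="Sheet1")
# docs = loader.load()
# print(docs[0].metadata['source'], docs[0].metadata['sheet_name'])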
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~reranker~test_weighted_time_reranker.py | from datetime import datetime, timedelta
import pytest
from langchain_core.runnables import RunnableLambda
from RAGchain.reranker.time import WeightedTimeReranker
from RAGchain.schema import Passage, RetrievalResult
TEST_PASSAGES = [
Passage(id=str(i), content=str(i), filepath='test', content_datetime=datetime.now() - timedelta(hours=i * 2)) for i
in range(11)
]
SCORES = [i for i in range(11)]
@pytest.fixture
def weighted_time_reranker():
reranker = WeightedTimeReranker(decay_rate=0.1)
yield reranker
def test_weighted_time_reranker(weighted_time_reranker):
reranked_passages = weighted_time_reranker.rerank(TEST_PASSAGES, SCORES)
assert isinstance(reranked_passages[0], Passage)
solution = [10, 9, 0, 8, 7, 1, 6, 2, 5, 3, 4]
for passage, idx in zip(reranked_passages, solution):
assert passage.id == str(idx)
def test_weighted_time_reranker_runnable(weighted_time_reranker):
runnable = weighted_time_reranker | {
"passages": RunnableLambda(lambda x: x.passages),
"scores": RunnableLambda(lambda x: x.scores)
}
result = runnable.invoke(RetrievalResult(query="query", passages=TEST_PASSAGES, scores=SCORES))
assert isinstance(result['passages'], list)
assert isinstance(result['scores'], list)
assert isinstance(result['passages'][0], Passage)
assert isinstance(result['scores'][0], float)
solution = [10, 9, 0, 8, 7, 1, 6, 2, 5, 3, 4]
for passage, idx in zip(result['passages'], solution):
assert passage.id == str(idx)
for i in range(1, len(result['scores'])):
assert result['scores'][i - 1] >= result['scores'][i]
| [] |
2024-01-10 | Marker-Inc-Korea/RAGchain | tests~RAGchain~benchmark~dataset~test_antique.py | import logging
import os
import pathlib
import pytest
from langchain.llms.openai import OpenAI
from RAGchain.DB import PickleDB
from RAGchain.benchmark.dataset import AntiqueEvaluator
from RAGchain.pipeline import BasicRunPipeline
from RAGchain.retrieval import BM25Retrieval
root_dir = pathlib.PurePath(os.path.dirname(os.path.realpath(__file__))).parent.parent.parent
bm25_path = os.path.join(root_dir, 'resources', 'bm25', 'antique_evaluator.pkl')
pickle_path = os.path.join(root_dir, 'resources', 'pickle', 'antique_evaluator.pkl')
logger = logging.getLogger(__name__)
@pytest.fixture
def antique_evaluator():
bm25_retrieval = BM25Retrieval(save_path=bm25_path)
db = PickleDB(pickle_path)
llm = OpenAI(model_name="gpt-3.5-turbo-16k")
pipeline = BasicRunPipeline(bm25_retrieval, llm)
evaluator = AntiqueEvaluator(pipeline, evaluate_size=5)
evaluator.ingest(retrievals=[bm25_retrieval], db=db, ingest_size=20)
yield evaluator
if os.path.exists(bm25_path):
os.remove(bm25_path)
if os.path.exists(pickle_path):
os.remove(pickle_path)
def test_antique_evaluator(antique_evaluator):
result = antique_evaluator.evaluate()
assert len(result.each_results) == 5
assert result.each_results.iloc[0][
'question'] == 'What does "see Leaflet" mean on Ept Pregnancy test?'
for key, value in result.results.items():
logger.info(f"{key}: {value}")
logger.info("The result length is " + f"{len(result.results)}")
| [] |
2024-01-10 | jennyluciav/loanbot | loanbot.py | from dotenv import load_dotenv
import os
import openai
load_dotenv()
# read the API key from the environment (.env) instead of hard-coding it in the source
openai.api_key = os.environ.get('OPENAI_KEY')
completion = openai.Completion()
start_sequence = "\nCredily:"
restart_sequence = "\n\nPersona:"
#session_prompt = "Estas hablando con Credily, un experto asesor financiero que te ayudará a analizar tus posibilidades de acceder a un crédito. Comenzaré a hacerte algunas preguntas durante la conversación que serán utilizadas para tu evaluación. comenzaré por pedirte tus datos personales: nombre y apellidos, correo electrónico, dirección, grado de educación, estado civil, sexo, número de dependientes. Luego hablaremos sobre tu historial financiero. Este debe ser lo más impecable posible. Eso significa: no tener deudas pendientes, no tener cargos extras por pagos atrasados, no tener más de 3 juicios por alimentos, no contar con créditos vigentes, no estar en la central de riesgos. Además te solicitaremos otros datos como: situación laboral (empleado, desempleado, independiente), si deseas aplicar al crédito con una persona (co-aplicante), los ingresos de tu co-aplicante, el monto del préstamo que solicitas y el plazo en meses en el que vas a pagar el crédito.\nCredily: Hola soy tu asesor financiero y te ayudaré en el proceso inicial de solicitud de crédito financiero. Tienes alguna pregunta?\nPersona: Quiero saber si puedo acceder a un crédito\nCredily: Seguro, comenzaré preguntándote unos datos personales para hacer un análisis preliminar. ¿Cuál es tu nombre y apellido?\n\nPersona:jenny vega\nCredily: Jenny, ¿cuál es tu correo electrónico?\n\nPersona:[email protected]\nCredily: Jenny, ¿cuál es tu dirección?\n\nPersona:Jr zorritos 1399 bloque 38. Dpto 402. lima\nCredily: Jenny, ¿cuál es tu grado de educación?\n\nPersona:Graduado\nCredily: Jenny, ¿cuál es tu estado civil?\n\nPersona:Soltera\nCredily: Jenny, ¿cuál es tu sexo?\n\nPersona:Femenino\nCredily: Jenny, ¿cuántos dependientes tienes?\n\nPersona:0\nCredily: Jenny, ¿cuál es tu situación laboral?\n\nPersona:independiente\nCredily: Jenny, ¿cuáles son tus ingresos mensuales?\n\nPersona:5000\nCredily: Jenny, ¿cuál es el monto del préstamo que solicitas?\n\nPersona:4000\nCredily: Jenny, ¿cuál es el plazo en meses en el que vas a pagar el crédito?\n\nPersona:36\nCredily: Jenny, ¿estás en la central de riesgos?\n\nPersona:no\nCredily: Jenny, ¿tienes algún crédito vigente?\n\nPersona:si\nCredily: Jenny, ¿tienes algún cargo extra por pagos atrasados?\n\nPersona:no\nCredily: Jenny, ¿tienes más de 3 juicios por alimentos?\n\nPersona:no\nCredily: Jenny, ¿tienes alguna deuda pendiente?\n\nPersona:no\nCredily: Jenny, ¿te gustaría aplicar al crédito con una persona?\n\nPersona:no\nCredily: Jenny, ¿te gustaría aplicar al crédito con una persona?\n\nPersona:no\nCredily: Jenny, en base a tu historial financiero, te podemos ofrecer el crédito que solicitaste. ¿Tienes alguna otra pregunta?\n\nPersona:no\nCredily: Ok, Jenny. Gracias por solicitar tu crédito con nosotros. Esperamos que puedas utilizarlo para tus necesidades financieras.\n\nPersona:gracias\nCredily: De nada, Jenny. ¡Que tengas un buen día!"
session_prompt = "Estas hablando con Credily, un experto asesor financiero que te ayudará a analizar tus posibilidades de acceder a un crédito. Comenzaré a hacerte algunas preguntas durante la conversación que serán utilizadas para tu evaluación. comenzaré por pedirte tus datos personales: nombre y apellidos, correo electrónico, dirección, grado de educación, estado civil, sexo, número de dependientes. Luego hablaremos sobre tu historial financiero. Este debe ser lo más impecable posible. Eso significa: no tener deudas pendientes, no tener cargos extras por pagos atrasados, no tener más de 3 juicios por alimentos, no contar con créditos vigentes, no estar en la central de riesgos. Además te solicitaremos otros datos como: situación laboral (empleado, desempleado, independiente), si deseas aplicar al crédito con una persona (co-aplicante), los ingresos de tu co-aplicante, el monto del préstamo que solicitas y el plazo en meses en el que vas a pagar el crédito."
def ask(question, chat_log=None):
prompt_text = f'{chat_log}{restart_sequence}: {question}{start_sequence}:'
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt_text,
temperature=0.7,
max_tokens=90,
top_p=1,
frequency_penalty=0.05,
presence_penalty=0,
stop=["\n"],
)
story = response['choices'][0]['text']
return str(story)
def append_interaction_to_chat_log(question, answer, chat_log=None):
if chat_log is None:
chat_log = session_prompt
return f'{chat_log}{restart_sequence} {question}{start_sequence}{answer}'
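# A minimal conversation-loop sketch showing how ask() and append_interaction_to_chat_log()
# compose (the user messages below are placeholders):
#
# chat_log = session_prompt
# for question in ["Quiero saber si puedo acceder a un crédito", "Mi nombre es Ana Pérez"]:
#     answer = ask(question, chat_log)
#     chat_log = append_interaction_to_chat_log(question, answer, chat_log)
#     print(answer)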
| [
"PLACEHOLDER\n\nPersona:: PLACEHOLDERstart_sequence35a5aec5-4358-4928-b426-3ef843ae2d56:",
"Estas hablando con Credily, un experto asesor financiero que te ayudará a analizar tus posibilidades de acceder a un crédito. Comenzaré a hacerte algunas preguntas durante la conversación que serán utilizadas para tu evaluación. comenzaré por pedirte tus datos personales: nombre y apellidos, correo electrónico, dirección, grado de educación, estado civil, sexo, número de dependientes. Luego hablaremos sobre tu historial financiero. Este debe ser lo más impecable posible. Eso significa: no tener deudas pendientes, no tener cargos extras por pagos atrasados, no tener más de 3 juicios por alimentos, no contar con créditos vigentes, no estar en la central de riesgos. Además te solicitaremos otros datos como: situación laboral (empleado, desempleado, independiente), si deseas aplicar al crédito con una persona (co-aplicante), los ingresos de tu co-aplicante, el monto del préstamo que solicitas y el plazo en meses en el que vas a pagar el crédito.",
"PLACEHOLDER\n\nPersona:: PLACEHOLDER\nCredily::"
] |