date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | john-mwangi/chatgpt-ui | src~prompt_lang.py | from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from .params import models, template
# define prompt template
prompt_template = PromptTemplate(
input_variables=["conversation_history", "question"], template=template
)
# define memory
memory = ConversationBufferMemory(
memory_key="conversation_history", ai_prefix="assistant", human_prefix="user"
)
if __name__ == "__main__":
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
load_dotenv()
llm_chain = LLMChain(
llm=ChatOpenAI(model=models[0]),
prompt=prompt_template,
memory=memory,
)
msg = llm_chain.predict(question="Hello")
print(msg)
| [
"question",
"conversation_history"
] |
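The module above imports `models` and `template` from `src/params.py`, which is not included in this dump. Below is a minimal sketch of what that module might contain, inferred from how its names are used here and in `app_api.py` further down; every concrete value is a guess, not the repository's actual configuration.

```python
# Hypothetical sketch of src/params.py, inferred from its usage in this repo's
# files. Model names, template text, role string and paths are illustrative.
from pathlib import Path

# prompt_lang.py uses models[0]; the Streamlit apps show the list in a selectbox.
models = ["gpt-3.5-turbo", "gpt-4"]

# The PromptTemplate above declares exactly these two input variables.
template = """The following is a conversation between a user and an assistant.

{conversation_history}
user: {question}
assistant:"""

# Used by app_api.py further down in this dump.
CHATGPT_ROLE = "You are a helpful assistant."
msgs_path = Path("conversations/messages.pkl")
```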
2024-01-10 | john-mwangi/chatgpt-ui | app_lang.py | """Calls OpenAI API via LangChain"""
import streamlit as st
from dotenv import load_dotenv
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from src.params import models
from src.prompt_lang import memory, prompt_template
from src.utils import calc_conversation_cost, calc_prompt_cost, num_tokens_from_string
load_dotenv()
# App interface, capture prompt
st.sidebar.title("ChatGPT API Interface")
model = st.sidebar.selectbox(label="Select a model", options=models)
new_conversation = st.sidebar.checkbox(label="Start new conversation?", value=True)
prompt = st.sidebar.text_area(
label="Prompt", placeholder="Enter your prompt here...", height=250
)
submit = st.sidebar.button(label="Submit")
# Process submission
if submit:
with st.spinner():
llm_chain = LLMChain(
llm=ChatOpenAI(model=model), prompt=prompt_template, memory=memory
)
msg = llm_chain.predict(question=prompt)
input_tokens = num_tokens_from_string(message=prompt, model=model)
output_tokens = num_tokens_from_string(message=msg, model=model)
token_used, promt_cost = calc_prompt_cost(input_tokens, output_tokens, model)
conversation_cost = calc_conversation_cost(
prompt_cost=promt_cost, new_conversation=new_conversation
)
result = {}
result["msg"] = msg
result["token_used"] = token_used
result["promt_cost"] = promt_cost
result["conversation_cost"] = conversation_cost
token_used = result.get("token_used")
promt_cost = result.get("promt_cost")
conversation_cost = result.get("conversation_cost")
st.text(f"Tokens used: {token_used}")
st.text(f"Prompt cost USD: {promt_cost}")
st.text(f"Conversation cost USD: {conversation_cost}")
st.markdown(result.get("msg"))
| [
"Enter your prompt here..."
] |
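`app_lang.py` above also relies on `src/utils.py`, which this dump does not show. The sketch below uses helpers with matching names and signatures; the pricing table and the running-total mechanism are assumptions, not the repository's real logic.

```python
# Hypothetical sketch of src/utils.py as assumed by app_lang.py above.
# Pricing figures and the running-total mechanism are illustrative guesses.
import tiktoken

# USD per 1K tokens (illustrative numbers only).
_PRICES = {"gpt-3.5-turbo": (0.0015, 0.002), "gpt-4": (0.03, 0.06)}
_running_cost = 0.0


def num_tokens_from_string(message: str, model: str) -> int:
    """Count tokens in a string with the tokenizer matching the model."""
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(message))


def calc_prompt_cost(input_tokens: int, output_tokens: int, model: str):
    """Return (total tokens used, cost in USD) for one prompt/response pair."""
    in_price, out_price = _PRICES.get(model, _PRICES["gpt-3.5-turbo"])
    cost = input_tokens / 1000 * in_price + output_tokens / 1000 * out_price
    return input_tokens + output_tokens, round(cost, 6)


def calc_conversation_cost(prompt_cost: float, new_conversation: bool) -> float:
    """Accumulate prompt costs; reset the total when a new conversation starts."""
    global _running_cost
    if new_conversation:
        _running_cost = 0.0
    _running_cost += prompt_cost
    return round(_running_cost, 6)
```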
2024-01-10 | john-mwangi/chatgpt-ui | src~prompt_gpt.py | import os
import pickle
from pathlib import Path
import openai
def prompt_gpt(
model: str = None,
messages: list[str] = None,
) -> dict:
"""
Submit a prompt to ChatGPT API including a conversation history.
Args:
---
model: The name of the model
messages: A list of messages that includes the prompt and the conversation history
Returns:
---
A dictionary that has the following keys:
- msg: ChatGPT response
- msgs: The updated conversation history
- input_tokens: Number of input tokens used
- output_tokens: Number of output tokens used
Useful links:
---
models: https://platform.openai.com/docs/models/gpt-3-5
account balance: https://platform.openai.com/account/billing/overview
create params: https://platform.openai.com/docs/api-reference/chat/create
pricing: https://openai.com/pricing
"""
response = openai.ChatCompletion.create(model=model, messages=messages)
msg = response.get("choices")[0].get("message").get("content")
assistant = {"role": "assistant", "content": msg}
messages.append(assistant)
input_tokens = response.get("usage").get("prompt_tokens")
output_tokens = response.get("usage").get("completion_tokens")
return {
"msg": msg,
"msgs": messages,
"input_tokens": input_tokens,
"output_tokens": output_tokens,
}
def create_messages(
prompt: str = None,
role: str = None,
messages: list[dict] = None,
):
"""Adds the user prompt to the conversation history."""
if prompt is None:
raise ValueError("prompt cannot be None")
if messages is None:
messages = [
{"role": "system", "content": role},
{"role": "user", "content": prompt},
]
else:
messages.append({"role": "user", "content": prompt})
return messages
def load_conversation(msgs_path: Path, new_conversation: bool):
conversation_history = None
if not new_conversation:
if not os.path.exists(msgs_path):
print("No history to load")
else:
try:
with open(msgs_path, mode="rb") as f:
conversation_history = pickle.load(f)
except Exception as e:
print(e)
return conversation_history
| [] |
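A minimal usage sketch for the three helpers in `src/prompt_gpt.py` above, assuming the legacy `openai<1.0` SDK the file targets and an `OPENAI_API_KEY` in the environment; the pickle path and the question are illustrative.

```python
# Illustrative composition of load_conversation -> create_messages -> prompt_gpt.
import os
from pathlib import Path

import openai

from src.prompt_gpt import create_messages, load_conversation, prompt_gpt

openai.api_key = os.environ["OPENAI_API_KEY"]

msgs_path = Path("conversations/messages.pkl")                   # hypothetical location
history = load_conversation(msgs_path, new_conversation=False)   # None if nothing saved yet
messages = create_messages(
    prompt="What is the capital of Kenya?",
    role="You are a helpful assistant.",
    messages=history,
)
result = prompt_gpt(model="gpt-3.5-turbo", messages=messages)
print(result["msg"], result["input_tokens"], result["output_tokens"])
```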
2024-01-10 | john-mwangi/chatgpt-ui | app_api.py | """Calls OpenAI API directly"""
import os
import pickle
import openai
import streamlit as st
from dotenv import load_dotenv
from src.params import CHATGPT_ROLE, models, msgs_path
from src.prompt_gpt import create_messages, load_conversation, prompt_gpt
from src.utils import calc_conversation_cost, calc_prompt_cost
load_dotenv()
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
openai.api_key = OPENAI_API_KEY
# App interface, capture prompt
st.sidebar.title("ChatGPT API Interface")
model = st.sidebar.selectbox(label="Select a model", options=models)
new_conversation = st.sidebar.checkbox(label="Start new conversation?", value=True)
prompt = st.sidebar.text_area(
label="Prompt", placeholder="Enter your prompt here...", height=250
)
submit = st.sidebar.button(label="Submit")
# Load conversation history
conversation_history = load_conversation(msgs_path, new_conversation)
# Create messages
messages = create_messages(prompt, role=CHATGPT_ROLE, messages=conversation_history)
# Process submission
if submit:
with st.spinner():
result = prompt_gpt(model=model, messages=messages)
token_used, promt_cost = calc_prompt_cost(
input_tokens=result.get("input_tokens"),
output_tokens=result.get("output_tokens"),
model=model,
)
conversation_cost = calc_conversation_cost(
prompt_cost=promt_cost, new_conversation=new_conversation
)
st.text(f"Tokens used: {token_used}")
st.text(f"Prompt cost USD: {promt_cost}")
st.text(f"Conversation cost USD: {conversation_cost}")
st.markdown(result.get("msg"))
if not msgs_path.exists():
msgs_path.parent.mkdir(parents=True, exist_ok=True)
with open(msgs_path, mode="wb") as f:
pickle.dump(result["msgs"], file=f)
# TODO:save different conversations in different files
| [
"Enter your prompt here..."
] |
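The TODO at the end of `app_api.py` above asks for saving different conversations in different files. One possible approach, shown here as a sketch rather than the repository's solution, is to derive a distinct pickle path per conversation.

```python
# Sketch of one way to address the TODO above; the helper names and the
# timestamp-based naming scheme are illustrative, not part of the repository.
import pickle
from datetime import datetime
from pathlib import Path
from typing import Optional


def conversation_path(base_dir: Path, conversation_id: Optional[str] = None) -> Path:
    """Return a distinct pickle path per conversation (timestamp-based by default)."""
    if conversation_id is None:
        conversation_id = datetime.now().strftime("%Y%m%d-%H%M%S")
    return base_dir / f"messages_{conversation_id}.pkl"


def save_conversation(msgs: list, base_dir: Path, conversation_id: Optional[str] = None) -> Path:
    """Pickle the message history to its own file and return the path used."""
    path = conversation_path(base_dir, conversation_id)
    path.parent.mkdir(parents=True, exist_ok=True)
    with open(path, "wb") as f:
        pickle.dump(msgs, f)
    return path
```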
2024-01-10 | RajK-NeetiAI/Values-Agrocel-Assistant | config.py | import os
from dotenv import load_dotenv, find_dotenv
from openai import OpenAI
load_dotenv(find_dotenv())
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
ASSISTANT_ID = os.getenv('ASSISTANT_ID')
ERROR_MESSAGE = 'We are facing an issue at this moment.'
client = OpenAI(
api_key=OPENAI_API_KEY
)
| [] |
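`config.py` above only builds an `openai>=1.x` client and loads an assistant id; the code that uses them is not in this dump. Below is a minimal sketch of calling the client via the chat completions endpoint; `ASSISTANT_ID` would presumably be passed to the Assistants API elsewhere in the repository.

```python
# Illustrative use of the client created in config.py above. The model name and
# question are assumptions; error handling mirrors the ERROR_MESSAGE constant.
from config import ERROR_MESSAGE, client


def ask(question: str) -> str:
    try:
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": question}],
        )
        return response.choices[0].message.content
    except Exception:
        return ERROR_MESSAGE


print(ask("Summarise the company's core values in one sentence."))
```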
2024-01-10 | rihp/autogpt-planner-plugin | src~auto_gpt_planner_plugin~planner.py | import json
import os
def check_plan():
"""this function checks if the file plan.md exists, if it doesn't exist it gets created"""
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "plan.md"
)
file_name = workdir
if not os.path.exists(file_name):
with open(file_name, "w") as file:
file.write(
"""
# Task List and status:
- [ ] Create a detailed checklist for the current plan and goals
- [ ] Finally, review that every new task is completed
## Notes:
- Use the run_planning_cycle command frequently to keep this plan up to date.
"""
)
print(f"{file_name} created.")
with open(file_name, "r") as file:
return file.read()
def update_plan():
"""this function checks if the file plan.md exists, if it doesn't exist it gets created"""
current_working_directory = os.getcwd()
workdir = os.path.join(current_working_directory, 'autogpt', 'auto_gpt_workspace', 'plan.md')
file_name = workdir
with open(file_name, 'r') as file:
data = file.read()
response = generate_improved_plan(data)
with open(file_name, "w") as file:
file.write(response)
print(f"{file_name} updated.")
return response
def generate_improved_plan(prompt: str) -> str:
"""Generate an improved plan using OpenAI's ChatCompletion functionality"""
import openai
tasks = load_tasks()
model = os.getenv('PLANNER_MODEL', os.getenv('FAST_LLM_MODEL', 'gpt-3.5-turbo'))
max_tokens = os.getenv('PLANNER_TOKEN_LIMIT', os.getenv('FAST_TOKEN_LIMIT', 1500))
temperature = os.getenv('PLANNER_TEMPERATURE', os.getenv('TEMPERATURE', 0.5))
# Call the OpenAI API for chat completion
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": "You are an assistant that improves and adds crucial points to plans in .md format.",
},
{
"role": "user",
"content": f"Update the following plan given the task status below, keep the .md format:\n{prompt}\n"
f"Include the current tasks in the improved plan, keep mind of their status and track them "
f"with a checklist:\n{tasks}\n Revised version should comply with the contents of the "
f"tasks at hand:",
},
],
max_tokens=int(max_tokens),
n=1,
temperature=float(temperature),
)
# Extract the improved plan from the response
improved_plan = response.choices[0].message.content.strip()
return improved_plan
def create_task(task_id=None, task_description: str = None, status=False):
task = {"description": task_description, "completed": status}
tasks = load_tasks()
tasks[str(task_id)] = task
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "tasks.json"
)
file_name = workdir
with open(file_name, "w") as f:
json.dump(tasks, f)
return tasks
def load_tasks() -> dict:
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "tasks.json"
)
file_name = workdir
if not os.path.exists(file_name):
with open(file_name, "w") as f:
f.write("{}")
with open(file_name) as f:
try:
tasks = json.load(f)
if isinstance(tasks, list):
tasks = {}
except json.JSONDecodeError:
tasks = {}
return tasks
def update_task_status(task_id):
tasks = load_tasks()
if str(task_id) not in tasks:
print(f"Task with ID {task_id} not found.")
return
tasks[str(task_id)]["completed"] = True
current_working_directory = os.getcwd()
workdir = os.path.join(
current_working_directory, "autogpt", "auto_gpt_workspace", "tasks.json"
)
file_name = workdir
with open(file_name, "w") as f:
json.dump(tasks, f)
return f"Task with ID {task_id} has been marked as completed."
| [
"You are an assistant that improves and adds crucial points to plans in .md format.",
"Update the following plan given the task status below, keep the .md format:\nPLACEHOLDER\nInclude the current tasks in the improved plan, keep mind of their status and track them with a checklist:\nPLACEHOLDER\n Revised version should comply with the contents of the tasks at hand:"
] |
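A small usage sketch for the task helpers in `planner.py` above. It assumes the script runs from a directory that already contains `./autogpt/auto_gpt_workspace/` (the functions build their paths from the current working directory) and that the module is importable as `planner`; both are assumptions about the deployment, not facts from the repository.

```python
# Illustrative calls to the file-backed task helpers defined above.
from planner import check_plan, create_task, load_tasks, update_task_status

print(check_plan())                    # creates plan.md with the starter checklist on first call
create_task(task_id=1, task_description="Draft the project outline")
create_task(task_id=2, task_description="Review the outline")
update_task_status(1)                  # marks task 1 as completed in tasks.json
print(load_tasks())                    # {'1': {'description': ..., 'completed': True}, '2': {...}}
```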
2024-01-10 | mvdmakesthings/research-bot | utilities~summerize.py | """Module providing a function to summarize content"""
from langchain.prompts import PromptTemplate
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
import utilities.llm_definitions as llm_definitions
def summerize(objective, content):
"""Generate a summary of the content based on the objective using LLM
Args:
objective (str): The query prompt we are asking the LLM to evaluate
content (str): The documents that we are using in context
Returns:
str: The summary of the content based on the objective.
"""
if len(content) < 10000:
return content
llm = llm_definitions.llm_ollama
# Break the text into small chunks
text_splitter = RecursiveCharacterTextSplitter(
separators=["\n\n", "\n"],
chunk_size=10000,
chunk_overlap=500
)
docs = text_splitter.create_documents([content])
# Generate the Prompt Template that will be used in the chain
map_prompt = """
Write a summary of the following text for {objective}:
"{text}"
SUMMARY:
"""
prompt_template = PromptTemplate(
template=map_prompt,
input_variables=["text", "objective"]
)
# Create Summary Chain
summary_chain = load_summarize_chain(
llm=llm,
chain_type='map_reduce',
map_prompt=prompt_template,
combine_prompt=prompt_template,
verbose=True
)
# Run the query and return the output
output = summary_chain.run(input_documents=docs, objective=objective)
return output
| [
"\n Write a summary of the following text for {objective}:\n \"{text}\"\n SUMMARY:\n "
] |
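A short usage sketch for `summerize()` above, assuming a local Ollama server with the `mistral` model pulled (which is what `utilities/llm_definitions.py`, the next row, configures); the file name and objective are illustrative.

```python
# Illustrative call to the summarisation helper defined above.
from utilities.summerize import summerize

long_text = open("report.txt").read()   # anything shorter than 10,000 characters is returned unchanged
summary = summerize(
    objective="the key risks discussed in the report",
    content=long_text,
)
print(summary)
```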
2024-01-10 | mvdmakesthings/research-bot | utilities~llm_definitions.py | from langchain.llms import Ollama
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
callback_handlers = [
StreamingStdOutCallbackHandler()
]
llm_ollama = Ollama(
model="mistral",
num_gpu=120,
temperature=0,
verbose=False,
callback_manager=CallbackManager(callback_handlers)
) | [] |
2024-01-10 | Silverasdf/sonificationproject2023 | src~cliptest.py | import torch
import torchvision.transforms as transforms
from PIL import Image
import clip
import os, sys
import numpy as np
#from openai_clip_simple_implementation import CLIPModel
import pandas as pd
import random
import math
from tqdm import tqdm
# Load the CLIP model
verbose = False
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/16", device=device)
back = True
# model = torch.load('/root/BlurImageTrainingProject/Experiments/CLIPModels/new_model_0.pth').get_model().to(device)
if back:
dir = "/root/BlurImageTrainingProject/Data_Back/New_Data_2023/Testing/0"
dir2 = "/root/BlurImageTrainingProject/Data_Back/New_Data_2023/Testing/1"
mode = 'CLIP - BackSeat'
type = 'CLIP_Back'
else:
dir = "/root/BlurImageTrainingProject/Data_Front/New_Data_2023/Testing/0"
dir2 = "/root/BlurImageTrainingProject/Data_Front/New_Data_2023/Testing/1"
mode = 'CLIP - FrontSeat'
type = 'CLIP_Front'
output_dir = '/root/BlurImageTrainingProject/Experiments/CLIP'
batch_size = 4
# Parse args for verbose flag
if len(sys.argv) > 1:
verbose = sys.argv[1] == '-verbose'
images = []
filenames = []
for num, filename in enumerate(os.listdir(dir)):
# Prepare the image
if not filename.endswith('.jpg'):
continue
filenames.append('0/'+filename)
for num, filename in enumerate(os.listdir(dir2)):
# Prepare the image
if not filename.endswith('.jpg'):
continue
filenames.append('1/'+filename)
batches = math.ceil(len(filenames)/batch_size)
# Prepare the text inputs
text_inputs = ["a picture of an empty seat", "a picture of a person"]
# Get the labels for the scores
labels = text_inputs
all_scores = []
targets = []
predictions = []
#For each image, get the label with the highest score
tp = 0
fp = 0
tn = 0
fn = 0
pbar = tqdm(total=batches, desc="Processing batches", unit="batch")
for i in range(batches):
#Take the batch of images
images = []
for j in range(batch_size):
try:
image_path = os.path.join(dir[:-2], filenames[i*batch_size + j])
except IndexError:
break
image = Image.open(image_path).convert("RGB")
image = preprocess(image)
images.append(image)
pbar.update(1)
# Tokenize the text inputs
image_inputs = torch.tensor(np.stack(images)).to(device)
text_tokens = clip.tokenize(text_inputs).to(device)
# Generate image and text features
with torch.no_grad():
image_features = model.encode_image(image_inputs).float()
text_features = model.encode_text(text_tokens).float()
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
similarity_scores = (text_features.cpu().numpy() @ image_features.cpu().numpy().T)
#Normalize scores
similarity_scores = (similarity_scores - similarity_scores.min()) / (similarity_scores.max() - similarity_scores.min())
#For each image, get the label with the highest score
for j, scores in enumerate(similarity_scores.T):
#Make sure the scores add up to 1
scores = scores / scores.sum()
if verbose:
print(f'{filenames[i*batch_size+j]} is {labels[scores.argmax()]}')
print(f'Scores: {scores}')
if filenames[i*batch_size+j][0] == '1' and labels[scores.argmax()] == labels[1]:
tp += 1
predictions.append('1')
elif filenames[i*batch_size+j][0] == '1' and labels[scores.argmax()] == labels[0]:
fn += 1
predictions.append('0')
elif filenames[i*batch_size+j][0] == '0' and labels[scores.argmax()] == labels[1]:
fp += 1
predictions.append('1')
elif filenames[i*batch_size+j][0] == '0' and labels[scores.argmax()] == labels[0]:
tn += 1
predictions.append('0')
all_scores.append(scores[1])
targets.append(filenames[i*batch_size+j][0])
if verbose:
print(f'Scores: {all_scores}')
print(f'Targets: {targets}')
print(f'Predictions: {predictions}')
print(f'Labels: {labels}')
print(f'TP: {tp}, FP: {fp}, TN: {tn}, FN: {fn}')
print(f'Accuracy: {(tp+tn)/(tp+tn+fp+fn)}')
print(f'Precision: {tp/(tp+fp)}')
print(f'Recall: {tp/(tp+fn)}')
print(f'F1: {2*tp/(2*tp+fp+fn)}')
#Get prevalence
prevalence = (tp+fn)/(tp+tn+fp+fn)
#Save results
df = pd.DataFrame({'name': filenames, 'y_true': targets, 'y_pred': predictions, 'y_scores': all_scores, 'mode': mode, 'prevalence': prevalence})
df.to_json(os.path.join(output_dir, f'{type}_perfs.json')) | [] |
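The script above min-max normalises the raw text-image similarities per batch, which makes each score depend on the other images in the batch. A more conventional CLIP zero-shot formulation scales the cosine similarities and applies a softmax over the text prompts; the sketch below shows that alternative for the same `model`, `image_inputs` and `text_tokens` as in the loop above, and is not what the repository ships.

```python
# Alternative scoring for the batch loop above: softmax over the text prompts
# instead of per-batch min-max normalisation. Reuses model, image_inputs and
# text_tokens from the script; a sketch, not the repository's method.
import torch

with torch.no_grad():
    image_features = model.encode_image(image_inputs).float()
    text_features = model.encode_text(text_tokens).float()
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    # One row per image, one column per prompt; each row sums to 1.
    probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)

person_scores = probs[:, 1]  # probability of "a picture of a person"
```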
2024-01-10 | shashwatpritish/My-AI | brain.py | fileopen = open("nerves.txt","r")
API = fileopen.read()
fileopen.close()
import openai
from dotenv import load_dotenv
openai.api_key = API
load_dotenv()
completion = openai.Completion()
def reply(question,chat_log = None):
FileLog = open("learnings.txt","r")
chat_log_template = FileLog.read()
FileLog.close()
if chat_log is None:
chat_log = chat_log_template
prompt = f'{chat_log}You : {question}\nJarvis : '
response = completion.create(
model = "text-davinci-002",
prompt=prompt,
temperature = 0.5,
max_tokens = 60,
top_p = 0.3,
frequency_penalty = 0.5,
presence_penalty = 0)
answer = response.choices[0].text.strip()
chat_log_template_update = chat_log_template + f"\nYou : {question} \nJarvis : {answer}"
FileLog = open("learnings.txt","w")
FileLog.write(chat_log_template_update)
FileLog.close()
return answer | [
"PLACEHOLDER\nYou : PLACEHOLDER \nJarvis : PLACEHOLDER",
"PLACEHOLDERYou : PLACEHOLDER\nJarvis : "
] |
2024-01-10 | axgpt/LLM_Selector | main_stream.py | from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
import streamlit as st
def select_best_model(user_input, models_dict):
llm = Ollama(model="neural-chat") #Selector Model
# Construct the prompt for the LLM
prompt = f"Given the user question: '{user_input}', evaluate which of the following models is most suitable: Strictly respond in 1 word only."
for model, description in models_dict.items():
prompt += f"\n- {model}: {description}"
# print('prompt:', prompt)
# Send the prompt to the LLM
llm_response = llm(prompt)
# print("llm_response: ", llm_response)
# Parse the response to find the best model
# This part depends on how your LLM formats its response. You might need to adjust the parsing logic.
best_model = parse_llm_response(llm_response, models_dict=models_dict)
return best_model
def parse_llm_response(response, models_dict):
# Convert response to lower case for case-insensitive matching
response_lower = response.lower()
# Initialize a dictionary to store the occurrence count of each model in the response
model_occurrences = {model: response_lower.count(model) for model in models_dict}
# Find the model with the highest occurrence count
best_model = max(model_occurrences, key=model_occurrences.get)
# If no model is mentioned or there is a tie, you might need additional logic to handle these cases
if model_occurrences[best_model] == 0:
return "neural-chat" # Or some default model
return best_model
models_dict = {
'neural-chat': 'A fine-tuned model based on Mistral with good coverage of domain and language.',
'mistral': 'The popular model which is able to generate coherent text and perform various natural language processing tasks.',
'codellama': 'A model that can use generate and discuss code and programme in python, c++, javacript and all the programming languages.',
'llama2-uncensored': 'Uncensored Llama 2 model. This model can give NSFW replies and can be used to generate anything which the other models are shy about. Questions on hacking, immorality, sexual and all bad contents can be asked to this model',
'orca-mini': 'A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware.',
'llama2-chinese': 'Llama 2 based model fine tuned to improve Chinese dialogue ability.',
'dolphin2.2-mistral': 'An instruct-tuned model based on Mistral. Version 2.2 is fine-tuned for improved conversation and empathy.',
}
# Layout the UI
st.set_page_config(page_title="Ollama Web UI by @PromptEngineer48", layout="wide")
st.title("Ollama Web UI by @PromptEngineer48")
# Main content area
st.header("How can I help you today?")
st.text_input("Send a message", key="user_input")
# Check for input
if st.session_state.user_input:
best_model = select_best_model(st.session_state.user_input, models_dict)
st.sidebar.write(f"THE SELECTED MODEL IS : {best_model}")
# You can then use this model in your LLM call
llm = Ollama(model=best_model, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
response = llm(st.session_state.user_input)
st.write(response)
### Future
# Memory
# Host
# More LLMs
# Own LLMs (Finetuning)
| [
"\n- PLACEHOLDER: PLACEHOLDER",
"Given the user question: 'PLACEHOLDER', evaluate which of the following models is most suitable: Strictly respond in 1 word only."
] |
2024-01-10 | axgpt/LLM_Selector | main_working.py | ## Working case ##Your LLM chooses the best LLM for your Specific Query
# Autoselect the best LLM for your specific Query | Ollama Implementation
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import Ollama
def select_best_model(user_input, models_dict):
llm = Ollama(model="neural-chat") #Selector Model
# Construct the prompt for the LLM
prompt = f"Given the user question: '{user_input}', evaluate which of the following models is most suitable: Strictly respond in 1 word only."
for model, description in models_dict.items():
prompt += f"\n- {model}: {description}"
# print('prompt:', prompt)
# Send the prompt to the LLM
llm_response = llm(prompt)
# print("llm_response: ", llm_response)
# Parse the response to find the best model
# This part depends on how your LLM formats its response. You might need to adjust the parsing logic.
best_model = parse_llm_response(llm_response, models_dict=models_dict)
return best_model
def parse_llm_response(response, models_dict):
# Convert response to lower case for case-insensitive matching
response_lower = response.lower()
# Initialize a dictionary to store the occurrence count of each model in the response
model_occurrences = {model: response_lower.count(model) for model in models_dict}
# Find the model with the highest occurrence count
best_model = max(model_occurrences, key=model_occurrences.get)
# If no model is mentioned or there is a tie, you might need additional logic to handle these cases
if model_occurrences[best_model] == 0:
return "neural-chat" # Or some default model
return best_model
models_dict = {
'neural-chat': 'A fine-tuned model based on Mistral with good coverage of domain and language.',
'mistral': 'The popular model which is able to generate coherent text and perform various natural language processing tasks.',
'codellama': 'A model that can use generate and discuss code and programme in python, c++, javacript and all the programming languages.',
'llama2-uncensored': 'Uncensored Llama 2 model. This model can give NSFW replies and can be used to generate anything which the other models are shy about. Questions on hacking, immorality, sexual and all bad contents can be asked to this model',
'orca-mini': 'A general-purpose model ranging from 3 billion parameters to 70 billion, suitable for entry-level hardware.',
'llama2-chinese': 'Llama 2 based model fine tuned to improve Chinese dialogue ability.',
'dolphin2.2-mistral': 'An instruct-tuned model based on Mistral. Version 2.2 is fine-tuned for improved conversation and empathy.',
}
while True:
user_input = input("\nType your question? => ")
if user_input.strip().lower() == "/exit":
print("Exiting the program.")
break
best_model = select_best_model(user_input, models_dict)
print("Selected model:", best_model)
llm = Ollama(model=best_model, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]))
response = llm(user_input) | [
"\n- PLACEHOLDER: PLACEHOLDER",
"Given the user question: 'PLACEHOLDER', evaluate which of the following models is most suitable: Strictly respond in 1 word only."
] |
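`parse_llm_response()` above simply counts how often each model name appears in the selector's reply and falls back to `neural-chat` when nothing matches. A quick illustration with made-up replies, assuming it runs in the same module (importing the file directly would start its input loop):

```python
# Made-up selector replies showing how parse_llm_response() resolves them.
print(parse_llm_response("codellama", models_dict))                        # -> "codellama"
print(parse_llm_response("I would pick mistral for this.", models_dict))   # -> "mistral"
print(parse_llm_response("None of these fit well.", models_dict))          # -> "neural-chat" (fallback)
```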
2024-01-10 | BeastyZ/LLM-Verified-Retrieval | Iterative_retrieval.py | import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
import argparse
import json
import os
import concurrent.futures
import threading
from typing import List, Dict
from tqdm import tqdm
import openai
import time
import csv
import numpy as np
import torch
from openai_account_manager import get_account_manager
from utils import (
load_embeddings,
save_embeddings,
get_demonstration,
get_messages as get_messages_field
)
from llm_retrieval_related.iterative_select_supporting_documents import (
create_stage2_select_prompt as get_messages,
iterative_select_supporting_documents_multi_thread
)
device = "cuda" if torch.cuda.is_available() else "cpu"
def repeat_until_success_call_openai_api(func):
def wrapper(*args, **kwargs):
global account_manager
while True:
result = None
# account_manager = kwargs.get('account_manager', None)
thread_id = kwargs.get('thread_id', None)
account = account_manager.thread_to_account.get(thread_id, None) # triad
if account is None:
account = account_manager.get_next_account(thread_id)
openai.api_key = account[-1]
try:
result = func(*args, **kwargs)
except openai.error.APIConnectionError as e:
logger.info('openai connection error, so retry after sleeping 5 seconds')
logger.info(e)
time.sleep(5)
except openai.error.RateLimitError as e:
logger.info(type(e))
logger.info(e)
logger.info('e._message:{}'.format(e._message))
if 'quota' in e._message:
logger.info('now openai account {} runs out. so use next.'.format(account[-1]))
logger.info(type(e))
logger.info(e)
account = account_manager.get_next_account(thread_id, account)
account_manager.thread_to_account[thread_id] = account
else:
logger.info('openai rate limit error, so retry after sleeping 60 seconds')
logger.info(e)
time.sleep(60)
except openai.error.AuthenticationError as e:
if 'This key is associated with a deactivated account' in e._message:
logger.info('the account {} is deactivated. so use next'.format(account[-1]))
logger.info(e)
account = account_manager.get_next_account(thread_id, account)
account_manager.thread_to_account[thread_id] = account
else:
logger.info('meet unexpected AuthenticationError, so retry after sleeping 5 seconds')
logger.info(e)
account = account_manager.get_next_account(thread_id, account)
account_manager.thread_to_account[thread_id] = account
except openai.error.OpenAIError as e:
logger.info('meet unexpected openai error, so retry after sleeping 5 seconds')
logger.info(e)
time.sleep(5)
except Exception as e:
raise e
if result != None:
return result
else:
pass
return wrapper
@repeat_until_success_call_openai_api
def call_chatgpt(model: str, messages: List[Dict], thread_id: int, max_tokens: int=4096) -> str:
"""Call ChatGPT.
Args
----
model: str
ID of the model to use.
messages: List[Dict]
A list of messages comprising the conversation so far.
thread_id: int
Thread ID
max_tokens: int=4096
Max tokens to generate.
Returns
-------
ret: str
Answer to prompt from ChatGPT.
"""
if max_tokens == 4096:
completion = openai.ChatCompletion.create(model=model, messages=messages, temperature=0)
else:
completion = openai.ChatCompletion.create(model=model, messages=messages, temperature=0, max_tokens=max_tokens)
ret = completion['choices'][0]['message']['content']
return ret
def gen_used_field_multi_thread(questions: str,
prompt_style: str,
doc: Dict,
model_name: str = "gpt-3.5-turbo-0301",
target_used_field: str = None,
# target_used_field: Literal["summary_use_sub", "summary_no_sub", "summary", "answer"]=None,
max_tokens: int = 150) -> None:
"""Generate used field by multithreading.
Args
----
questions: str
Given questions.
prompt_style: str
Set the type of user's content.
doc: Dict
Single document.
model_name: str="gpt-3.5-turbo-0301"
OpenAI model.
target_used_field: Literal["summary_use_sub", "summary_no_sub", "summary", "answer"]=None
Which used field to generate
max_tokens: int=150
Max tokens to generate.
"""
thread_id = threading.current_thread().ident
messages = get_messages_field(questions, prompt_style, doc)
ret = call_chatgpt(model=model_name, messages=messages, thread_id=thread_id, max_tokens=max_tokens)
doc[target_used_field] = ' '.join(ret.strip().split())
def filter_bad_question(item: Dict,
openai_model_name: str,
filtration_system_prompt: str,
k: int,
idf_use_letter: str,
use_title: int,
used_doc_field: str,
filtration_result: str) -> None:
"""Drop bad data that documents cannot fully support the question.
Args
----
item: Dict
Single data.
openai_model_name: str
ID of the model to use.
filtration_system_prompt: str
System prompt for instruction.
k: int
K number of documents to be used.
idf_use_letter: str
Use uppercase letters, lowercase letters, or integers to mark the documents.
use_title: int
Whether to use title or not.
used_doc_field: str
Which field of document to use.
filtration_result: str=Literal["judgment", "threshold"]
Field used for saving.
"""
global account_manager
if 'qa_pairs' in item:
questions = list(map(lambda x: x['question'], list(item['qa_pairs'])))
else:
question = item['question']
questions = [question]
# Using top-k documents.
docs = item["docs"][:k]
thread_id = threading.current_thread().ident
messages = get_messages(questions, docs, k, idf_use_letter, use_title, filtration_system_prompt, used_doc_field)
result = call_chatgpt(model=openai_model_name, messages=messages, thread_id=thread_id)
item[filtration_result] = result
def update_query(user_query: str, system_prompt: str, model: str, d: Dict, max_tokens: int=4096) -> None:
"""Update query by ChatGPT.
Args
----
user_query: str
Query for ChatGPT.
system_prompt: str
System prompt for ChatGPT.
model: str
ID of the model to use.
d: Dict
Single data.
max_tokens: int=4096
Max tokens to generate.
"""
messages = [
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_query}
]
thread_id = threading.current_thread().ident
query = call_chatgpt(model=model, messages=messages, thread_id=thread_id, max_tokens=max_tokens)
d["update_query_using_missing_info_from_question_and_psgs"] = query
def bm25_sphere_retrieval(data: List[Dict], system_prompt: str=None, args: object=None) -> None:
"""Use retriever of bm25 to do retrieval on the corpus of sphere.
Args
----
data: List[Dict]
A list of data used for retrieval.
system_prompt: str=None
Instruction for ChatGPT when updating query.
args: object=None
Parameters.
"""
if args.update_query_using_missing_info_from_question_and_psgs and system_prompt is not None:
logger.info("update_query_using_missing_info_from_question_and_psgs")
# Update query using multi-thread.
with concurrent.futures.ThreadPoolExecutor(max_workers=args.thread_num) as executor:
futures = []
for d in data:
# Get question(s)
questions = d["question"]
psgs = []
for index, doc in enumerate(d["docs"]):
psgs.append(f"{index + 1}.{doc['text']}")
psgs = "\n".join(psgs)
user_query = f"Question:\n{questions}\nAnswering Passages:\n{psgs}"
future = executor.submit(
update_query,
user_query=user_query,
system_prompt=system_prompt,
model=args.openai_model_name,
d=d
)
futures.append(future)
for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
future.result()
else:
with concurrent.futures.ThreadPoolExecutor(max_workers=args.thread_num) as executor:
futures = []
for d in data:
questions = d["question"]
user_query = f'Please write a passage to answer the question.\nQuestion: {questions}\nPassage: '
future = executor.submit(
update_query,
user_query=user_query,
system_prompt="You are a helpful assistant.",
model=args.openai_model_name,
d=d,
)
futures.append(future)
for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
future.result()
def bge_wiki_retrieval(data: List[Dict], system_prompt: str=None, args: object=None) -> None:
"""Use retriever of bge to do retrieval on the corpus of wiki.
Args
----
data: List[Dict]
A list of data used for retrieval.
system_prompt: str=None
Instruction for ChatGPT when updating query.
args: object=None
Parameters.
"""
if args.update_query_using_missing_info_from_question_and_psgs and system_prompt is not None:
logger.info("update_query_using_missing_info_from_question_and_psgs")
# Update query using multi-thread.
with concurrent.futures.ThreadPoolExecutor(max_workers=args.thread_num) as executor:
futures = []
for d in data:
# Get question(s)
if args.use_sub_questions:
asqa_questions = list(map(lambda x: x['question'], list(d['qa_pairs'])))
questions = "\n".join(asqa_questions)
else:
questions = d["question"]
# Get passages.
psgs = []
for index, doc in enumerate(d["docs"]):
psgs.append(f"{index + 1}.{doc['text']}")
psgs = "\n".join(psgs)
user_query = f"Question:\n{questions}\nAnswering Passages:\n{psgs}"
future = executor.submit(
update_query,
user_query=user_query,
system_prompt=system_prompt,
model=args.openai_model_name,
d=d
)
futures.append(future)
for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
future.result()
# Load questions
questions = []
for d in data:
if args.update_query_using_missing_info_from_question_and_psgs and system_prompt is not None:
if "update_query_using_missing_info_from_question_and_psgs" not in d:
raise ValueError
query = d["update_query_using_missing_info_from_question_and_psgs"]
questions.append(query)
else:
if args.use_sub_questions:
logger.info("Load sub questions.")
asqa_questions = list(map(lambda x: x['question'], list(d['qa_pairs'])))
questions.append("\n".join(asqa_questions))
else:
questions.append(d["question"])
# Handle question togather and split the result of question encoding into several batches.
from tqdm.autonotebook import trange
q_embeddings = bge_retriever.encode_queries(questions)
matrix_batch_size = 128
len_p_embeddings = len(p_embeddings)
docs_list = []
for start_index in trange(0, len(q_embeddings), matrix_batch_size):
embeddings_batch = q_embeddings[start_index: start_index + matrix_batch_size]
embeddings_batch = torch.tensor(embeddings_batch).cuda()
p_embeddings_half = torch.tensor(p_embeddings[:len_p_embeddings // 2]).cuda()
scores_1 = torch.matmul(embeddings_batch, p_embeddings_half.t())
del p_embeddings_half
torch.cuda.empty_cache()
p_embeddings_half = torch.tensor(p_embeddings[len_p_embeddings // 2:]).cuda()
scores_2 = torch.matmul(embeddings_batch, p_embeddings_half.t())
del p_embeddings_half
torch.cuda.empty_cache()
scores = torch.cat((scores_1, scores_2), dim=1)
assert scores.shape[1] == len_p_embeddings
# Save top-k documents.
values, indices = torch.topk(scores, args.top_k_retrieval)
for i, doc_idx in enumerate(indices):
docs = []
for j, idx in enumerate(doc_idx):
title, text = documents[idx.item()].split("\n")
docs.append({"id": str(idx.item() + 1), "title": title, "text": text, "score": values[i][j].item()})
docs_list.append(docs)
# Save retrieved top-k docs
for d_id in range(len(data)):
data[d_id]["docs"] = docs_list[d_id]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Iterative retrieval.")
parser.add_argument("--max_iteration", type=int, default=4)
parser.add_argument("--dataset_name", type=str, required=True)
parser.add_argument("--thread_num", type=int, default=10)
parser.add_argument("--openai_model_name", type=str, default="gpt-3.5-turbo-0301")
parser.add_argument("--use_sub_questions", type=int, required=True)
parser.add_argument("--use_title", type=int, required=True)
parser.add_argument("--used_doc_field", type=str, required=True)
# Args for retrieval
parser.add_argument("--input_file", type=str, required=True, help="Path to the original input file for the first retrieval.")
parser.add_argument("--retriever", type=str, required=True)
parser.add_argument("--top_k_retrieval", type=int, default=100)
parser.add_argument("--update_prompt_file", type=str, required=True)
parser.add_argument("--update_query_using_missing_info_from_question_and_psgs", type=int, help="Whether to use this function or not.")
parser.add_argument("--corpus_path", type=str, required=True)
# Args for generating used field.
parser.add_argument("--top_k_field", type=int, default=50)
parser.add_argument("--prompt_style", type=str, choices=['summary', 'extraction', 'answer'], required=True)
parser.add_argument('--target_used_field', type=str, required=True)
parser.add_argument("--max_tokens", type=int, default=4096)
# Args for reranker
parser.add_argument("--reranker", type=str, default="llm-select")
parser.add_argument("--position", type=str, required=True, help="Put the 5 old selected docs into the head or tail.")
parser.add_argument("--top_k_reranking", type=int, default=5)
parser.add_argument("--window_size", type=int, default=20)
parser.add_argument("--reranking_prompt_file", type=str, required=True)
parser.add_argument('--doc_num', type=int, default=100, help="Use top-k docs for reranking.")
# Args for filtration
parser.add_argument('--demo_file', type=str, default=None, help="Path to demonstration file.")
parser.add_argument("--top_k_filtration", type=int, default=5)
parser.add_argument("--idf_use_letter", type=str, default="int")
parser.add_argument("--filtration_prompt_file", type=str, required=True)
parser.add_argument("--threshold", type=float)
parser.add_argument("--filtration_method", type=str, choices=["threshold", "judgment"])
args = parser.parse_args()
if args.filtration_method == "threshold":
assert args.threshold is not None and args.filtration_prompt_file == "prompt14_score"
# Save args
args_dict = vars(args)
args_dir = f'iter_retrieval_50/{args.dataset_name}_input'
os.makedirs(args_dir, exist_ok=True)
with open(f"{args_dir}/args_{args.retriever}_max_iteration-{args.max_iteration}_{args.update_prompt_file}_{args.position}.json", 'w') as f:
json.dump(args_dict, f, indent=4)
retrieval_dir = f"iter_retrieval_50/{args.dataset_name}_max-{args.max_iteration}_{args.retriever}-{args.update_prompt_file}"
os.makedirs(retrieval_dir, exist_ok=True)
field_dir = f"iter_retrieval_50/{args.dataset_name}_max-{args.max_iteration}_{args.retriever}-{args.update_prompt_file}_{args.target_used_field}"
os.makedirs(field_dir, exist_ok=True)
reranker_dir = f"iter_retrieval_50/{args.dataset_name}_max-{args.max_iteration}_{args.retriever}-{args.update_prompt_file}_{args.target_used_field}_{args.reranker}-{args.position}"
os.makedirs(reranker_dir, exist_ok=True)
filtration_dir = f"iter_retrieval_50/{args.dataset_name}_max-{args.max_iteration}_{args.retriever}-{args.update_prompt_file}_{args.target_used_field}_{args.reranker}-{args.position}_filtration"
os.makedirs(filtration_dir, exist_ok=True)
# Get OpenAI account manager
account_manager = get_account_manager('openai_account_files/accounts.txt', 'openai_account_files/used.txt', multi_thread=True)
if args.dataset_name == "eli5":
logger.info("Load bm25 index (only sphere), this may take a while... ")
# For single process
# searcher = LuceneSearcher(args.corpus_path)
elif args.dataset_name == "asqa" or args.dataset_name == "qampari":
if args.retriever in ["bge-large-en-v1.5", "bge-base-en-v1.5"]:
from FlagEmbedding import FlagModel
logger.info(f"Load BAAI/{args.retriever} model.")
bge_retriever = FlagModel(f"BAAI/{args.retriever}",
query_instruction_for_retrieval="Represent this sentence for searching relevant passages: ",
use_fp16=True)
# Load documents
logger.info("Load documents.")
documents = []
with open(args.corpus_path) as f:
reader = csv.reader(f, delimiter="\t")
for i, row in enumerate(reader):
if i == 0:
continue
documents.append(row[2] + "\n" + row[1])
# model_name = args.retriever.split("/")[1]
if os.path.exists(f"./embedding/psgs_w100_embedding_{args.retriever}.pkl"):
logger.info("Load local embeddings.")
p_embeddings = load_embeddings(f"./embedding/psgs_w100_embedding_{args.retriever}.pkl")
else:
logger.info("Build embeddings.")
p_embeddings = bge_retriever.encode(documents, batch_size=256)
save_embeddings(p_embeddings, f"./embedding/psgs_w100_embedding_{args.retriever}.pkl")
else:
raise NotImplementedError
else:
raise NotImplementedError
for iter in range(args.max_iteration):
# 1. Retrieval stage
logger.info("Start retrieval.")
retrieval_path = f"{retrieval_dir}/retrieval_output_iter-{iter}.json"
if os.path.exists(retrieval_path):
logger.warning(f"Retrieval output in iteration {iter} already exists.")
else:
if iter > 0:
logger.info(f"iter:{iter} => Update query and then start retrieval using new query.")
update_system_prompt = ''.join(open('llm_retrieval_prompt_drafts/{}.md'.format(args.update_prompt_file)).readlines())
with open(f"{filtration_dir}/filtration_output_iter-{iter - 1}_no.json") as f:
data = json.load(f)
if args.dataset_name == "asqa":
bge_wiki_retrieval(data, update_system_prompt, args)
elif args.dataset_name == "qampari":
bge_wiki_retrieval(data, update_system_prompt, args)
elif args.dataset_name == "eli5":
# Get new prompt
bm25_sphere_retrieval(data, update_system_prompt, args)
queries = []
for d in data:
queries.append(d["update_query_using_missing_info_from_question_and_psgs"])
logger.info("Start bm25 retrieval using multi-process.")
from multi_process.bm25_multi_process import BM25MultiProcess
bm25_multiprocess = BM25MultiProcess(args.corpus_path)
pool = bm25_multiprocess.start_multi_process_pool(process_num=15)
docs_list = bm25_multiprocess.retrieve_multi_process(queries, pool)
bm25_multiprocess.stop_multi_process_pool(pool)
for i in range(len(data)):
data[i]["docs"] = docs_list[i]
else:
raise NotImplementedError
else:
logger.info("First time doing a retrieval.")
with open(args.input_file) as f:
data = json.load(f)
if args.dataset_name == "asqa":
bge_wiki_retrieval(data, None, args)
elif args.dataset_name == "qampari":
bge_wiki_retrieval(data, None, args)
elif args.dataset_name == "eli5":
bm25_sphere_retrieval(data, None, args)
else:
raise NotImplementedError
with open(retrieval_path, "w") as f:
json.dump(data, f, indent=4)
logger.info("Finish retrieval.")
# 2. Generating used field stage
logger.info("Start generating used field.")
field_path = f"{field_dir}/field_output_iter-{iter}.json"
if os.path.exists(field_path):
logger.warning(f"Field output in iteration {iter} already exists.")
else:
with open(retrieval_path) as f:
data = json.load(f)
with concurrent.futures.ThreadPoolExecutor(max_workers=args.thread_num) as executor:
futures = []
for d in data:
if args.use_sub_questions and 'qa_pairs' in d:
questions = list(map(lambda x: x['question'], list(d['qa_pairs'])))
else:
questions = [d["question"]]
questions = '\n'.join(questions)
for doc in d["docs"][:args.top_k_field]:
future = executor.submit(
gen_used_field_multi_thread,
questions,
args.prompt_style,
doc,
args.openai_model_name,
args.target_used_field,
max_tokens=args.max_tokens
)
futures.append(future)
for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
future.result()
with open(field_path, "w") as f:
json.dump(data, f, indent=4)
logger.info("Finish generating used field.")
# 3. Reranking stage
logger.info("Start reranking.")
reranking_path = f"{reranker_dir}/reranker_output_iter-{iter}.json"
if os.path.exists(reranking_path):
logger.warning(f"Reranker output in iteration {iter} already exists.")
else:
reranking_system_prompt = ''.join(open('llm_retrieval_prompt_drafts/{}.md'.format(args.reranking_prompt_file)).readlines())
select_gpt_hyper_parameter = {
'k': args.top_k_reranking, 'window_size': args.window_size, 'reversed_browse_order': 0, 'selected_doc_first': 1,
'idf_use_letter': "int", 'use_title': args.use_title, 'model_name': args.openai_model_name,
'used_doc_field_in_retrieval': args.used_doc_field, "use_sub_questions": args.use_sub_questions,
"position": args.position, "stage2_select_system_prompt": reranking_system_prompt, "doc_num": args.doc_num
}
if iter == 0:
old_data = None
else:
with open(f"{filtration_dir}/filtration_output_iter-{iter - 1}_no.json") as f:
old_data = json.load(f)
with open(field_path) as f:
data = json.load(f)
reranked_data = iterative_select_supporting_documents_multi_thread(data, select_gpt_hyper_parameter, args.thread_num, True, old_data=old_data)
with open(reranking_path, "w") as f:
json.dump(reranked_data, f, indent=4)
logger.info("Finish reranking.")
# If it is the last iteration, skip filtration and exit loop.
if iter == args.max_iteration - 1:
break
# 4. Filtration stage
logger.info("Start filtration.")
filtration_path = f"{filtration_dir}/filtration_output_iter-{iter}_yes.json"
if os.path.exists(filtration_path):
logger.warning(f"Yes/No filtration output in iteration {iter} already exists.")
else:
with open(reranking_path) as f:
data = json.load(f)
filtration_system_prompt = ''.join(open('llm_retrieval_prompt_drafts/{}.md'.format(args.filtration_prompt_file)).readlines())
if args.demo_file is not None:
logger.warning("Use demonstration for filtration.")
assert "with_demo" in args.filtration_prompt_file
with open(args.demo_file) as f:
demo_data = json.load(f)
demos = get_demonstration(demo_data)
filtration_system_prompt = filtration_system_prompt.replace("{Demo}", demos)
if args.filtration_method == "judgment":
filtration_result = "judgment"
else:
raise NotImplementedError
with concurrent.futures.ThreadPoolExecutor(max_workers=args.thread_num) as executor:
futures = []
for d in data:
future = executor.submit(
filter_bad_question,
d,
args.openai_model_name,
filtration_system_prompt,
args.top_k_filtration,
args.idf_use_letter,
args.use_title,
args.used_doc_field,
filtration_result
)
futures.append(future)
for future in tqdm(concurrent.futures.as_completed(futures), total=len(futures)):
future.result()
wanted_data = []
dropped_data = []
for d in data:
# Use JudgmentGPT
if args.filtration_method == "judgment":
if "[YES]" in d["judgment"]:
wanted_data.append(d)
else:
dropped_data.append(d)
else:
raise NotImplementedError
with open(filtration_path, "w") as f:
json.dump(wanted_data, f, indent=4)
# All data meets requirements
if len(dropped_data) == 0:
logger.info("All data meets requirements, so exit.")
break
with open(f"{filtration_dir}/filtration_output_iter-{iter}_no.json", "w") as f:
json.dump(dropped_data, f, indent=4)
logger.info("Finish filtration.")
# Save final data for run & eval stage
logger.info("Start Saving all data for run & eval")
all_data = []
for iter in range(args.max_iteration):
if iter == args.max_iteration - 1:
with open(f"{reranker_dir}/reranker_output_iter-{iter}.json") as f:
all_data += json.load(f)
else:
with open(f"{filtration_dir}/filtration_output_iter-{iter}_yes.json") as f:
all_data += json.load(f)
save_dir = f"iter_retrieval_50/{args.dataset_name}_final_data"
os.makedirs(save_dir, exist_ok=True)
save_path = f"{save_dir}/final_data_{args.retriever}_max_iteration-{args.max_iteration}_{args.update_prompt_file}_{args.position}.json"
if os.path.exists(save_path):
logger.warning(f"{args.dataset_name} final data already exist.")
else:
with open(save_path, "w") as f:
json.dump(all_data, f, indent=4)
logger.info("Get it all done.")
| [
"llm_retrieval_prompt_drafts/{}.md"
] |
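The `repeat_until_success_call_openai_api` wrappers in this file and the next one follow the same shape: loop around the API call, catch transient errors, rotate accounts or sleep, and return only once a result arrives. Below is a stripped-down, library-agnostic sketch of that pattern; the exception types, sleep time and logger are illustrative.

```python
# Library-agnostic sketch of the retry pattern used above. Exception types,
# sleep time and logging are illustrative, not tied to the OpenAI SDK.
import functools
import logging
import time

logger = logging.getLogger(__name__)


def retry_until_success(transient_exceptions, sleep_seconds=5):
    """Retry the wrapped call until it returns without raising a transient error."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            while True:
                try:
                    return func(*args, **kwargs)
                except transient_exceptions as e:
                    logger.info("transient error %s, retrying in %s seconds", e, sleep_seconds)
                    time.sleep(sleep_seconds)
        return wrapper
    return decorator


@retry_until_success((ConnectionError, TimeoutError))
def flaky_call():
    ...  # stand-in for the real API call
```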
2024-01-10 | BeastyZ/LLM-Verified-Retrieval | llm_retrieval_related~iterative_select_supporting_documents.py | import copy
import threading
import tqdm
import openai
from transformers import AutoTokenizer
import time
from typing import List, Dict, Tuple, Union
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
gpt2_tokenizer = AutoTokenizer.from_pretrained('gpt2')
def truncate_doc_in_user_prompt(user_prompt):
last_index = user_prompt.rfind('Content:\n')
if last_index != -1:
user_prompt = user_prompt[:last_index]
user_prompt = '\n'.join(user_prompt.split('\n')[:-2])
user_prompt = user_prompt.strip()
return user_prompt
def letter_to_int(letter):
if 'a' <= letter <= 'z':
return ord(letter) - ord('a')
elif 'A' <= letter <= 'Z':
return ord(letter) - ord('A')
else:
print('letter:{}'.format(letter))
raise NotImplementedError
def letter_to_int_upper(letter):
if 'A' <= letter <= 'Z':
return ord(letter) - ord('A')
else:
print('letter:{}'.format(letter))
raise NotImplementedError
def letter_to_int_lower(letter):
if 'a' <= letter <= 'z':
return ord(letter) - ord('a')
else:
print('letter:{}'.format(letter))
raise NotImplementedError
def int_to_letter_lower(n):
if 0 <= n <= 25:
return chr(n + ord('a'))
else:
raise ValueError('The entered integer must be between 0 and 25')
def int_to_letter_upper(n):
if 0 <= n <= 25:
return chr(n + ord('A'))
else:
raise ValueError('The entered integer must be between 0 and 25')
def create_stage2_select_prompt(questions: List[str],
docs: List,
k: int,
idf_use_letter: str,
use_title: int,
stage2_select_system_prompt: str,
used_doc_field: str,
reverse_doc_order: bool=False) -> List[Dict]:
"""Create the prompt for selection in the 2nd stage.
Args
----
questions: List[str]
The question.
docs: List
The documents relevant to the question
k: int
A specified number of documents for answering the user's specific question(s).
idf_use_letter: str
Use uppercase letters, lowercase letters, or integers to mark the documents.
use_title: int
Whether to use title or not.
stage2_select_system_prompt: str
System prompt for instruction.
used_doc_field_in_retrieval: str
Which field of document to use in retrieval.
reverse_doc_order: bool=False
Whether to reverse the order of the documents or not.
Returns
-------
prompt: List[Dict]
Prompt for selection.
"""
user_prompt = 'Question:\n{}\n\nk: {}\n\n'.format('\n'.join(questions), k)
user_prompt += 'Candidate Documents:\n\n'
prompt_doc_str_list = []
for i, doc in enumerate(docs):
if idf_use_letter == 'lower':
idf = int_to_letter_lower(i)
elif idf_use_letter == 'upper':
idf = int_to_letter_upper(i)
else:
idf = i + 1
if use_title:
prompt_doc_str_list.append('{}\nTitle:\n{}\nContent:\n{}\n\n'.format(idf, doc['title'], doc[used_doc_field]))
else:
prompt_doc_str_list.append('{}\nContent:\n{}\n\n'.format(idf, doc[used_doc_field]))
if reverse_doc_order:
user_prompt += ''.join(list(reversed(prompt_doc_str_list)))
else:
user_prompt += ''.join(prompt_doc_str_list)
prompt = [
{'role': 'system', 'content': stage2_select_system_prompt},
{'role': 'user', 'content': user_prompt.strip()}
]
return prompt
def select_k_supporting_documents(questions: List[str],
tmp_selected_docs: List,
extra_docs_to_browse: List[Dict],
k: int,
selected_doc_first: int,
idf_use_letter: str,
use_title: int,
model_name: str,
stage2_select_system_prompt: str,
used_doc_field_in_retrieval: str,
thread: "instance") -> Dict:
"""Select k supporting documents.
Args
----
questions: List[str]
The question.
tmp_selected_docs: List
Documents already selected in previous windows and carried into this call.
extra_docs_to_browse: List[Dict]
New candidate documents to browse in the current window.
k: int
A specified number of documents for answering the user's specific question(s).
selected_doc_first: int
Whether to place the already selected documents before the new candidates in the prompt.
idf_use_letter: str
Use uppercase letters, lowercase letters, or integers to mark the documents.
use_title: int
Whether to use title or not.
model_name: str
OpenAI model name.
stage2_select_system_prompt: str
System prompt for instruction.
used_doc_field_in_retrieval: str
Which field of document to use.
thread: "instance"
The worker thread object holding the OpenAI account used for the calls.
"""
unbrowsed_docs = []
assert idf_use_letter in ['upper', 'lower', 'int']
while 1:
if selected_doc_first:
docs_concat = tmp_selected_docs + extra_docs_to_browse
else:
docs_concat = extra_docs_to_browse + tmp_selected_docs
messages = create_stage2_select_prompt(questions, docs_concat, k, idf_use_letter, use_title, stage2_select_system_prompt, used_doc_field_in_retrieval)
prompt_token_num = len(gpt2_tokenizer.tokenize(messages[0]['content'] + messages[1]['content']))
if prompt_token_num > 3900:
unbrowsed_docs.insert(0, extra_docs_to_browse[-1])
extra_docs_to_browse.pop()
else:
break
if len(extra_docs_to_browse) == 0:
break
final_docs_in_query = [docs_concat]
if len(unbrowsed_docs) > 0:
logger.info('before openai query, unbrowsed_docs > 0 : {}'.format(len(unbrowsed_docs)))
def repeat_until_success_call_openai_api(func):
def wrapper(*args, **kw):
while True:
result = None
try:
result = func(*args, **kw)
except openai.error.APIConnectionError as e:
if thread.print_error:
logger.info('openai connection error, so retry after sleeping 5 seconds')
logger.info(e)
time.sleep(5)
except openai.error.RateLimitError as e:
logger.info(type(e))
logger.info(e)
logger.info('e._message:{}'.format(e._message))
if 'quota' in e._message:
if thread.print_error:
logger.info('now openai account {} runs out. so use next.'.format(thread.account[-1]))
logger.info(type(e))
logger.info(e)
thread.account = thread.openai_account_manager_multi_thread.get_next_account(thread.thread_id,
thread.account)
elif "maximum context length is" in e._message:
unbrowsed_docs.insert(0, extra_docs_to_browse[-1])
extra_docs_to_browse.pop()
if selected_doc_first:
docs_concat = tmp_selected_docs + extra_docs_to_browse
else:
docs_concat = extra_docs_to_browse + tmp_selected_docs
final_docs_in_query[0] = docs_concat
messages = create_stage2_select_prompt(questions, docs_concat, k, idf_use_letter, use_title, stage2_select_system_prompt, used_doc_field_in_retrieval)
print('in repeat_until_success_call_openai_api, docs < 20 : {}'.format(
len(docs_concat)))
kw['messages'] = messages
else:
if True:
logger.info('openai rate limit error, so retry after sleeping 45 seconds')
time.sleep(45)
except openai.error.AuthenticationError as e:
if 'This key is associated with a deactivated account' in e._message:
logger.info('the account {} is deactivated. so use next'.format(thread.account[-1]))
if thread.print_error:
logger.info(e)
thread.account = thread.openai_account_manager_multi_thread.get_next_account(thread.thread_id,
thread.account)
else:
logger.info('meet unexpected AuthenticationError, so retry after sleeping 5 seconds')
if thread.print_error:
logger.info(e)
thread.account = thread.openai_account_manager_multi_thread.get_next_account(thread.thread_id,
thread.account)
except openai.error.InvalidRequestError as e:
if "maximum context length is" in e._message:
unbrowsed_docs.insert(0, extra_docs_to_browse[-1])
extra_docs_to_browse.pop()
if selected_doc_first:
docs_concat = tmp_selected_docs + extra_docs_to_browse
else:
docs_concat = extra_docs_to_browse + tmp_selected_docs
final_docs_in_query[0] = docs_concat
messages = create_stage2_select_prompt(questions, docs_concat, k, idf_use_letter, use_title, stage2_select_system_prompt, used_doc_field_in_retrieval)
print('in repeat_until_success_call_openai_api, docs < 20 : {}'.format(len(docs_concat)))
kw['messages'] = messages
except openai.error.OpenAIError as e:
logger.info('meet unexpected openai error, so retry after sleeping 5 seconds')
logger.info(e)
logger.info(type(e))
time.sleep(3)
except Exception as e:
raise e
if result != None:
return result
else:
pass
return wrapper
@repeat_until_success_call_openai_api
def tmp_func(messages):
return openai.ChatCompletion.create(model=model_name, messages=messages, temperature=0, max_tokens=64, api_key=thread.account[-1])
if "gpt-3.5-turbo" in model_name:
response = tmp_func(messages=messages)
response = response['choices'][0]['message']['content']
else:
raise NotImplementedError
response = response.split('\n')
if len(response) > 1:
logger.info('response has > 1 lines, so just use its first line which has the selected documents')
logger.warning(f"response: \n{response}")
response = response[0]
if len(unbrowsed_docs) > 0:
logger.info('after openai query, unbrowsed_docs > 0 : {}'.format(len(unbrowsed_docs)))
response_document_identifiers = response.replace(',', ' ').replace('[', ' ').replace(']', ' ').strip().split()
selected_doc_idfs = []
docs_concat_in_openai_query = final_docs_in_query[0]
for idf in response_document_identifiers:
try:
if idf_use_letter == 'upper':
idf = letter_to_int_upper(idf)
elif idf_use_letter == 'lower':
idf = letter_to_int_lower(idf)
else:
idf = int(idf) - 1
if idf >= len(docs_concat_in_openai_query):
print('idf={}, response={}'.format(idf, response))
else:
selected_doc_idfs.append(idf)
except:
pass
if len(selected_doc_idfs) != k:
        print('len(selected_doc_idfs) != k, k:{}, len:{},\nresponse:\n{}\nresponse_document_identifiers:\n{}'.format(k, len(selected_doc_idfs), response, response_document_identifiers))
selected_doc_idfs = selected_doc_idfs[:k]
docs_concat_in_openai_query = final_docs_in_query[0]
result_dict = {}
selected_docs = []
for idf in selected_doc_idfs:
selected_docs.append(docs_concat_in_openai_query[idf])
result_dict['selected_docs'] = selected_docs
original_openai_response = response
result_dict['original_openai_response'] = original_openai_response
parsed_doc_idfs = selected_doc_idfs
result_dict['parsed_doc_idfs'] = parsed_doc_idfs
result_dict['unbrowsed_docs'] = unbrowsed_docs
return result_dict
def iterative_select_supporting_documents_single(alce_item: Dict,
k: int,
window_size: int,
reversed_browse_order: int,
selected_doc_first: int,
idf_use_letter: str,
use_title: int,
model_name: str,
stage2_select_system_prompt: str,
used_doc_field_in_retrieval: str,
thread: "instance",
use_sub_questions: int=0,
old_selected_docs: List[Dict]=None,
position: str=None,
doc_num: int=100) -> Dict:
"""Iteratively select supporting documents.
Args
----
alce_item: Dict
Single data.
k: int
A specified number of documents for answering the user's specific question(s).
window_size: int
            Maximum number of documents (already selected plus new candidates) shown to the model in a single selection round.
reversed_browse_order: int
Whether to reverse the document order or not.
selected_doc_first: int
Whether to use the selected documents first or not.
idf_use_letter: str
Use uppercase letters, lowercase letters, or integers to mark the documents.
use_title: int
Whether to use title or not.
model_name: str
Which model of OpenAI to use.
stage2_select_system_prompt: str
System prompt for instruction.
used_doc_field_in_retrieval: str
            Which field of the document to use in retrieval.
thread: "instance"
Instance of thread.
use_sub_questions: int=0
Whether to use sub questions for asqa.
old_selected_docs: List[Dict]=None
Old selected docs. May be less than 5.
position: str=None
Put the top-5 docs from old selected docs into the head or tail.
doc_num: int=100
Use top-k docs for reranking.
Returns
-------
output_alce_item: Dict
Selected docs.
"""
output_alce_item = copy.deepcopy(alce_item)
question = alce_item['question']
asqa_questions = None
if use_sub_questions and 'qa_pairs' in alce_item:
logger.warning("Use sub questions for asqa.")
asqa_questions = list(map(lambda x: x['question'], list(alce_item['qa_pairs'])))
if asqa_questions != None:
questions = asqa_questions
else:
questions = [question]
docs_to_browse = copy.deepcopy(alce_item['docs'][:doc_num])
logger.warning(f"The number of documents used for reranking is {len(docs_to_browse)}.")
if old_selected_docs is not None and position == "head":
logger.info("Add old selected docs into head.")
old_selected_docs_copy = copy.deepcopy(old_selected_docs)
docs_to_browse = old_selected_docs_copy + docs_to_browse
elif old_selected_docs is not None and position == "tail":
logger.info("Add old selected docs into tail.")
old_selected_docs_copy = copy.deepcopy(old_selected_docs)
docs_to_browse = docs_to_browse + old_selected_docs_copy
if reversed_browse_order:
docs_to_browse = list(reversed(docs_to_browse))
tmp_selected_docs = []
while len(docs_to_browse) > 0:
# iteratively update tmp_selected_docs
tmp_extra_docs_to_browse = docs_to_browse[:window_size - len(tmp_selected_docs)]
docs_to_browse = docs_to_browse[window_size - len(tmp_selected_docs):]
select_result_dict = select_k_supporting_documents(questions, tmp_selected_docs, tmp_extra_docs_to_browse, k,
selected_doc_first, idf_use_letter, use_title,
model_name, stage2_select_system_prompt, used_doc_field_in_retrieval, thread)
tmp_selected_docs = select_result_dict['selected_docs']
original_openai_response = select_result_dict['original_openai_response']
parsed_doc_idfs = select_result_dict['parsed_doc_idfs']
unbrowsed_docs = select_result_dict['unbrowsed_docs']
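        # any documents the model did not get to evaluate in this call (e.g. ones dropped to fit the context length) are pushed back to the front of the queue for the next round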
docs_to_browse = unbrowsed_docs + docs_to_browse
output_alce_item['docs'] = tmp_selected_docs
return output_alce_item
class OpenAI_API_inp_Manager_MultiThread_Generalized:
def __init__(self, idx_non_general_inp: List[Tuple], general_inp: Dict) -> None:
"""Class init
Args
----
idx_non_general_inp: List[Tuple]
Data with index.
general_inp: Dict
Hyperparameter.
"""
self.idx_non_general_inp = idx_non_general_inp
        assert idx_non_general_inp[0][0] == 0, 'the first element of idx_non_general_inp does not have index 0; something may be wrong'
self.general_inp = general_inp
self.inp_lock = threading.Lock()
self.progress_index = 0
assert type(general_inp) == type({})
def get_next_idx_inp(self) -> Union[List, None]:
"""
Get next new data.
"""
with self.inp_lock:
if self.progress_index < len(self.idx_non_general_inp):
tmp_idx = self.idx_non_general_inp[self.progress_index][0]
tmp_non_general_inp = self.idx_non_general_inp[self.progress_index][1]
tmp_general_inp = self.general_inp
                assert len(set(tmp_general_inp.keys()) & set(tmp_non_general_inp)) == 0, 'tmp_non_general_inp and tmp_general_inp have overlapping keys; this must not happen'
self.progress_index += 1
return [tmp_idx, {**tmp_non_general_inp, **tmp_general_inp}]
else:
return None
class MyThread(threading.Thread):
# todo: Adjust MyThread from calling_sliding_window to two_stage_retrieve
def __init__(self, thread_id: int, account_manager: "instance", inp_manager: "instance", print_error: bool, pbar: tqdm.tqdm, print_finish: bool=True) -> None:
"""Class init.
Args
----
thread_id: int
Thread id.
account_manager: "instance"
A manager for accounts of OpenAI.
inp_manager: "instance"
A manager for data.
print_error: bool
Whether to output error info or not.
pbar: tqdm.tqdm
Object of tqdm.
print_finish: bool=True
Whether to output ending info or not.
"""
threading.Thread.__init__(self)
self.thread_id = thread_id
self.openai_account_manager_multi_thread = account_manager
self.openai_inp_manager = inp_manager
self.account = self.openai_account_manager_multi_thread.get_next_account(self.thread_id)
self.print_error = print_error
self.pbar = pbar
self.print_finish = print_finish
def run(self):
self.results_with_idx = []
while True:
tmp = self.openai_inp_manager.get_next_idx_inp()
if tmp == None:
if self.print_finish:
logger.info('thread {} finish'.format(self.thread_id))
return
else:
tmp_idx = tmp[0]
select_doc_input = tmp[1]
result = iterative_select_supporting_documents_single(**select_doc_input, thread=self)
if self.pbar is not None:
self.pbar.update(1)
self.results_with_idx.append([tmp_idx, result])
from openai_account_manager import get_account_manager
def iterative_select_supporting_documents_multi_thread(items_to_select: List[Dict],
general_input: Dict,
num_threads: int,
use_tqdm: bool=True,
old_data: List[Dict]=None) -> List:
"""Iteratively select supporting documents in a multi-threaded manner.
Args
----
items_to_select: List[Dict]
Candidate documents for selection.
general_input: Dict
Hyperparameter.
num_threads: int
Number of Thread.
use_tqdm: bool
Whether to use tqdm or not.
old_data: List[Dict]=None
Old data before updating query.
Returns
-------
results: List
Selected supporting documents.
"""
new_items_to_select = []
if old_data is None:
logger.info("Old data is None...")
for item in items_to_select:
new_items_to_select.append({'alce_item': item})
else:
logger.info("Use old data...")
question_to_docs = {item["question"]: item["docs"] for item in old_data}
for item in items_to_select:
new_items_to_select.append({'alce_item': item, "old_selected_docs": question_to_docs[item["question"]]})
idx_items_to_select = list(enumerate(new_items_to_select)) # List[Tuple(index, item)]
account_manager = get_account_manager(multi_thread=True)
inp_manager = OpenAI_API_inp_Manager_MultiThread_Generalized(idx_items_to_select, general_input)
if use_tqdm:
pbar = tqdm.tqdm(total=len(idx_items_to_select))
else:
pbar = None
thread_list = []
for i in range(num_threads):
thread_list.append(MyThread(i, account_manager, inp_manager, True, pbar))
for t in thread_list:
t.start()
for i, t in enumerate(thread_list):
t.join()
results_with_idx = []
for t in thread_list:
results_with_idx.extend(t.results_with_idx)
results_with_idx.sort(key=lambda x: x[0])
results = list(map(lambda x: x[1], results_with_idx))
return results
| [
"Candidate Documents:\n\n",
"\n",
"content",
"Question:\n{}\n\nk: {}\n\n",
"[]"
] |
2024-01-10 | BeastyZ/LLM-Verified-Retrieval | multi_thread_openai_api_call.py | import threading
import openai
import logging
logger = logging.getLogger(__name__)
import time
class MyThread(threading.Thread):
def __init__(self, thread_id, llm, account_manager, inp_manager, print_error, pbar, turbo_system_message,
print_finish=True):
threading.Thread.__init__(self)
self.thread_id = thread_id
self.openai_account_manager_multi_thread = account_manager
self.openai_inp_manager = inp_manager
self.account = self.openai_account_manager_multi_thread.get_next_account(self.thread_id)
self.print_error = print_error
self.pbar = pbar
self.print_finish = print_finish
self.turbo_system_message = turbo_system_message
self.llm = llm
def run(self):
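        # retry helper bound to this worker thread: it keeps re-issuing the OpenAI call, rotating to the next account on quota/authentication errors and sleeping on connection or rate-limit errors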
def repeat_until_success_call_openai_api(func):
def wrapper(*args, **kw):
while 1:
result = None
try:
result = func(*args, **kw)
except openai.error.APIConnectionError as e:
if self.print_error:
logger.info('openai connection error, so retry after sleep 5 seconds')
logger.info(e)
time.sleep(5)
except openai.error.RateLimitError as e:
logger.info(type(e))
if 'quota' in e._message:
if self.print_error:
logger.info('now openai account {} runs out. so use next.'.format(self.account[-1]))
logger.info(type(e))
logger.info(e)
self.account = self.openai_account_manager_multi_thread.get_next_account(self.thread_id,
self.account)
else:
logger.info("Meeting RateLimitError, sleep for 45 seconds.")
time.sleep(45)
except openai.error.AuthenticationError as e:
if 'This key is associated with a deactivated account' in e._message:
logger.info('the account {} is deactivated. so use next'.format(self.account[-1]))
if self.print_error:
logger.info(e)
self.account = self.openai_account_manager_multi_thread.get_next_account(self.thread_id,
self.account)
else:
logger.info('meet unexpected AuthenticationError, so retry after sleep 5 seconds')
if self.print_error:
logger.info(e)
self.account = self.openai_account_manager_multi_thread.get_next_account(self.thread_id,
self.account)
except Exception as e:
logger.info('meet unexpected error, so retry after sleep 5 seconds')
logger.info(e)
logger.info(type(e))
time.sleep(5)
if result != None:
return result
else:
pass
return wrapper
# pbar = tqdm.tqdm(total=len(self.idx_x_list_to_decode))
responses_with_idx = []
self.responses_with_idx = responses_with_idx
while True:
tmp = self.openai_inp_manager.get_next_gpt_idx_inp()
if tmp == None:
if self.print_finish:
logger.info('thread {} finish'.format(self.thread_id))
return
else:
idx_inp = tmp['inp']
idx, inp = idx_inp
hyper_parameter = tmp['hyper_parameter']
@repeat_until_success_call_openai_api
def tmp_api_call():
result = self.llm.generate(inp, hyper_parameter['max_tokens'], api_key=self.account[-1],
turbo_system_message=self.turbo_system_message)
return result
response = tmp_api_call()
if self.pbar is not None:
self.pbar.update(1)
responses_with_idx.append([idx, response])
| [] |
2024-01-10 | llunareine/BilimAI | gpt_config.py | import json
import os
import nest_asyncio
from langchain.document_loaders import WebBaseLoader, JSONLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import RetrievalQA
from langchain.embeddings import OpenAIEmbeddings
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Chroma
from langchain.prompts import ChatPromptTemplate
from links import get_link
import sqlite3 as sq
from dotenv import load_dotenv
load_dotenv()
nest_asyncio.apply()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
def nfacotiral(question):
loader = WebBaseLoader(
["https://www.nfactorial.school/"]
)
docs = loader.load()
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(docs, embeddings)
retriever = docsearch.as_retriever(search_kwargs={"k": 3})
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k-0613")
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
response = qa(question)
return response["result"]
def chat_query(question: str):
if ("courses" in question.lower() or "course" in question.lower()) and "nfactorial" not in question.lower():
try:
sqliteConnection = sq.connect('db.sqlite3')
cursor = sqliteConnection.cursor()
cursor.execute("SELECT name, description, price FROM api_course")
datas = cursor.fetchall()
if datas:
json_loader_list = []
for data in datas:
json_loader_list.append({'courses': [{
"name_course": data[0],
"course_description": data[1],
"course_price": data[2]
}]})
json_loader = json.dumps(json_loader_list)
with open("courses/courses.json", "w") as f:
f.write(json_loader.replace("[", "").replace("]", ""))
loader = JSONLoader(
file_path='courses/courses.json',
jq_schema='.',
text_content=False)
docs = loader.load()
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(docs, embeddings)
retriever = docsearch.as_retriever(search_kwargs={"k": 3})
llm = ChatOpenAI()
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
response = qa(question)
return response["result"]
else:
return nfacotiral(question)
except sq.DatabaseError as e:
pass
elif ("courses" in question.lower() or "course" in question.lower()) and "nfactorial" in question.lower():
return nfacotiral(question)
loader = WebBaseLoader(
get_link(question)
)
data = loader.aload()
text_spliter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200
)
docs = text_spliter.split_documents(data)
prompt = """Prompt:you are an expert on the issues of unified national testing and admission to universities in
Kazakhstan. Please do not hesitate to ask any questions related to these topics, university admission procedures
or recommendations for choosing a university or specialty, as well as preparing for the UNT. if the questions are
not about these topics, just answer I'm sorry, I do not know the answer to your question.
User:{question}
"""
prompt_template = ChatPromptTemplate.from_template(prompt)
question = prompt_template.format_messages(question=question)
embeddings = OpenAIEmbeddings()
docsearch = Chroma.from_documents(docs, embeddings)
retriever = docsearch.as_retriever(search_kwargs={"k": 3})
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k-0613")
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
response = qa(question[0].content)
return response["result"]
| [
"Prompt:you are an expert on the issues of unified national testing and admission to universities in \n Kazakhstan. Please do not hesitate to ask any questions related to these topics, university admission procedures \n or recommendations for choosing a university or specialty, as well as preparing for the UNT. if the questions are \n not about these topics, just answer I'm sorry, I do not know the answer to your question.\n \n User:{question}\n "
] |
2024-01-10 | llunareine/BilimAI | gpt_test_config.py | import os
import json
from langchain.llms import OpenAI
from dotenv import load_dotenv
load_dotenv()
os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY")
def test_query(my_text: str) -> str:
prompt = f"""
Using the provided text:
"{my_text}"
you are an expert in creating tests. I am giving you a text for constructing questions and with an answer.
generate questions without asking unnecessary questions, just generate questions and return it as a list of
objects based on the given text, this is example of response format:
[
{{
{{
"question": "Sample question?",
"options": {{
"A": "Option A",
"B": "Option B",
"C": "Option C",
"D": "Option D"
}},
}},
{{
"question": "Sample question 2?",
"options": {{
"A": "Option A",
"B": "Option B",
"C": "Option C",
"D": "Option D"
}},
}}
}}
]
just send the list at json questions without any words and and completely write the list to the end.
warning: The correct answer should always be on option A
"""
llm = OpenAI(model_name="gpt-3.5-turbo", n=10, temperature=0)
completion = llm(prompt=prompt)
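    # the prompt asks the model for a raw JSON list of question objects, so the completion is parsed directly with json.loads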
try:
list_of_dicts = json.loads(completion)
return list_of_dicts
except json.JSONDecodeError:
print("Invalid JSON string") | [
"\n Using the provided text:\n\n \"PLACEHOLDER\"\n\n you are an expert in creating tests. I am giving you a text for constructing questions and with an answer. \n generate questions without asking unnecessary questions, just generate questions and return it as a list of \n objects based on the given text, this is example of response format:\n\n [\n {\n {\n \"question\": \"Sample question?\",\n \"options\": {\n \"A\": \"Option A\",\n \"B\": \"Option B\",\n \"C\": \"Option C\",\n \"D\": \"Option D\"\n },\n },\n\n {\n \"question\": \"Sample question 2?\",\n \"options\": {\n \"A\": \"Option A\",\n \"B\": \"Option B\",\n \"C\": \"Option C\",\n \"D\": \"Option D\"\n },\n } \n }\n ]\n just send the list at json questions without any words and and completely write the list to the end.\n\n warning: The correct answer should always be on option A \n "
] |
2024-01-10 | inha-carbon-neutral-seven/beaver-web-server | server~services~engine.py | from llama_index.query_engine import CustomQueryEngine
from llama_index.retrievers import BaseRetriever
from llama_index.response_synthesizers import (
BaseSynthesizer,
)
from llama_index.llms import OpenAI
from llama_index.prompts import PromptTemplate
qa_prompt = PromptTemplate(
"Context information is below.\n"
"---------------------\n"
"{context_str}\n"
"---------------------\n"
"Given the context information and not prior knowledge, "
"answer the query.\n"
"Query: {query_str}\n"
"Answer: "
)
class RAGStringQueryEngine(CustomQueryEngine):
"""RAG String Query Engine."""
retriever: BaseRetriever
response_synthesizer: BaseSynthesizer
llm: OpenAI
qa_prompt: PromptTemplate
def custom_query(self, query_str: str):
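        # retrieve the top nodes for the query, join their contents into one context string, and ask the LLM to answer using the QA prompt defined above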
nodes = self.retriever.retrieve(query_str)
context_str = "\n\n".join([n.node.get_content() for n in nodes])
response = self.llm.complete(qa_prompt.format(context_str=context_str, query_str=query_str))
return str(response)
| [
"Given the context information and not prior knowledge, ",
"Context information is below.\n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the query.\nQuery: {query_str}\nAnswer: ",
"{context_str}\n",
"Answer: ",
"Context information is below.\n",
"---------------------\n",
"Query: {query_str}\n",
"answer the query.\n"
] |
2024-01-10 | inha-carbon-neutral-seven/beaver-web-server | server~services~generate_simple.py | import logging
from langchain.llms.openai import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from ..models.generate import Answer, Question
from .ping import check_server_status
async def generate_message(question: Question) -> Answer:
"""
unused:
"""
    if await check_server_status() is False:  # do not proceed with embedding if the model server is unstable
return Answer(message="모델 서버가 불안정합니다. 나중에 시도해주세요. ")
# "첨부한 자료를 근거로 해서 질문에 답해주시기 바랍니다." 문장 일단 제외
template = """### Prompt:
당신은 AI 챗봇이며, 사용자에게 도움이 되는 유익한 내용을 제공해야 합니다.
index를 적극적으로 활용하여 질문에 답해주시기 바랍니다.
### Question:
{question}
### Answer:"""
prompt = PromptTemplate(
input_variables=["question"],
template=template,
)
llm_chain = LLMChain(
llm=OpenAI(temperature=0.1),
prompt=prompt,
verbose=False,
)
res = llm_chain.predict(question=question.message)
answer = Answer(message=res)
logging.info("생성한 응답: %s", answer.message)
return answer
| [
"### Prompt:\n당신은 AI 챗봇이며, 사용자에게 도움이 되는 유익한 내용을 제공해야 합니다.\nindex를 적극적으로 활용하여 질문에 답해주시기 바랍니다.\n### Question:\n{question}\n### Answer:",
"question"
] |
2024-01-10 | sudoghut/wos-ss-affiliations | f1_finding_country_for_fund~f1_claude_create_countries_for_fund_list.py | import pandas as pd
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
input_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "fund_list_small.csv")
prompt_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "prompt.txt")
output_folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "claude_output")
max_tokens_to_sample_value = 10000
input_list = pd.read_csv(input_file, dtype=str).fillna("").values.tolist()
input_list = [item[0] for item in input_list]
with open('api_key.txt', 'r') as file:
api_key_str = file.read()
anthropic = Anthropic(
api_key = api_key_str,
)
for filename in os.listdir(output_folder):
file_path = os.path.join(output_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
with open(os.path.join(prompt_file), "r") as file:
prompt_str = file.read()
prompt_str = prompt_str.replace("\\n", "\n")
print(prompt_str)
print(input_list[:5])
print(len(input_list))
count = 0
batch = 150
batch_list = []
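# send the fund names to Claude in batches; each batch's completion is written to its own numbered file in the output folder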
for i in range(len(input_list)):
batch_list.append(input_list[i])
if i % batch == 0 and i != 0:
output_names = "\n".join(batch_list)
prompt = f"{prompt_str}{output_names}"
completion = anthropic.completions.create(
model="claude-1",
max_tokens_to_sample=max_tokens_to_sample_value,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
file = open(output_folder + "\\output_" + str(count) + ".txt", "w", encoding="utf-8")
file.write(completion.completion)
file.close()
batch_list = []
count += 1
if len(batch_list) != 0:
output_names = "\n".join(batch_list)
prompt = f"{prompt_str}{output_names}"
completion = anthropic.completions.create(
model="claude-1",
max_tokens_to_sample=max_tokens_to_sample_value,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
file = open(output_folder + "\\output_" + str(count) + ".txt", "w", encoding="utf-8")
    file.write(completion.completion)
    file.close()
print("Finished!") | [
"\n",
"PLACEHOLDERPLACEHOLDER",
"prompt.txt",
"\\n",
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | sudoghut/wos-ss-affiliations | a3_create_university_country_by_claude.py | import pandas as pd
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
input_excel = "node-with-country-small.xlsx"
# input_excel = "node-with-country.xlsx"
output_folder = "claude_output"
with open('api_key.txt', 'r') as file:
api_key_str = file.read()
anthropic = Anthropic(
api_key = api_key_str,
)
for filename in os.listdir(output_folder):
file_path = os.path.join(output_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
input_df = pd.read_excel(input_excel, sheet_name="node-with-countries", dtype=str).fillna("")
print(input_df.head())
print(input_df.shape)
prompt_str = pd.read_excel(input_excel, sheet_name="prompt", header=None).to_string(index=False, header=False)
prompt_str = prompt_str.replace("\\n", "\n")
print(prompt_str)
label_list = input_df["Label"].tolist()
count = 0
batch = 200
batch_list = []
for i in range(len(label_list)):
batch_list.append(label_list[i])
if i % batch == 0 and i != 0:
output_names = "\n".join(batch_list)
prompt = f"{prompt_str}{output_names}"
completion = anthropic.completions.create(
model="claude-1",
# max_tokens_to_sample=300,
max_tokens_to_sample=2000,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
file = open(output_folder + "\\output_" + str(count) + ".txt", "w")
file.write(completion.completion)
file.close()
batch_list = []
count += 1
if len(batch_list) != 0:
output_names = "\n".join(batch_list)
prompt = f"{prompt_str}{output_names}"
completion = anthropic.completions.create(
model="claude-1",
max_tokens_to_sample=2000,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
file = open(output_folder + "\\output_" + str(count) + ".txt", "w")
    file.write(completion.completion)
    file.close()
print("Finished!") | [
"PLACEHOLDERPLACEHOLDER",
"\n",
"\\n",
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | sudoghut/wos-ss-affiliations | c2_wosa_university_year_basedd_GIS~c2_create_university_coordinates_by_claude.py | import pandas as pd
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import os
# input_excel = "node-with-country-small.xlsx"
input_excel = "node-with-country.xlsx"
output_folder = "claude_coordinates_output"
max_tokens_to_sample_value = 2000
with open('api_key.txt', 'r') as file:
api_key_str = file.read()
anthropic = Anthropic(
api_key = api_key_str,
)
for filename in os.listdir(output_folder):
file_path = os.path.join(output_folder, filename)
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
os.unlink(file_path)
except Exception as e:
print('Failed to delete %s. Reason: %s' % (file_path, e))
input_df = pd.read_excel(input_excel, sheet_name="node-with-countries", dtype=str).fillna("")
print(input_df.head())
print(input_df.shape)
with open(os.path.join("c2_wosa_university_year_basedd_GIS","c2_clause_prompt.txt"), "r") as file:
prompt_str = file.read()
prompt_str = prompt_str.replace("\\n", "\n")
print(prompt_str)
label_list = input_df["Label"].tolist()
count = 0
batch = 50
batch_list = []
for i in range(len(label_list)):
batch_list.append(label_list[i])
if i % batch == 0 and i != 0:
output_names = "\n".join(batch_list)
prompt = f"{prompt_str}{output_names}"
completion = anthropic.completions.create(
model="claude-1",
# max_tokens_to_sample=300,
max_tokens_to_sample=max_tokens_to_sample_value,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
file = open(output_folder + "\\output_" + str(count) + ".txt", "w", encoding="utf-8")
file.write(completion.completion)
file.close()
batch_list = []
count += 1
if len(batch_list) != 0:
output_names = "\n".join(batch_list)
prompt = f"{prompt_str}{output_names}"
completion = anthropic.completions.create(
model="claude-1",
max_tokens_to_sample=max_tokens_to_sample_value,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
file = open(output_folder + "\\output_" + str(count) + ".txt", "w", encoding="utf-8")
    file.write(completion.completion)
    file.close()
print("Finished!") | [
"PLACEHOLDERPLACEHOLDER",
"\n",
"\\n",
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | sudoghut/wos-ss-affiliations | claude_test.py | from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
with open('test.txt', 'r') as file:
test = file.read()
with open('api_key.txt', 'r') as file:
api_key_str = file.read()
anthropic = Anthropic(
api_key=api_key_str,
)
completion = anthropic.completions.create(
# model="claude-2",
model="claude-1",
max_tokens_to_sample=300,
# prompt=f"{HUMAN_PROMPT} how does a court case get to the Supreme Court? {AI_PROMPT}",
prompt=f"{HUMAN_PROMPT} {test} {AI_PROMPT}",
)
print(completion.completion)
print(type(completion.completion))
| [
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | huseyingunes/goruntu_isleme | 13.%20hafta~9_metinden_resim_uretme.py | import openai
import requests
from PIL import Image
openai.api_key = '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'
def generate(text):
res = openai.Image.create(
# text describing the generated image
prompt=text,
# number of images to generate
n=1,
# size of each generated image
size="1024x1024",
)
# returning the URL of one image as
# we are generating only one image
return res["data"][0]["url"]
text = "cat walking on articial neural network"
url1 = generate(text)
response = requests.get(url1)
with open("img3.png", "wb") as f:
f.write(response.content)
# opening the saved image and converting it into "RGBA" format
# converted image is saved in result
result = Image.open('img3.png').convert('RGBA')
# saving the new image in PNG format
result.save('img_rgba3.png','PNG')
Image.open("img3.png")  # re-open the downloaded image from disk (response.raw was already consumed above)
| [] |
2024-01-10 | ncsa/ray_slurm_autoscaler | rllib~algorithms~maddpg~maddpg.py | """Contributed port of MADDPG from OpenAI baselines.
The implementation has a couple assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see rllib/examples/two_step_game.py,
and the README for how to run with the multi-agent particle envs.
"""
import logging
from typing import List, Optional, Type
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.dqn.dqn import DQN
from ray.rllib.algorithms.maddpg.maddpg_tf_policy import MADDPGTFPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.annotations import Deprecated, override
from ray.rllib.utils.typing import AlgorithmConfigDict
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MADDPGConfig(AlgorithmConfig):
"""Defines a configuration class from which a MADDPG Algorithm can be built.
Example:
>>> from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig
>>> config = MADDPGConfig()
>>> print(config.replay_buffer_config)
>>> replay_config = config.replay_buffer_config.update(
>>> {
>>> "capacity": 100000,
>>> "prioritized_replay_alpha": 0.8,
>>> "prioritized_replay_beta": 0.45,
>>> "prioritized_replay_eps": 2e-6,
>>> }
>>> )
>>> config.training(replay_buffer_config=replay_config)\
>>> .resources(num_gpus=0)\
>>> .rollouts(num_rollout_workers=4)\
>>> .environment("CartPole-v1")
>>> algo = config.build()
>>> while True:
>>> algo.train()
Example:
>>> from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig
>>> from ray import tune
>>> config = MADDPGConfig()
>>> config.training(n_step=tune.grid_search([3, 5]))
>>> config.environment(env="CartPole-v1")
>>> tune.run(
>>> "MADDPG",
>>> stop={"episode_reward_mean":200},
>>> config=config.to_dict()
>>> )
"""
def __init__(self, algo_class=None):
"""Initializes a DQNConfig instance."""
super().__init__(algo_class=algo_class or MADDPG)
# fmt: off
# __sphinx_doc_begin__
# MADDPG specific config settings:
self.agent_id = None
self.use_local_critic = False
self.use_state_preprocessor = False
self.actor_hiddens = [64, 64]
self.actor_hidden_activation = "relu"
self.critic_hiddens = [64, 64]
self.critic_hidden_activation = "relu"
self.n_step = 1
self.good_policy = "maddpg"
self.adv_policy = "maddpg"
self.replay_buffer_config = {
"type": "MultiAgentReplayBuffer",
# Specify prioritized replay by supplying a buffer type that supports
# prioritization, for example: MultiAgentPrioritizedReplayBuffer.
"prioritized_replay": DEPRECATED_VALUE,
"capacity": int(1e6),
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Force lockstep replay mode for MADDPG.
"replay_mode": "lockstep",
}
self.training_intensity = None
self.critic_lr = 1e-2
self.actor_lr = 1e-2
self.target_network_update_freq = 0
self.tau = 0.01
self.actor_feature_reg = 0.001
self.grad_norm_clipping = 0.5
# Changes to Algorithm's default:
self.rollout_fragment_length = 100
self.train_batch_size = 1024
self.num_workers = 1
self.min_time_s_per_iteration = 0
# fmt: on
# __sphinx_doc_end__
@override(AlgorithmConfig)
def training(
self,
*,
agent_id: Optional[str] = None,
use_local_critic: Optional[bool] = None,
use_state_preprocessor: Optional[bool] = None,
actor_hiddens: Optional[List[int]] = None,
actor_hidden_activation: Optional[str] = None,
critic_hiddens: Optional[List[int]] = None,
critic_hidden_activation: Optional[str] = None,
n_step: Optional[int] = None,
good_policy: Optional[str] = None,
adv_policy: Optional[str] = None,
replay_buffer_config: Optional[dict] = None,
training_intensity: Optional[float] = None,
critic_lr: Optional[float] = None,
actor_lr: Optional[float] = None,
target_network_update_freq: Optional[int] = None,
tau: Optional[float] = None,
actor_feature_reg: Optional[float] = None,
grad_norm_clipping: Optional[float] = None,
**kwargs,
) -> "MADDPGConfig":
"""Sets the training related configuration.
Args:
agent_id: ID of the agent controlled by this policy.
use_local_critic: Use a local critic for this policy.
use_state_preprocessor: Apply a state preprocessor with spec given by the
"model" config option (like other RL algorithms). This is mostly useful
if you have a weird observation shape, like an image. Disabled by
default.
actor_hiddens: Postprocess the policy network model output with these hidden
layers. If `use_state_preprocessor` is False, then these will be the
*only* hidden layers in the network.
actor_hidden_activation: Hidden layers activation of the postprocessing
stage of the policy network.
critic_hiddens: Postprocess the critic network model output with these
hidden layers; again, if use_state_preprocessor is True, then the state
will be preprocessed by the model specified with the "model" config
option first.
critic_hidden_activation: Hidden layers activation of the postprocessing
state of the critic.
n_step: N-step for Q-learning.
good_policy: Algorithm for good policies.
adv_policy: Algorithm for adversary policies.
replay_buffer_config: Replay buffer config.
Examples:
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentReplayBuffer",
"learning_starts": 1000,
"capacity": 50000,
"replay_sequence_length": 1,
}
- OR -
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentPrioritizedReplayBuffer",
"capacity": 50000,
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4,
"prioritized_replay_eps": 1e-6,
"replay_sequence_length": 1,
}
- Where -
prioritized_replay_alpha: Alpha parameter controls the degree of
prioritization in the buffer. In other words, when a buffer sample has
a higher temporal-difference error, with how much more probability
should it drawn to use to update the parametrized Q-network. 0.0
corresponds to uniform probability. Setting much above 1.0 may quickly
result as the sampling distribution could become heavily “pointy” with
low entropy.
prioritized_replay_beta: Beta parameter controls the degree of
importance sampling which suppresses the influence of gradient updates
from samples that have higher probability of being sampled via alpha
parameter and the temporal-difference error.
prioritized_replay_eps: Epsilon parameter sets the baseline probability
for sampling so that when the temporal-difference error of a sample is
zero, there is still a chance of drawing the sample.
training_intensity: If set, this will fix the ratio of replayed from a
buffer and learned on timesteps to sampled from an environment and
stored in the replay buffer timesteps. Otherwise, the replay will
proceed at the native ratio determined by
`(train_batch_size / rollout_fragment_length)`.
critic_lr: Learning rate for the critic (Q-function) optimizer.
actor_lr: Learning rate for the actor (policy) optimizer.
target_network_update_freq: Update the target network every
`target_network_update_freq` sample steps.
            tau: Update the target networks towards the online networks: target = tau * policy + (1 - tau) * target_policy.
actor_feature_reg: Weights for feature regularization for the actor.
grad_norm_clipping: If not None, clip gradients during optimization at this
value.
Returns:
This updated AlgorithmConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if agent_id is not None:
self.agent_id = agent_id
if use_local_critic is not None:
self.use_local_critic = use_local_critic
if use_state_preprocessor is not None:
self.use_state_preprocessor = use_state_preprocessor
if actor_hiddens is not None:
self.actor_hiddens = actor_hiddens
if actor_hidden_activation is not None:
self.actor_hidden_activation = actor_hidden_activation
if critic_hiddens is not None:
self.critic_hiddens = critic_hiddens
if critic_hidden_activation is not None:
self.critic_hidden_activation = critic_hidden_activation
if n_step is not None:
self.n_step = n_step
if good_policy is not None:
self.good_policy = good_policy
if adv_policy is not None:
self.adv_policy = adv_policy
if replay_buffer_config is not None:
self.replay_buffer_config = replay_buffer_config
if training_intensity is not None:
self.training_intensity = training_intensity
if critic_lr is not None:
self.critic_lr = critic_lr
if actor_lr is not None:
self.actor_lr = actor_lr
if target_network_update_freq is not None:
self.target_network_update_freq = target_network_update_freq
if tau is not None:
self.tau = tau
if actor_feature_reg is not None:
self.actor_feature_reg = actor_feature_reg
if grad_norm_clipping is not None:
self.grad_norm_clipping = grad_norm_clipping
return self
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
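    # build one joint batch shared by every policy: each policy's sample keys are suffixed with its agent id, and target actions for the new observations are sampled from each agent's target actor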
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(dict(zip(keys, multi_agent_batch.policy_batches[pid].values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
for i, p in enumerate(policies.values()):
feed_dict = {new_obs_ph_n[i]: new_obs_n[i]}
new_act = p.get_session().run(p.target_act_sampler, feed_dict)
samples.update({"new_actions_%d" % i: new_act})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
class MADDPG(DQN):
@classmethod
@override(DQN)
def get_default_config(cls) -> AlgorithmConfigDict:
return MADDPGConfig().to_dict()
@override(DQN)
def validate_config(self, config: AlgorithmConfigDict) -> None:
"""Adds the `before_learn_on_batch` hook to the config.
This hook is called explicitly prior to `training_step()`
"""
# Call super's validation method.
super().validate_config(config)
def f(batch, workers, config):
policies = dict(
workers.local_worker().foreach_policy_to_train(lambda p, i: (i, p))
)
return before_learn_on_batch(batch, policies, config["train_batch_size"])
config["before_learn_on_batch"] = f
@override(DQN)
def get_default_policy_class(self, config: AlgorithmConfigDict) -> Type[Policy]:
return MADDPGTFPolicy
# Deprecated: Use ray.rllib.algorithms.maddpg.MADDPG instead!
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(MADDPGConfig().to_dict())
@Deprecated(
old="ray.rllib.algorithms.maddpg.maddpg.DEFAULT_CONFIG",
new="ray.rllib.algorithms.maddpg.maddpg.MADDPGConfig(...)",
error=False,
)
def __getitem__(self, item):
return super().__getitem__(item)
DEFAULT_CONFIG = _deprecated_default_config()
| [] |
2024-01-10 | SHI-Labs/Prompt-Free-Diffusion | lib~model_zoo~common~get_model.py | from email.policy import strict
import torch
import torchvision.models
import os.path as osp
import copy
from ...log_service import print_log
from .utils import \
get_total_param, get_total_param_sum, \
get_unit
# def load_state_dict(net, model_path):
# if isinstance(net, dict):
# for ni, neti in net.items():
# paras = torch.load(model_path[ni], map_location=torch.device('cpu'))
# new_paras = neti.state_dict()
# new_paras.update(paras)
# neti.load_state_dict(new_paras)
# else:
# paras = torch.load(model_path, map_location=torch.device('cpu'))
# new_paras = net.state_dict()
# new_paras.update(paras)
# net.load_state_dict(new_paras)
# return
# def save_state_dict(net, path):
# if isinstance(net, (torch.nn.DataParallel,
# torch.nn.parallel.DistributedDataParallel)):
# torch.save(net.module.state_dict(), path)
# else:
# torch.save(net.state_dict(), path)
def singleton(class_):
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance
def preprocess_model_args(args):
    # If args has layer_units, build the corresponding unit modules.
    # If args has a backbone, build the backbone model.
args = copy.deepcopy(args)
if 'layer_units' in args:
layer_units = [
get_unit()(i) for i in args.layer_units
]
args.layer_units = layer_units
if 'backbone' in args:
args.backbone = get_model()(args.backbone)
return args
@singleton
class get_model(object):
def __init__(self):
self.model = {}
def register(self, model, name):
self.model[name] = model
def __call__(self, cfg, verbose=True):
"""
Construct model based on the config.
"""
if cfg is None:
return None
t = cfg.type
# the register is in each file
if t.find('pfd')==0:
from .. import pfd
elif t=='autoencoderkl':
from .. import autokl
elif (t.find('clip')==0) or (t.find('openclip')==0):
from .. import clip
elif t.find('openai_unet')==0:
from .. import openaimodel
elif t.find('controlnet')==0:
from .. import controlnet
elif t.find('seecoder')==0:
from .. import seecoder
elif t.find('swin')==0:
from .. import swin
args = preprocess_model_args(cfg.args)
net = self.model[t](**args)
pretrained = cfg.get('pretrained', None)
if pretrained is None: # backward compatible
pretrained = cfg.get('pth', None)
map_location = cfg.get('map_location', 'cpu')
strict_sd = cfg.get('strict_sd', True)
if pretrained is not None:
if osp.splitext(pretrained)[1] == '.pth':
sd = torch.load(pretrained, map_location=map_location)
elif osp.splitext(pretrained)[1] == '.ckpt':
sd = torch.load(pretrained, map_location=map_location)['state_dict']
elif osp.splitext(pretrained)[1] == '.safetensors':
from safetensors.torch import load_file
from collections import OrderedDict
sd = load_file(pretrained, map_location)
sd = OrderedDict(sd)
net.load_state_dict(sd, strict=strict_sd)
if verbose:
print_log('Load model from [{}] strict [{}].'.format(pretrained, strict_sd))
# display param_num & param_sum
if verbose:
print_log(
            'Load {} with total {} parameters, '
            '{:.3f} parameter sum.'.format(
t,
get_total_param(net),
get_total_param_sum(net) ))
return net
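# decorator used by the individual model files to register their classes with the get_model singleton under a given name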
def register(name):
def wrapper(class_):
get_model().register(class_, name)
return class_
return wrapper
| [] |
2024-01-10 | stjordanis/Megatron-LM | megatron~model~bert_model.py | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
"""BERT model."""
import torch
from megatron import get_args
from megatron.core import tensor_parallel
from megatron.model.enums import AttnMaskType
from megatron.model.language_model import parallel_lm_logits
from megatron.model.language_model import get_language_model
from megatron.model.utils import get_norm
from megatron.model.utils import openai_gelu, erf_gelu
from megatron.model.utils import get_linear_layer
from megatron.model.utils import init_method_normal
from megatron.model.utils import scaled_init_method_normal
from .module import MegatronModule
def bert_extended_attention_mask(attention_mask):
# We create a 3D attention mask from a 2D tensor mask.
# [b, 1, s]
attention_mask_b1s = attention_mask.unsqueeze(1)
# [b, s, 1]
attention_mask_bs1 = attention_mask.unsqueeze(2)
# [b, s, s]
attention_mask_bss = attention_mask_b1s * attention_mask_bs1
# [b, 1, s, s]
extended_attention_mask = attention_mask_bss.unsqueeze(1)
# Convert attention mask to binary:
extended_attention_mask = (extended_attention_mask < 0.5)
return extended_attention_mask
def bert_position_ids(token_ids):
# Create position ids
seq_length = token_ids.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=token_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(token_ids)
return position_ids
class BertLMHead(MegatronModule):
"""Masked LM head for Bert
Arguments:
config: TransformerConfig object
mpu_vocab_size: model parallel size of vocabulary.
parallel_output: whether output logits being distributed or not.
"""
def __init__(self, mpu_vocab_size, config, parallel_output):
super().__init__(config=config)
args = get_args()
self.bias = torch.nn.Parameter(torch.zeros(mpu_vocab_size))
tensor_parallel.set_tensor_model_parallel_attributes(self.bias, True, 0, 1)
self.parallel_output = parallel_output
self.dense = get_linear_layer(config.hidden_size, config.hidden_size, config.init_method)
setattr(self.dense.weight, 'sequence_parallel', config.sequence_parallel)
setattr(self.dense.bias, 'sequence_parallel', config.sequence_parallel)
self.norm = get_norm(config)
self.gelu = torch.nn.functional.gelu
if args.openai_gelu:
self.gelu = openai_gelu
elif args.onnx_safe:
self.gelu = erf_gelu
def forward(self, hidden_states, word_embeddings_weight):
hidden_states = self.dense(hidden_states)
hidden_states = self.gelu(hidden_states)
hidden_states = self.norm(hidden_states)
output = parallel_lm_logits(hidden_states,
word_embeddings_weight,
self.parallel_output,
bias=self.bias)
return output
def post_language_model_processing(lm_output, pooled_output,
lm_head, binary_head,
lm_labels,
logit_weights,
fp16_lm_cross_entropy):
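    # compute LM logits (and NSP logits when a binary head is given); if labels are provided, return the vocab-parallel cross-entropy loss instead of the logits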
# Output.
lm_logits = lm_head(
lm_output, logit_weights)
binary_logits = None
if binary_head is not None:
binary_logits = binary_head(pooled_output)
if lm_labels is None:
# [s b h] => [b s h]
return lm_logits.transpose(0,1).contiguous(), binary_logits
else:
# [b s] => [s b]
lm_labels = lm_labels.transpose(0,1).contiguous()
# lm_logits : [s, b, h] and lm_labels: [s, b]
if fp16_lm_cross_entropy:
assert lm_logits.dtype == torch.half
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits, lm_labels)
else:
lm_loss = tensor_parallel.vocab_parallel_cross_entropy(lm_logits.float(),
lm_labels)
# [s, b] => [b s]
lm_loss = lm_loss.transpose(0,1).contiguous()
return lm_loss, binary_logits
class BertModel(MegatronModule):
"""Bert Language model."""
def __init__(self,
config,
num_tokentypes=2,
add_binary_head=True,
parallel_output=True,
pre_process=True,
post_process=True):
super().__init__(config=config)
args = get_args()
# TODO this option is not yet implemented in BERT
assert args.untie_embeddings_and_output_weights is False
self.fp16_lm_cross_entropy = args.fp16_lm_cross_entropy
self.add_binary_head = add_binary_head
self.parallel_output = parallel_output
self.pre_process = pre_process
self.post_process = post_process
self.return_embeddings = args.output_bert_embeddings
if self.return_embeddings:
assert self.post_process and self.add_binary_head
self.language_model, self._language_model_key = get_language_model(
config=config,
num_tokentypes=num_tokentypes,
add_pooler=self.add_binary_head,
encoder_attn_mask_type=AttnMaskType.padding,
pre_process=self.pre_process,
post_process=self.post_process)
self.initialize_word_embeddings()
if self.post_process:
self.lm_head = BertLMHead(self.shared_embedding_or_output_weight().size(0), config, parallel_output)
self._lm_head_key = 'lm_head'
self.binary_head = None
if self.add_binary_head:
self.binary_head = get_linear_layer(config.hidden_size, 2,
config.init_method)
self._binary_head_key = 'binary_head'
def set_input_tensor(self, input_tensor):
"""See megatron.model.transformer.set_input_tensor()"""
self.language_model.set_input_tensor(input_tensor)
def forward(self, bert_model_input, attention_mask,
tokentype_ids=None, lm_labels=None):
extended_attention_mask = bert_extended_attention_mask(attention_mask)
input_ids = bert_model_input
position_ids = bert_position_ids(input_ids)
lm_output = self.language_model(
input_ids,
position_ids,
extended_attention_mask,
tokentype_ids=tokentype_ids
)
if self.post_process and self.add_binary_head:
lm_output, pooled_output = lm_output
# Return pooled output (e.g., when computing Bert embeddings).
if self.return_embeddings:
# Sum attention mask.
embeddings = torch.transpose(lm_output, 0, 1)
masks = torch.sum(attention_mask, dim=1)
# Collect masked embeddings.
output = torch.zeros(
size=(embeddings.shape[0], embeddings.shape[2]),
dtype=torch.float32,
device=torch.cuda.current_device())
for i, (embedding, mask) in enumerate(zip(embeddings, masks)):
output[i, :] = torch.mean(embedding[1: mask - 1], dim=0)
return output
else:
pooled_output = None
if self.post_process:
return post_language_model_processing(lm_output, pooled_output,
self.lm_head, self.binary_head,
lm_labels,
self.shared_embedding_or_output_weight(),
self.fp16_lm_cross_entropy)
else:
return lm_output
def state_dict_for_save_checkpoint(self, prefix='', keep_vars=False):
"""For easy load when model is combined with other heads,
add an extra key."""
state_dict_ = {}
state_dict_[self._language_model_key] \
= self.language_model.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process:
state_dict_[self._lm_head_key] \
= self.lm_head.state_dict_for_save_checkpoint(prefix=prefix,
keep_vars=keep_vars)
if self.post_process and self.add_binary_head:
state_dict_[self._binary_head_key] \
= self.binary_head.state_dict(prefix=prefix, keep_vars=keep_vars)
# Save word_embeddings.
if self.post_process and not self.pre_process:
state_dict_[self._word_embeddings_for_head_key] \
= self.word_embeddings.state_dict(prefix=prefix, keep_vars=keep_vars)
return state_dict_
def load_state_dict(self, state_dict, strict=True):
"""Customized load."""
self.language_model.load_state_dict(
state_dict[self._language_model_key], strict=strict)
if self.post_process:
self.lm_head.load_state_dict(
state_dict[self._lm_head_key], strict=strict)
if self.post_process and self.add_binary_head:
self.binary_head.load_state_dict(
state_dict[self._binary_head_key], strict=strict)
# Load word_embeddings.
if self.post_process and not self.pre_process:
self.word_embeddings.load_state_dict(
state_dict[self._word_embeddings_for_head_key], strict=strict)
| [] |
2024-01-10 | jnhstk/simplifiedglimpse | glimpse.py | from youtube_transcript_api import YouTubeTranscriptApi, YouTubeRequestFailed
import openai
class Glimpse:
def __init__(self, key):
self.key = key
openai.api_key = key
def get_transcript(self, video): # added self parameter
try:
if "youtube.com" in video:
video_id = video.split("v=")[1].split("&")[0]
elif "youtu.be" in video:
video_id = video.split("/")[-1]
transcript = ""
for item in YouTubeTranscriptApi.get_transcript(video_id):
transcript += item["text"] + " "
return transcript
except YouTubeRequestFailed:
            return 400  # numeric error code; get_glimpse checks for an int return value
def get_blog(self, transcript): # added self parameter
try:
blog = openai.Completion.create(
model="text-davinci-003",
prompt=f"Write a long-form blog that discusses the main points in the following video transcript: {transcript[:12000]}\
\nEnsure your response has a title and headers formatted in markdown (.md) file format",
temperature=0.5,
max_tokens=1000
).choices[0].text
return blog
except Exception as e:
return str(e) # convert the error to a string
def get_glimpse(self, video):
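        # returns a (status_code, payload) pair: (399, blog) on success, (400, None) when the transcript could not be fetched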
transcript = self.get_transcript(video) # updated here
if type(transcript) == int:
return transcript, None
else:
blog = self.get_blog(transcript)
if type(blog) == int:
return blog, None
else:
return 399, blog
| [
"Write a long-form blog that discusses the main points in the following video transcript: PLACEHOLDER \nEnsure your response has a title and headers formatted in markdown (.md) file format"
] |
2024-01-10 | BrouthenKamel/AI-Virtual-Hackathon | scripts~llms.py | # The module is concerned of instanciating LLMs
from langchain.llms import OpenAI
chatGPT = OpenAI(temperature=0) | [] |
2024-01-10 | BrouthenKamel/AI-Virtual-Hackathon | scripts~chains.py | # the module is concerned of building LLM Chains
from scripts.prompts import *
from scripts.llms import chatGPT
from langchain import LLMChain
general_clustering_chain = LLMChain(llm=chatGPT, prompt=general_clustering_template)
extract_key_points_chain = LLMChain(llm=chatGPT, prompt=extract_key_points_template)
summarize_key_points_chain = LLMChain(llm=chatGPT, prompt=summarize_key_points_template) | [] |
2024-01-10 | alohachen/Hide-and-Seek | demo_model.py | # coding: utf-8
import os
os.environ['TOKENIZERS_PARALLELISM'] = 'False'
import warnings
import gc
import time
from typing import Dict
import numpy as np
import torch
import transformers
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, GenerationConfig
from transformers import StoppingCriteria, StoppingCriteriaList
from peft import PeftModel, LoraConfig, get_peft_model, prepare_model_for_kbit_training
import spacy
from ltp import LTP
import langdetect
langdetect.DetectorFactory.seed = 0
from utils import get_ents_en, get_ents_zh, add_pinyin, get_labelled_text
import openai
openai.api_key = "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"
# specify tasks
# tasks = ['abs', 'poli', 'trans']
tasks = ['trans']
# specify base model
#base_model = 'bloomz-560m'
base_model = 'bloomz-1b7'
base_model_dir = f'./models/{base_model}'
# specify langauge
lang = 'en'
# specify lora weights
hide_model_path = f"./lora_weights/hide_{base_model}_{lang}/checkpoint-6300"
hide_method = 'model1b7'
#hide_method = 'model560m'
seek_model_path = f"./lora_weights/seek-%s_{hide_method}_{base_model}_{lang}/checkpoint-2700"
# special tokens
DEFAULT_PAD_TOKEN = '[PAD]'
DEFAULT_EOS_TOKEN = '</s>'
DEFAULT_BOS_TOKEN = '<s>'
DEFAULT_UNK_TOKEN = '<unk>'
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_compute_dtype=torch.bfloat16,
)
def smart_tokenizer_and_embedding_resize(
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
special_tokens_dict: Dict[str, str] = {}
if tokenizer.pad_token is None:
special_tokens_dict['pad_token'] = DEFAULT_PAD_TOKEN
if tokenizer.eos_token is None:
special_tokens_dict['eos_token'] = DEFAULT_EOS_TOKEN
if tokenizer.bos_token is None:
special_tokens_dict['bos_token'] = DEFAULT_BOS_TOKEN
if tokenizer.unk_token is None:
special_tokens_dict['unk_token'] = DEFAULT_UNK_TOKEN
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
def hide_text(raw_input, target_ents, model, tokenizer, lang, ltp, spacy_model):
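    # load the hide LoRA adapter, resolve the entities to hide ('auto' extracts them, 'label' returns labelled text directly), fill the hide prompt template, and generate the anonymized rewrite with beam search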
sub_model = PeftModel.from_pretrained(model, hide_model_path, quantization_config=bnb_config, device_map='cuda:0', trust_remote_code=True)
with open(f'./prompts/v5/hide_{lang}.txt', 'r', encoding='utf-8') as f:
initial_prompt = f.read()
if target_ents == 'label':
return get_labelled_text(raw_input, spacy_model, return_ents=False)
if target_ents == 'auto':
if lang == 'en':
target_ents = get_ents_en(raw_input, spacy_model)
else:
target_ents = get_ents_zh(raw_input, ltp, spacy_model)
print(target_ents)
input_text = initial_prompt % (raw_input, target_ents)
input_text += tokenizer.bos_token
inputs = tokenizer(input_text, return_tensors='pt')
inputs = inputs.to('cuda:0')
len_prompt = len(inputs['input_ids'][0])
def custom_stopping_criteria(input_ids: torch.LongTensor, score: torch.FloatTensor, **kwargs) -> bool:
cur_top1 = tokenizer.decode(input_ids[0,len_prompt:])
if '\n' in cur_top1 or tokenizer.eos_token in cur_top1:
return True
return False
pred = sub_model.generate(
**inputs,
generation_config = GenerationConfig(
max_new_tokens = int(len(inputs['input_ids'][0]) * 1.3),
do_sample=False,
num_beams=3,
repetition_penalty=5.0,
),
stopping_criteria = StoppingCriteriaList([custom_stopping_criteria])
)
pred = pred.cpu()[0][len(inputs['input_ids'][0]):]
response = tokenizer.decode(pred, skip_special_tokens=True).split('\n')[0]
torch.cuda.empty_cache()
gc.collect()
return response
def get_api_output(hidden_text, task_type, lang):
with open(f'./prompts/v5/api_{task_type}_{lang}.txt', 'r', encoding='utf-8') as f:
template = f.read()
response = openai.ChatCompletion.create(
# model="gpt-4",
model="gpt-3.5-turbo",
temperature=0.1,
messages=[
{"role": "user", "content": template % hidden_text}
]
)
return response['choices'][0]['message']['content'].strip(" \n")
def recover_text(sub_content, sub_output, content, model, tokenizer, task_type, lang):
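    # load the task-specific seek LoRA adapter and reconstruct the final answer from the hidden input, the API's hidden output, and the original user text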
re_model = PeftModel.from_pretrained(model, seek_model_path % task_type, quantization_config=bnb_config, device_map='cuda:0', trust_remote_code=True)
with open(f'./prompts/v5/seek_{task_type}_{lang}.txt', 'r', encoding='utf-8') as f:
initial_prompt = f.read()
input_text = initial_prompt % (sub_content, sub_output, content)
input_text += tokenizer.bos_token
inputs = tokenizer(input_text, return_tensors='pt')
inputs = inputs.to('cuda:0')
len_prompt = len(inputs['input_ids'][0])
def custom_stopping_criteria(input_ids: torch.LongTensor, score: torch.FloatTensor, **kwargs) -> bool:
cur_top1 = tokenizer.decode(input_ids[0,len_prompt:])
if '\n' in cur_top1 or tokenizer.eos_token in cur_top1:
return True
return False
pred = re_model.generate(
**inputs,
generation_config = GenerationConfig(
max_new_tokens=1024,
do_sample=False,
num_beams=3,
),
stopping_criteria = StoppingCriteriaList([custom_stopping_criteria])
)
pred = pred.cpu()[0][len(inputs['input_ids'][0]):]
recovered_text = tokenizer.decode(pred, skip_special_tokens=True).split('\n')[0]
torch.cuda.empty_cache()
gc.collect()
return recovered_text
if __name__ == '__main__':
# load models
print('loading model...')
model = AutoModelForCausalLM.from_pretrained(base_model_dir, load_in_4bit=True, quantization_config=bnb_config, device_map='cuda:0', trust_remote_code=True, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_model_dir, trust_remote_code=True)
smart_tokenizer_and_embedding_resize(tokenizer=tokenizer,model=model)
spacy_model = spacy.load(f'{lang}_core_web_trf')
# only chinese uses ltp
ltp = LTP("LTP/small")
if torch.cuda.is_available():
ltp.cuda()
while True:
# input text
raw_input = input('\033[1;31minput:\033[0m ')
if raw_input == 'q':
print('quit')
break
# hide
target_ents = input('\033[1;31mtarget entities:\033[0m ')
hidden_text = hide_text(raw_input, target_ents, model, tokenizer, lang, ltp, spacy_model)
print('\033[1;31mhide_text:\033[0m ', hidden_text)
# seek
for task_type in tasks:
sub_output = get_api_output(hidden_text, task_type, lang).replace('\n', ';')
print(f'\033[1;31mhidden output for {task_type}:\033[0m ', sub_output)
if lang == 'zh' and task_type == 'trans':
raw_input = add_pinyin(raw_input, ltp)
output_text = recover_text(hidden_text, sub_output, raw_input, model, tokenizer, task_type, lang)
print(f'\033[1;31mrecovered output for {task_type}:\033[0m ', output_text)
| [
"1"
] |
2024-01-10 | alohachen/Hide-and-Seek | demo_label.py | # coding: utf-8
import os
os.environ['TOKENIZERS_PARALLELISM'] = 'False'
import warnings
import gc
import time
from typing import Dict
import numpy as np
import torch
import transformers
from transformers import AutoModel, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, GenerationConfig
from transformers import StoppingCriteria, StoppingCriteriaList
from peft import PeftModel, LoraConfig, get_peft_model, prepare_model_for_kbit_training
import spacy
from ltp import LTP
import langdetect
langdetect.DetectorFactory.seed = 0
from utils import get_ents_en, get_ents_zh, add_pinyin, get_labelled_text
import openai
# openai.api_base = "https://cp.ojkb.xyz/v1"
openai.api_key = "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"
# specify tasks
# tasks = ['abs', 'poli', 'trans']
tasks = ['trans']
# specify base model
#base_model = 'bloomz-560m'
base_model = 'bloomz-1b7'
base_model_dir = f'./models/{base_model}'
# specify language
lang = 'en'
# specify lora weights
seek_model_path = f"./lora_weights/seek-%s_label_{base_model}_{lang}/checkpoint-2700"
# special tokens
DEFAULT_PAD_TOKEN = '[PAD]'
DEFAULT_EOS_TOKEN = '</s>'
DEFAULT_BOS_TOKEN = '<s>'
DEFAULT_UNK_TOKEN = '<unk>'
bnb_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_compute_dtype=torch.bfloat16,
)
def smart_tokenizer_and_embedding_resize(
tokenizer: transformers.PreTrainedTokenizer,
model: transformers.PreTrainedModel,
):
"""Resize tokenizer and embedding.
Note: This is the unoptimized version that may make your embedding size not be divisible by 64.
"""
special_tokens_dict: Dict[str, str] = {}
if tokenizer.pad_token is None:
special_tokens_dict['pad_token'] = DEFAULT_PAD_TOKEN
if tokenizer.eos_token is None:
special_tokens_dict['eos_token'] = DEFAULT_EOS_TOKEN
if tokenizer.bos_token is None:
special_tokens_dict['bos_token'] = DEFAULT_BOS_TOKEN
if tokenizer.unk_token is None:
special_tokens_dict['unk_token'] = DEFAULT_UNK_TOKEN
num_new_tokens = tokenizer.add_special_tokens(special_tokens_dict)
model.resize_token_embeddings(len(tokenizer))
if num_new_tokens > 0:
input_embeddings = model.get_input_embeddings().weight.data
output_embeddings = model.get_output_embeddings().weight.data
input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(
dim=0, keepdim=True)
input_embeddings[-num_new_tokens:] = input_embeddings_avg
output_embeddings[-num_new_tokens:] = output_embeddings_avg
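# hide_text: in this label-based variant, anonymization is simply spaCy entity labelling
# (no generative hide model is loaded).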
def hide_text(raw_input, spacy_model):
return get_labelled_text(raw_input, spacy_model, return_ents=False)
def get_api_output(subed_text, task_type, lang):
with open(f'./prompts/v5/api_{task_type}_label_{lang}.txt', 'r', encoding='utf-8') as f:
template = f.read()
response = openai.ChatCompletion.create(
# model="gpt-4",
model="gpt-3.5-turbo",
temperature=0.1,
messages=[
{"role": "user", "content": template % subed_text}
]
)
return response['choices'][0]['message']['content'].strip(" \n")
def recover_text(sub_content, sub_output, content, model, tokenizer, task_type, lang):
re_model = PeftModel.from_pretrained(model, seek_model_path % task_type, quantization_config=bnb_config, device_map='cuda:0', trust_remote_code=True)
with open(f'./prompts/v5/seek_{task_type}_{lang}.txt', 'r', encoding='utf-8') as f:
initial_prompt = f.read()
input_text = initial_prompt % (sub_content, sub_output, content)
input_text += tokenizer.bos_token
inputs = tokenizer(input_text, return_tensors='pt')
inputs = inputs.to('cuda:0')
len_prompt = len(inputs['input_ids'][0])
def custom_stopping_criteria(input_ids: torch.LongTensor, score: torch.FloatTensor, **kwargs) -> bool:
cur_top1 = tokenizer.decode(input_ids[0,len_prompt:])
if '\n' in cur_top1 or tokenizer.eos_token in cur_top1:
return True
return False
pred = re_model.generate(
**inputs,
generation_config = GenerationConfig(
max_new_tokens=1024,
do_sample=False,
num_beams=3,
),
stopping_criteria = StoppingCriteriaList([custom_stopping_criteria])
)
pred = pred.cpu()[0][len(inputs['input_ids'][0]):]
recovered_text = tokenizer.decode(pred, skip_special_tokens=True).split('\n')[0]
torch.cuda.empty_cache()
gc.collect()
return recovered_text
if __name__ == '__main__':
# load models
print('loading model...')
model = AutoModelForCausalLM.from_pretrained(base_model_dir, load_in_4bit=True, quantization_config=bnb_config, device_map='cuda:0', trust_remote_code=True, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_model_dir, trust_remote_code=True)
smart_tokenizer_and_embedding_resize(tokenizer=tokenizer,model=model)
spacy_model = spacy.load(f'{lang}_core_web_trf')
ltp = LTP("LTP/small")
if torch.cuda.is_available():
ltp.cuda()
while True:
# input text
raw_input = input('\033[1;31minput:\033[0m ')
if raw_input == 'q':
print('quit')
break
# hide
hidden_text = hide_text(raw_input, spacy_model)
print('\033[1;31mhidden text:\033[0m ', hidden_text)
# seek
for task_type in tasks:
sub_output = get_api_output(hidden_text, task_type, lang).replace('\n', ';')
print(f'\033[1;31mhidden output for {task_type}:\033[0m ', sub_output)
            if lang == 'zh' and task_type == 'trans':
raw_input = add_pinyin(raw_input, ltp)
output_text = recover_text(hidden_text, sub_output, raw_input, model, tokenizer, task_type, lang)
print(f'\033[1;31mrecovered output for {task_type}:\033[0m ', output_text)
| [
"1"
] |
2024-01-10 | johnBamp/ArduinoGUI | Chronos~webserver.py | # Note: Ensure that you have the OpenAI Python library v0.27.0 or above installed
import openai
import socket
import os
import threading
import queue
# Configure OpenAI API Key
openai.api_key = ''
HOST = '0.0.0.0'
PORT = 65432
WAV_FILE = "received_audio.wav"
MAX_THREADS = 2 # Adjust as per your requirements
connection_queue = queue.Queue()
def transcribe_audio(file_path):
"""Transcribe audio using OpenAI and return the transcription."""
with open(file_path, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
text = transcript['text']
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": text}
]
)
return completion.choices[0].message['content']
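# Note on transcribe_audio above: openai.Audio.transcribe and openai.ChatCompletion.create are the
# pre-v1.0 OpenAI SDK interfaces, matching the v0.27.0+ requirement noted at the top of this file.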
def handle_connection(conn, addr):
with conn:
print(f"Processing connection from {addr}")
data_buffer = []
while True:
data = conn.recv(1024)
if not data:
break
if "EOF" in data.decode("utf-8", errors="ignore"):
data_buffer.append(data[:-3])
break
data_buffer.append(data)
file_name = f"received_audio_{addr[0]}_{addr[1]}.wav"
conn.sendall(b"ACK\n")
with open(file_name, 'wb') as file:
for data_chunk in data_buffer:
file.write(data_chunk)
print(f"File {file_name} saved!")
transcription = transcribe_audio(file_name)
print(transcription)
os.remove(file_name)
print(f"File {file_name} deleted!")
conn.sendall(transcription.encode('utf-8') + b'\nEND_TRANSCRIPTION\n')
print(f"Finished processing connection from {addr}")
def worker():
while True:
conn, addr = connection_queue.get()
try:
handle_connection(conn, addr)
finally:
connection_queue.task_done()
def main():
for _ in range(MAX_THREADS):
threading.Thread(target=worker, daemon=True).start()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen()
print(f'Listening on {HOST}:{PORT}')
while True:
conn, addr = s.accept()
print(f"Received connection from {addr}")
connection_queue.put((conn, addr))
if __name__ == '__main__':
main()
| [
"You are a helpful assistant."
] |
2024-01-10 | kotsonis/banana-DDQN_RL | buffer~ReplayBuffer.py | # ReplayBuffer and PrioritizedReplayBuffer classes
# from OpenAI : https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import numpy as np
import torch
import random
from buffer.segmenttrees import SumSegmentTree, MinSegmentTree
from collections import namedtuple, deque
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ReplayBuffer():
""" Experience Replay Buffer class """
def __init__(self, size:int):
"""Create simple Replay circular buffer as a list"""
self._buffer = []
self._maxsize = size
self.experience = namedtuple("Experience", field_names=["state", "action", "reward", "next_state", "done"])
self._next_idx = 0 # next available index for circular buffer is at start
def __len__(self):
return len(self._buffer)
def add(self, state, action, reward, next_state, done):
""" Add a new experience to replay buffer """
data = self.experience(state, action, reward, next_state, done)
if self._next_idx >= len(self._buffer):
# when we are still filling the buffer to capacity
self._buffer.append(data)
else:
# overwrite data at index
self._buffer[self._next_idx] = data
#increment buffer index and loop to beginning when needed
self._next_idx = int((self._next_idx + 1) % self._maxsize)
def _encode_sample(self, idxes):
"encode batch of experiences indexed by idxes from buffer"
states, actions, rewards, next_states, dones = [], [], [], [],[]
for idx in idxes:
states.append(self._buffer[idx].state)
actions.append(self._buffer[idx].action)
rewards.append(self._buffer[idx].reward)
next_states.append(self._buffer[idx].next_state)
dones.append(self._buffer[idx].done)
states = torch.tensor(states).float().to(device)
actions = torch.tensor(actions).long().unsqueeze(1).to(device)
rewards = torch.tensor(rewards).float().unsqueeze(1).to(device)
next_states = torch.tensor(next_states).float().to(device)
dones = torch.tensor(dones).float().unsqueeze(1).to(device)
return (states, actions, rewards, next_states, dones)
def sample(self, batch_size):
"""Sample a random batch of experiences."""
idxes = [random.randint(0, len(self._buffer) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
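# Illustrative usage of the ReplayBuffer above (editorial sketch; variable names are placeholders):
#   buffer = ReplayBuffer(size=100000)
#   buffer.add(state, action, reward, next_state, done)   # once per environment step
#   states, actions, rewards, next_states, dones = buffer.sample(batch_size=64)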
class PrioritizedReplayBuffer(ReplayBuffer):
""" A Prioritized according to TD Error replay buffer """
def __init__(self, size: int, batch_size: int, alpha: float):
"""Create Prioritized(alpha=0 -> no priority) Replay circular buffer as a list"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha >= 0, "negative alpha not allowed"
self._alpha = alpha
self._batch_size = batch_size
# find minimum power of 2 size for segment trees
st_capacity = 1
while st_capacity < size:
st_capacity *= 2
self._st_sum = SumSegmentTree(st_capacity)
self._st_min = MinSegmentTree(st_capacity)
# set priority with which new experiences will be added. 1.0 means they have highest chance of being sampled
self._max_priority = 1.0
def add(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx # obtain next available index to store at from the replay buffer parent class
super().add(*args, **kwargs) # add to the replay buffer
self._st_sum[idx] = self._max_priority ** self._alpha # put it in the sum tree with max priority
self._st_min[idx] = self._max_priority ** self._alpha # put it in the min tree with max priority
def _sample_proportional(self, batch_size: int):
""" sample uniformly within `batch_size` segments """
results = []
p_total = self._st_sum.sum(0, len(self._buffer) - 1) # get total sum of priorites in the whole replay buffer
every_range_len = p_total / batch_size # split the total sum of priorities into batch_size segments
for i in range(batch_size):
# generate a random cummulative sum of priorites within this segment
mass = random.random() * every_range_len + i * every_range_len
#Find index in the array of sampling probabilities such that sum of previous values is mass
idx = self._st_sum.find_prefixsum_idx(mass)
results.append(idx)
return results
def sample(self, batch_size:int, beta:float):
""" sample a batch of experiences from memory and also returns importance weights and idxes of sampled experiences"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
# find maximum weight factor, ie. smallest P(i) since we are dividing by this
p_sum = self._st_sum.sum()
p_min = self._st_min.min() / p_sum
max_weight = (p_min * len(self._buffer)) ** (-beta)
for idx in idxes:
# Compute importance-sampling weight (w_i) and append to weights
# priority of transition
# P(i) = -------------------------------------
# sum of priorities for all transitions
# | 1 |^beta
# w_i = | --------- |
# | N * P(i) |
# and then normalize by the maximum weight
# w_j = w_i/max_weight
p_sample = self._st_sum[idx] / p_sum
weight_sample = (p_sample * len(self._buffer)) ** (-beta)
weights.append(weight_sample / max_weight)
#expand weights dimension from (batch_size,) to (batch_size,1)
weights_t = torch.tensor(weights).unsqueeze(1).to(device)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights_t, idxes])
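    # Illustrative call pattern (editorial sketch; names are placeholders, not from the original file):
    #   s, a, r, s2, d, is_weights, idxes = per_buffer.sample(batch_size=64, beta=0.4)
    #   ...compute per-sample TD errors and an is_weights-weighted loss...
    #   per_buffer.update_priorities(idxes, abs_td_errors + 1e-6)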
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to transitions at the sampled idxes denoted by variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._buffer)
self._st_sum[idx] = priority ** self._alpha # update value and parent values in sum-tree
self._st_min[idx] = priority ** self._alpha # update value and parent values in min-tree
self._max_priority = max(self._max_priority, priority) | [] |
2024-01-10 | kotsonis/banana-DDQN_RL | buffer~segmenttrees.py | # Modified from original source from OpenAI : (http://openai.com) and available through The MIT License
import operator
import numpy as np
class SegmentTree():
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Arguments
---------
capacity:
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (eg. sum, max)
neutral_element:
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)] # create array to represent binary tree structure
self._operation = operation
def _reduce_helper(self, start: int, end:int, node:int, node_start:int, node_end:int):
""" recursively apply `self.operation` to a tree subset with sequential representation """
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start:int =0, end:int =None):
"""Returns result of applying `self.operation` to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx:int, val:float):
""" set value for a node in tree and update parents """
idx += self._capacity
self._value[idx] = val
idx //= 2 ## go to parent
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2 ## keep moving to parent in b-tree
def __getitem__(self, idx):
""" get value for item in tree """
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
""" Segment Tree with cummulative sum being the segmentation operator """
def __init__(self, capacity):
""" initialize SumSegmentTree as a SegmentTree with operation being addition and neutral value being 0 """
super(SumSegmentTree, self).__init__(capacity=capacity,operation=operator.add,neutral_element=0.0)
def sum(self, start:int =0, end:int =None) :
"""Returns sum of elements from index `start` to index `end` """
return super(SumSegmentTree, self).reduce(start, end) # run the reduce operator on the SegmentTree
def find_prefixsum_idx(self, prefixsum:float):
"""Find the highest index `i` in the SumSegmentTree such that sum(data[0] ... data[`i - i`]) is less than `prefixsum`
Arguments:
----
prefixsum:
the cummulative sum that we are querying for
Returns:
----
idx:
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5, "prefixsum outside current data boundaries"
idx = 1 # tree root node index
while idx < self._capacity: # stay within b-tree structure and not the data themselves
if self._value[2 * idx] > prefixsum: # if left child is greater, move to that child and keep drilling
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx] # if right child is greater, subtract left child sum and move to right to keep drilling
idx = 2 * idx + 1
return idx - self._capacity
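    # Worked example (editorial note): with leaf priorities [1.0, 3.0, 2.0, 4.0] the cumulative
    # sums are [1.0, 4.0, 6.0, 10.0], so find_prefixsum_idx(4.5) walks the tree and returns 2,
    # the highest index whose preceding sum (4.0) does not exceed 4.5.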
class MinSegmentTree(SegmentTree):
""" Segment Tree segmented on the minimum value operator """
def __init__(self, capacity:int):
super(MinSegmentTree, self).__init__( # initialize as a SegmentTree with operation being min comparison and neutral value being infinity
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns minimum value of Tree elements from index `start` to index `end` """
return super(MinSegmentTree, self).reduce(start, end)
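# Editorial addition (illustrative smoke test, not part of the original module): exercises the
# two trees above when this file is run directly as a script.
if __name__ == '__main__':
    st = SumSegmentTree(8)
    mt = MinSegmentTree(8)
    for i, p in enumerate([0.5, 1.0, 2.0, 1.5]):
        st[i] = p
        mt[i] = p
    assert st.sum() == 5.0          # total priority mass
    assert mt.min() == 0.5          # smallest priority stored so far
    # cumulative sums are [0.5, 1.5, 3.5, 5.0], so a prefix sum of 1.6 lands on index 2
    assert st.find_prefixsum_idx(1.6) == 2
    print('segment tree smoke test passed')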
| [] |
2024-01-10 | abarnea/pw-semantic-search | src~run_gpt.py | import sys
import json
import openai
from semantic_search import semsearch
import helper_funcs as helper
import doc_reader as reader
MAX_TOKENS = 3500
def valid_gpt_input(cand_json: str) -> bool:
"""
Checks if the candidate json input fits in GPT token input of 3500 total tokens.
Choice is arbitrary to allow for 597 total tokens for setting system prompts and
GPT output.
Parameters
-----------
cand_json (str) : Candidate json input to test
Returns
-----------
fits_in_gpt (bool) : Boolean flag on whether the candidate can be passed
into ChatGPT API
"""
token_count = len(cand_json) / 4
return token_count < MAX_TOKENS
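# Editorial note: len(cand_json) / 4 above is a rough characters-per-token heuristic; an exact
# count could use tiktoken, e.g. len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(text)).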
def format_gpt_input(gpt_docs: dict) -> str:
"""
Formats the documentation to be passed into ChatGPT API into a joined
sentence structure to minimize GPT API token input.
Parameters
-----------
gpt_docs (dict) : Formatted documentation to be inputted into ChatGPT API
Returns
-----------
gpt_input (str) : json-formatted dictionary keyed by the same filenames,
but the the content is now strings instead of lists of string tokens
"""
return json.dumps({file : " ".join(content) for file, content in gpt_docs.items()})
def optimize_gpt_input(gpt_docs: dict) -> str:
"""
Optimizes the documentation passed into ChatGPT by filling in the maximum
number of tokens that can be fit from five files. If the five most relevant
files go over the maximum number of GPT API tokens, this function cuts out
the least relevant file, and tries again until the json-formatted dictionary
can be passed into the ChatGPT API with as many relevant documents as possible
and while staying below the maximum token count.
Parameters
-----------
gpt_docs (dict) : Formatted documentation to be inputted into ChatGPT API
Returns
-----------
gpt_input (str) : json-formatted string dictionary that has been trimmed
from lest relevant to most in order to fit into ChatGPT API
max token count of 3597 for inputs.
"""
temp_docs = gpt_docs.copy()
cand = format_gpt_input(temp_docs)
while not valid_gpt_input(cand):
file_keys = list(temp_docs.keys())
file_to_remove = file_keys[-1]
temp_docs.pop(file_to_remove)
cand = format_gpt_input(temp_docs)
return cand
def replace_filenames_with_links(gpt_output: str, hyperlinks: dict) -> str:
"""
Replaces filenames in Ask PW output with hyperlinks in markdown format.
Parameters
-----------
gpt_output (str) : output from ChatGPT PW API
hyperlinks (dict) : the hyperlinks corresponding to the file names
Returns
-----------
formatted_output (str) : gpt_output but with file names replaced with hyperlinks
"""
formatted_output = gpt_output
for filename, link in hyperlinks.items():
hyperlink = f"[{filename}]({link})"
formatted_output = formatted_output.replace(filename, hyperlink)
return formatted_output
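# Example (illustrative values): with hyperlinks = {"storage.md": "https://docs.example.com/storage"},
# replace_filenames_with_links("See storage.md for details", hyperlinks)
# returns "See [storage.md](https://docs.example.com/storage) for details".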
def run_gpt(query, formatted_docs, api_key=helper.get_api_key()):
"""
    Runs the gpt-3.5-turbo chat API on a query, passing along the most relevant
    documentation returned by semantic search so the model can answer from it.
    Parameters:
        query (str) : inputted query from user
        formatted_docs (str) : json-formatted string mapping the most relevant
        file names to their content, as produced by semantic search
api_key (str) : user API key to run
Returns:
reply (str) : GPT AI response to query with supporting relevant documents
"""
openai.api_key = api_key
gpt_prompt = "You are a helpful assistant in charge of helping users understand our platform."
clarification_1 = "Your responses should not require users to search through our files and should be fully comprehensive. However, always include the most relevant files to the query at the bottom if the user would like to conduct further reading."
clarification_2 = "If the inputted query isn't related to PW documentation, respond explaining that you are meant as an assistant for the Parallel Works platform. Tangentially related queries are okay."
clarification_3 = "If the message passed to you is `Your query does not match anything in our system.`, explain that we currently don't have documentation related to that query."
messages = [
{"role": "system", "content": gpt_prompt},
{"role": "system", "content": clarification_1},
{"role": "system", "content": clarification_2},
{"role": "system", "content": clarification_3},
{"role": "user", "content": query}
]
    messages.append({"role": "user", "content": formatted_docs})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
reply = response.choices[0].message.content
return reply
def main():
if len(sys.argv) < 2:
raise ValueError("Query input required.")
query = sys.argv[1]
docs_path = sys.argv[2] if len(sys.argv) >= 3 else "../data/docs"
preproc_docs = helper.read_clean_process_data(docs_path)
hyperlink_dict = reader.create_hyperlink_dict(docs_path)
w2v_model = helper.load_w2v()
# vectorizer, tfidf_matrix = helper.load_tfidf()
# ss_docs = semsearch(query, preproc_docs, w2v_model, vectorizer, tfidf_matrix)
ss_docs = semsearch(query, preproc_docs, w2v_model)
gpt_input = optimize_gpt_input(ss_docs)
reply = run_gpt(query, gpt_input)
hyperlink_reply = replace_filenames_with_links(reply, hyperlink_dict)
print(f"{hyperlink_reply}\n")
if __name__ == "__main__":
main()
| [
"You are a helpful assistant in charge of helping users understand our platform.",
"If the inputted query isn't related to PW documentation, respond explaining that you are meant as an assistant for the Parallel Works platform. Tangentially related queries are okay.",
"If the message passed to you is `Your query does not match anything in our system.`, explain that we currently don't have documentation related to that query.",
"Your responses should not require users to search through our files and should be fully comprehensive. However, always include the most relevant files to the query at the bottom if the user would like to conduct further reading."
] |
2024-01-10 | code-and-circuit/spotty | run4.py | import os
import sys
import uuid
import pathlib
import requests
import json
import json5
import openai
import re
import cv2
import random
import math
import time
from threading import Thread
class VideoGet:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread.
"""
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
while not self.stopped:
if not self.grabbed:
self.stop()
else:
(self.grabbed, self.frame) = self.stream.read()
def stop(self):
self.stopped = True
import bosdyn.client
import bosdyn.client.util
from bosdyn.api import basic_command_pb2
from bosdyn.api.spot import robot_command_pb2 as spot_command_pb2
from bosdyn.api import geometry_pb2 as geo
from bosdyn.api.basic_command_pb2 import RobotCommandFeedbackStatus
from bosdyn.client import math_helpers
from bosdyn.client.frame_helpers import (BODY_FRAME_NAME, ODOM_FRAME_NAME, VISION_FRAME_NAME,
get_se2_a_tform_b)
from bosdyn.client.lease import LeaseClient, LeaseKeepAlive
from bosdyn.client.robot_command import (RobotCommandBuilder, RobotCommandClient,
block_for_trajectory_cmd, blocking_stand)
from bosdyn.client.robot_state import RobotStateClient
from elevenlabs import set_api_key
from elevenlabs import generate, play
eleven_key = "df9fff70db36babd0e5040610c8c822f"
set_api_key(eleven_key)
robot_command_client = None
robot_state_client = None
lease_client = None
robot = None
leaseKeepAlive = None
lease = None
walk = False
talk = False
video_getter = None
chatmessages=[{"role": "system", "content": "You are a curious robot on a mission to explore."}]
def startupSpot(foo):
global robot_command_client
global robot_state_client
global lease_client
global robot
global leaseKeepAlive
global lease
# Create robot object.
sdk = bosdyn.client.create_standard_sdk('RobotCommandMaster')
robot = sdk.create_robot("192.168.50.3") # robot's IP when on GXP
robot.authenticate("spot", "spotspotspot")
# Check that an estop is connected with the robot so that the robot commands can be executed.
assert not robot.is_estopped(), "Robot is estopped. Please use an external E-Stop client, " \
"such as the estop SDK example, to configure E-Stop."
# Create the lease client.
lease_client = robot.ensure_client(LeaseClient.default_service_name)
# Setup clients for the robot state and robot command services.
robot_state_client = robot.ensure_client(RobotStateClient.default_service_name)
robot_command_client = robot.ensure_client(RobotCommandClient.default_service_name)
try:
lease = lease_client.acquire()
except:
lease = lease_client.take()
leaseKeepAlive = LeaseKeepAlive(lease_client)
# Power on the robot and stand it up.
robot.time_sync.wait_for_sync()
robot.power_on()
blocking_stand(robot_command_client)
def set_mobility_params():
"""Set robot mobility params to disable obstacle avoidance."""
obstacles = spot_command_pb2.ObstacleParams(disable_vision_body_obstacle_avoidance=False,
disable_vision_foot_obstacle_avoidance=False,
disable_vision_foot_constraint_avoidance=False,
obstacle_avoidance_padding=.5)
mobility_params = spot_command_pb2.MobilityParams(
obstacle_params=obstacles,
locomotion_hint=spot_command_pb2.HINT_AUTO)
return mobility_params
def relative_move(dx, dy, dyaw, frame_name, robot_command_client, robot_state_client, stairs=False):
transforms = robot_state_client.get_robot_state().kinematic_state.transforms_snapshot
# Build the transform for where we want the robot to be relative to where the body currently is.
body_tform_goal = math_helpers.SE2Pose(x=dx, y=dy, angle=dyaw)
# We do not want to command this goal in body frame because the body will move, thus shifting
# our goal. Instead, we transform this offset to get the goal position in the output frame
# (which will be either odom or vision).
out_tform_body = get_se2_a_tform_b(transforms, frame_name, BODY_FRAME_NAME)
out_tform_goal = out_tform_body * body_tform_goal
# Command the robot to go to the goal point in the specified frame. The command will stop at the
# new position.
mobility_params = set_mobility_params()
robot_cmd = RobotCommandBuilder.synchro_se2_trajectory_point_command(
goal_x=out_tform_goal.x, goal_y=out_tform_goal.y, goal_heading=out_tform_goal.angle,
frame_name=frame_name, params=mobility_params)
end_time = 5.0
cmd_id = robot_command_client.robot_command(lease=None, command=robot_cmd,
end_time_secs=time.time() + end_time)
# Wait until the robot has reached the goal.
while True:
feedback = robot_command_client.robot_command_feedback(cmd_id)
mobility_feedback = feedback.feedback.synchronized_feedback.mobility_command_feedback
if mobility_feedback.status != RobotCommandFeedbackStatus.STATUS_PROCESSING:
print("Failed to reach the goal")
return False
traj_feedback = mobility_feedback.se2_trajectory_feedback
if (traj_feedback.status == traj_feedback.STATUS_AT_GOAL and
traj_feedback.body_movement_status == traj_feedback.BODY_STATUS_SETTLED):
print("Arrived at the goal.")
return True
time.sleep(1)
return True
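# observe(): grab a frame, send it to Azure dense captioning, ask GPT to pick one caption and write
# two sentences about it, speak them with ElevenLabs (when talking is enabled), then turn toward and
# walk to the chosen item (when walking is enabled).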
def observe():
filename = "temp.jpg" #sys.argv[1]
image = video_getter.frame
image = cv2.resize(image, (640, 360))
# saving image in local storage
#cv2.imwrite(filename, image)
result, imagedata = cv2.imencode(".jpg", image)
byte_im = imagedata.tobytes()
file1 = open(filename, "wb")
file1.write(byte_im)
file1.close()
api_url = "https://cctest1.cognitiveservices.azure.com/computervision/imageanalysis:analyze?api-version=2023-02-01-preview&features=denseCaptions&language=en&gender-neutral-caption=False"
headers = {"Ocp-apim-subscription-key":"243a6fc6116940849c58b4eb168ebdfa" , "Content-Type":"application/octet-stream"}
response = requests.post(api_url, headers=headers, data=byte_im)
captions = json.loads(response.text)
print(captions)
print(response.status_code)
prompt = "Pretend you are a curious and funny robot. You see the following scene: \n"
count = 1
for thing in captions["denseCaptionsResult"]["values"]:
prompt += str(count) + ": " + thing["text"] + "\n"
count = count + 1
if count > 6:
break
imgwidth = captions["metadata"]["width"]
imgheight = captions["metadata"]["height"]
prompt += "Pick one item, and respond with a JSON response about why that item is interesting to you. Also include in the JSON one sentence you'll say when you see it, and another sentence you'd say when you get there. Make references to previous things you talked about. The keys of the JSON response should be index, description, sentence1, and sentence2"
print(prompt)
openai.api_key = "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";
chatmessages.append({"role": "user", "content": prompt})
completion = []
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chatmessages,
request_timeout=20
)
except KeyboardInterrupt:
sys.exit()
except:
return
content = completion.choices[0].message.content
chatmessages.append(completion.choices[0].message)
print(chatmessages)
regex = r"\{(.*?)\}"
matches = re.finditer(regex, content, re.MULTILINE | re.DOTALL)
for matchNum, match in enumerate(matches):
for groupNum in range(0, len(match.groups())):
content = (match.group(1))
content = "{" + content + "}"
print("JSON: " + content + " <---")
result = json5.loads(content)
def say(result, sentence):
if not talk:
return
if sentence in result:
greeting = result[sentence]
audio = generate(
text=greeting,
voice="Adam",
model="eleven_monolingual_v1"
)
play(audio)
say(result, "sentence1")
if "index" in result:
item = captions["denseCaptionsResult"]["values"][result["index"]-1]
box = item["boundingBox"]
horiz = box["x"] + (box["w"]/2)
vert = box["y"] + (box["h"]/2)
print("Turn to point at " + str(horiz) + ", " + str(vert) )
factor = 120
xdeg = ((horiz/imgwidth) * factor) - (factor/2.0)
xdeg = -xdeg
print("Turn by " + str(xdeg))
if spot:
relative_move(0,0, math.radians(xdeg), ODOM_FRAME_NAME, robot_command_client, robot_state_client, stairs=False)
relative_move(4,0, 0, ODOM_FRAME_NAME, robot_command_client, robot_state_client, stairs=False)
print("walking...")
say(result, "sentence2")
#leaseKeepAlive.shutdown()
#lease_client.return_lease(lease)
#robot.power_off(cut_immediately=False, timeout_sec=20)
arg = ""
if len(sys.argv) > 1:
arg = sys.argv[1]
spot = ("walk" in arg)
talk = ("talk" in arg)
if spot:
startupSpot(True)
video_getter = VideoGet(1).start()
for i in range(10):
observe()
turn = random.randint(90, 270)
if spot:
relative_move(0,0, math.radians(turn), ODOM_FRAME_NAME, robot_command_client, robot_state_client, stairs=False)
time.sleep(2)
| [
"You are a curious robot on a mission to explore.",
"Pretend you are a curious and funny robot. You see the following scene: \n",
"Pick one item, and respond with a JSON response about why that item is interesting to you. Also include in the JSON one sentence you'll say when you see it, and another sentence you'd say when you get there. Make references to previous things you talked about. The keys of the JSON response should be index, description, sentence1, and sentence2",
"PLACEHOLDER: PLACEHOLDER\n"
] |
2024-01-10 | code-and-circuit/spotty | run2.py | import os
import sys
import uuid
import pathlib
import requests
import json
import openai
import re
import cv2
from elevenlabs import set_api_key
from elevenlabs import generate, play
eleven_key = "df9fff70db36babd0e5040610c8c822f"
set_api_key(eleven_key)
filename = "temp.png" #sys.argv[1]
cam_port = 0
cam = cv2.VideoCapture(cam_port)
result, image = cam.read()
if result:
# saving image in local storage
cv2.imwrite("temp.png", image)
file_extension = pathlib.Path(filename).suffix
uid = str(uuid.uuid4())
file_path = filename
uname = uid + file_extension
account_name = 'codeandcircuituploads'
account_key = 'IVYI0Q+T4LBtjBMjK4WGJB/bgRBZNxvVRZdlFnvWR2gyqS7oX9P9Q8JgYKagYAUteUmxTS7y9iIl+ASti+61kg=='
container_name = 'images'
account_url = "https://" + account_name + ".blob.core.windows.net"
from datetime import datetime, timedelta
from azure.storage.blob import BlobServiceClient, generate_account_sas, ResourceTypes, AccountSasPermissions
sas_token = generate_account_sas(
account_name=account_name,
account_key=account_key,
resource_types=ResourceTypes(service=True, container=True, object=True),
permission=AccountSasPermissions(read=True, write=True),
expiry=datetime.utcnow() + timedelta(hours=1)
)
blob_service_client = BlobServiceClient(account_url=account_url, credential=sas_token)
# Create a blob client using the local file name as the name for the blob
blob_client = blob_service_client.get_blob_client(container=container_name, blob=uname)
with open(file=filename, mode="rb") as data:
blob_client.upload_blob(data=data)
#block_blob_service = BlockBlobService( account_name=account_name, account_key=account_key)
#block_blob_service.create_blob_from_path(container_name, uname, file_path)
cloud_url = "https://" + account_name + ".blob.core.windows.net/" + container_name + "/" + uname
print(cloud_url)
api_url = "https://cctest1.cognitiveservices.azure.com/computervision/imageanalysis:analyze?api-version=2023-02-01-preview&features=denseCaptions&language=en&gender-neutral-caption=False"
post = {"url" : cloud_url}
headers = {"Ocp-apim-subscription-key":"243a6fc6116940849c58b4eb168ebdfa" , "Content-Type":"application/json"}
response = requests.post(api_url, json=post, headers=headers)
captions = json.loads(response.text)
print(captions)
print(response.status_code)
prompt = "Pretend you are a curious child that is observing the world. You see the following scene: \n"
count = 1
for thing in captions["denseCaptionsResult"]["values"]:
prompt += str(count) + ": " + thing["text"] + "\n"
count = count + 1
print(prompt)
prompt += "Pick one item, and respond with a JSON response about why that item is interesting to you. Also include in the JSON one sentence you'll say when you see it, and another sentence you'd say when you get there. The keys of the JSON respnose should be index, description, sentence1, and sentence2"
openai.api_key = "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";
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": prompt}
]
)
content = completion.choices[0].message.content
print(content)
regex = r"\{(.*?)\}"
matches = re.finditer(regex, content, re.MULTILINE | re.DOTALL)
for matchNum, match in enumerate(matches):
for groupNum in range(0, len(match.groups())):
content = (match.group(1))
content = "{" + content + "}"
print(content)
result = json.loads(content)
def say(result, sentence):
if sentence in result:
greeting = result[sentence]
audio = generate(
text=greeting,
voice="Sam",
model="eleven_monolingual_v1"
)
play(audio)
say(result, "sentence1")
if "index" in result:
item = captions["denseCaptionsResult"]["values"][result["index"]-1]
box = item["boundingBox"]
horiz = box["x"] + (box["w"]/2)
vert = box["y"] + (box["h"]/2)
print("Turn to point at " + str(horiz) + ", " + str(vert) )
print("walking...")
say(result, "sentence2")
| [
"Pretend you are a curious child that is observing the world. You see the following scene: \n",
"Pick one item, and respond with a JSON response about why that item is interesting to you. Also include in the JSON one sentence you'll say when you see it, and another sentence you'd say when you get there. The keys of the JSON respnose should be index, description, sentence1, and sentence2",
"PLACEHOLDER: PLACEHOLDER\n"
] |
2024-01-10 | code-and-circuit/spotty | run3.py | import os
import sys
import uuid
import pathlib
import requests
import json
import json5
import openai
import re
import cv2
import random
import math
import time
from threading import Thread
#os.system("pacmd set-default-sink 0") # make speakers the output device
from aikeys import *
class VideoGet:
"""
Class that continuously gets frames from a VideoCapture object
with a dedicated thread.
"""
def __init__(self, src=0):
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.get, args=()).start()
return self
def get(self):
while not self.stopped:
time.sleep(1)
'''
if not self.grabbed:
self.stop()
else:
'''
grabbed, frame = self.stream.read()
if grabbed:
self.frame = frame
self.grabbed = grabbed
def stop(self):
self.stopped = True
import bosdyn.client
import bosdyn.client.util
from bosdyn.api import basic_command_pb2
from bosdyn.api.spot import robot_command_pb2 as spot_command_pb2
from bosdyn.api import geometry_pb2 as geo
from bosdyn.api.basic_command_pb2 import RobotCommandFeedbackStatus
from bosdyn.client import math_helpers
from bosdyn.client.frame_helpers import (BODY_FRAME_NAME, ODOM_FRAME_NAME, VISION_FRAME_NAME,
get_se2_a_tform_b)
from bosdyn.client.lease import LeaseClient, LeaseKeepAlive
from bosdyn.client.robot_command import (RobotCommandBuilder, RobotCommandClient,
block_for_trajectory_cmd, blocking_stand)
from bosdyn.client.robot_state import RobotStateClient
from elevenlabs import set_api_key
from elevenlabs import generate, play
set_api_key(eleven_key)
robot_command_client = None
robot_state_client = None
lease_client = None
robot = None
leaseKeepAlive = None
lease = None
walk = False
talk = False
video_getter = None
chatmessages=[{"role": "system", "content": "You are a curious, grumpy, sarcastic robot on a mission to explore."}]
def startupSpot(foo):
global robot_command_client
global robot_state_client
global lease_client
global robot
global leaseKeepAlive
global lease
# Create robot object.
sdk = bosdyn.client.create_standard_sdk('RobotCommandMaster')
robot = sdk.create_robot("192.168.50.3") # robot's IP when on GXP
robot.authenticate("spot", "spotspotspot")
# Check that an estop is connected with the robot so that the robot commands can be executed.
assert not robot.is_estopped(), "Robot is estopped. Please use an external E-Stop client, " \
"such as the estop SDK example, to configure E-Stop."
# Create the lease client.
lease_client = robot.ensure_client(LeaseClient.default_service_name)
# Setup clients for the robot state and robot command services.
robot_state_client = robot.ensure_client(RobotStateClient.default_service_name)
robot_command_client = robot.ensure_client(RobotCommandClient.default_service_name)
try:
lease = lease_client.acquire()
except:
lease = lease_client.take()
leaseKeepAlive = LeaseKeepAlive(lease_client)
# Power on the robot and stand it up.
robot.time_sync.wait_for_sync()
robot.power_on()
blocking_stand(robot_command_client)
def set_mobility_params():
"""Set robot mobility params to disable obstacle avoidance."""
obstacles = spot_command_pb2.ObstacleParams(disable_vision_body_obstacle_avoidance=False,
disable_vision_foot_obstacle_avoidance=False,
disable_vision_foot_constraint_avoidance=False,
obstacle_avoidance_padding=.5)
mobility_params = spot_command_pb2.MobilityParams(
obstacle_params=obstacles,
locomotion_hint=spot_command_pb2.HINT_AUTO)
return mobility_params
def turnBody(pitch, roll):
footprint_R_body = bosdyn.geometry.EulerZXY(yaw=0.0, roll=roll, pitch=pitch)
cmd = RobotCommandBuilder.synchro_stand_command(footprint_R_body=footprint_R_body)
robot_command_client.robot_command(cmd)
def relative_move(dx, dy, dyaw, frame_name, robot_command_client, robot_state_client, stairs=False):
transforms = robot_state_client.get_robot_state().kinematic_state.transforms_snapshot
# Build the transform for where we want the robot to be relative to where the body currently is.
body_tform_goal = math_helpers.SE2Pose(x=dx, y=dy, angle=dyaw)
# We do not want to command this goal in body frame because the body will move, thus shifting
# our goal. Instead, we transform this offset to get the goal position in the output frame
# (which will be either odom or vision).
out_tform_body = get_se2_a_tform_b(transforms, frame_name, BODY_FRAME_NAME)
out_tform_goal = out_tform_body * body_tform_goal
# Command the robot to go to the goal point in the specified frame. The command will stop at the
# new position.
mobility_params = set_mobility_params()
robot_cmd = RobotCommandBuilder.synchro_se2_trajectory_point_command(
goal_x=out_tform_goal.x, goal_y=out_tform_goal.y, goal_heading=out_tform_goal.angle,
frame_name=frame_name, params=mobility_params)
end_time = 5.0
cmd_id = robot_command_client.robot_command(lease=None, command=robot_cmd,
end_time_secs=time.time() + end_time)
# Wait until the robot has reached the goal.
while True:
feedback = robot_command_client.robot_command_feedback(cmd_id)
mobility_feedback = feedback.feedback.synchronized_feedback.mobility_command_feedback
if mobility_feedback.status != RobotCommandFeedbackStatus.STATUS_PROCESSING:
print("Failed to reach the goal")
return False
traj_feedback = mobility_feedback.se2_trajectory_feedback
if (traj_feedback.status == traj_feedback.STATUS_AT_GOAL and
traj_feedback.body_movement_status == traj_feedback.BODY_STATUS_SETTLED):
print("Arrived at the goal.")
return True
time.sleep(1)
return True
def observe():
filename = "temp.jpg" #sys.argv[1]
image = video_getter.frame
image = cv2.resize(image, (640, 360))
# saving image in local storage
#cv2.imwrite(filename, image)
result, imagedata = cv2.imencode(".jpg", image)
byte_im = imagedata.tobytes()
file1 = open(filename, "wb")
file1.write(byte_im)
file1.close()
api_url = "https://cctest1.cognitiveservices.azure.com/computervision/imageanalysis:analyze?api-version=2023-02-01-preview&features=denseCaptions&language=en&gender-neutral-caption=False"
headers = {"Ocp-apim-subscription-key":azure_cognitive_key , "Content-Type":"application/octet-stream"}
response = requests.post(api_url, headers=headers, data=byte_im)
captions = json.loads(response.text)
print(captions)
print(response.status_code)
prompt = "You are a curious, grumpy, sarcastic robot on a mission to explore. You see the following things: \n"
count = 1
for thing in captions["denseCaptionsResult"]["values"]:
capt = thing["text"]
capt = capt.replace("a close up of", "") # dense caption likes to say things are a close up of
capt = capt.replace("a close-up of", "") # dense caption likes to say things are a close up of
prompt += str(count) + ": " + capt + "\n"
count = count + 1
if count > 6:
break
imgwidth = captions["metadata"]["width"]
imgheight = captions["metadata"]["height"]
prompt += "Pick one item, and respond with a JSON response. Don't be repetitive about what you pick. Also include in the JSON one sentence you'll say when you see it, and another sentence you'll say when you get there. The sentences should explain why the item is interesting to you and be nerdy. Be sure to make a dad joke in every sentence!! Say things that should be extremely surprising. The keys of the JSON response should be index, description, sentence1, and sentence2"
print(prompt)
openai.api_key = openai_key;
chatmessages.append({"role": "user", "content": prompt})
completion = []
try:
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=chatmessages,
request_timeout=20
)
except KeyboardInterrupt:
sys.exit()
except:
return
content = completion.choices[0].message.content
chatmessages.append(completion.choices[0].message)
print(chatmessages)
regex = r"\{(.*?)\}"
matches = re.finditer(regex, content, re.MULTILINE | re.DOTALL)
for matchNum, match in enumerate(matches):
for groupNum in range(0, len(match.groups())):
content = (match.group(1))
content = "{" + content + "}"
print("JSON: " + content + " <---")
result = json5.loads(content)
def say(result, sentence):
if not talk:
return
if sentence in result:
greeting = result[sentence]
audio = generate(
text=greeting,
voice="Adam",
model="eleven_monolingual_v1"
)
play(audio)
say(result, "sentence1")
if "index" in result:
item = captions["denseCaptionsResult"]["values"][result["index"]-1]
box = item["boundingBox"]
horiz = box["x"] + (box["w"]/2)
vert = box["y"] + (box["h"]/2)
print("Turn to point at " + str(horiz) + ", " + str(vert) )
factor = 120
xdeg = ((horiz/imgwidth) * factor) - (factor/2.0)
xdeg = -xdeg
print("Turn by " + str(xdeg))
yfactor = 1.0
ydeg = (vert/imgheight) - 0.5
print("tilt to " + str(ydeg))
randroll = random.uniform(-0.3, 0.3)
if spot:
relative_move(0,0, math.radians(xdeg), ODOM_FRAME_NAME, robot_command_client, robot_state_client, stairs=False)
relative_move(4,0, 0, ODOM_FRAME_NAME, robot_command_client, robot_state_client, stairs=False)
turnBody(ydeg, randroll)
print("walking...")
say(result, "sentence2")
#leaseKeepAlive.shutdown()
#lease_client.return_lease(lease)
#robot.power_off(cut_immediately=False, timeout_sec=20)
arg = ""
if len(sys.argv) > 1:
arg = sys.argv[1]
spot = ("walk" in arg)
talk = ("talk" in arg)
if spot:
startupSpot(True)
video_getter = VideoGet(0).start()
for i in range(100):
observe()
turn = random.randint(90, 270)
if spot:
relative_move(0,0, math.radians(turn), ODOM_FRAME_NAME, robot_command_client, robot_state_client, stairs=False)
| [
"You are a curious, grumpy, sarcastic robot on a mission to explore. You see the following things: \n",
"Pick one item, and respond with a JSON response. Don't be repetitive about what you pick. Also include in the JSON one sentence you'll say when you see it, and another sentence you'll say when you get there. The sentences should explain why the item is interesting to you and be nerdy. Be sure to make a dad joke in every sentence!! Say things that should be extremely surprising. The keys of the JSON response should be index, description, sentence1, and sentence2",
"You are a curious, grumpy, sarcastic robot on a mission to explore.",
"PLACEHOLDER: PLACEHOLDER\n"
] |
2024-01-10 | KeSyren/ntm_back_translation | nsm~graph_factory.py | "Implements several tensorflow graphs and encapsulates them as Graph."
import abc
import six
import collections
import os
import pprint
import numpy as np
import tensorflow as tf
import tf_utils
import data_utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
RNN_CELL_DICT = dict(
rnn=tf.contrib.rnn.RNNCell,
lstm=tf.contrib.rnn.BasicLSTMCell,
layernorm_lstm=tf.contrib.rnn.LayerNormBasicLSTMCell,
gru=tf.contrib.rnn.GRUCell)
OPTIMIZER_DICT = dict(
sgd=tf.train.GradientDescentOptimizer,
adam=tf.train.AdamOptimizer,
adagrad=tf.train.AdagradOptimizer,
rmsprop=tf.train.RMSPropOptimizer)
ACTIVATION_DICT = dict(
relu=tf.nn.relu, sigmoid=tf.nn.sigmoid,
tanh=tf.nn.tanh)
# Bind a variable length tensor with its sequence_length.
SeqTensor = collections.namedtuple('SeqTensor', ['tensor', 'sequence_length'])
def with_graph_variable_scope(func):
def func_wrapper(*args, **kwargs):
self = args[0]
with self._graph.as_default():
pid = os.getpid()
container_name = 'worker{}'.format(pid)
# print(container_name)
with self._graph.container(container_name):
with tf.variable_scope(self.vs):
return func(*args, **kwargs)
return func_wrapper
class Graph(object):
"""A TensorFlow graph with simpler interface to interact with it.
The neural network architecture (basically all the
tensorflow code) should live within this class. A new
architecture (for example, Seq2seq) should implement a new
subclass (Seq2seqGraph).
"""
def __init__(self, name, meta_graph_fn=''):
self.node_dict = {'summaries': []}
self._graph = tf.Graph()
self.vs_name = name
self.use_gpu = False
with tf.variable_scope(name) as vs:
self.vs = vs
@with_graph_variable_scope
def launch(self, init_model_path=''):
"Launch and initialize the graph."
if self.use_gpu:
n_gpu = 1
else:
n_gpu = 0
session_config = tf.ConfigProto(
device_count={'GPU' : n_gpu},
allow_soft_placement=False,
log_device_placement=False,
)
tf.logging.info('number of gpu used {}'.format(n_gpu))
self.session = tf.Session(
graph=self._graph, config=session_config)
self.saver = tf.train.Saver(tf.global_variables())
if init_model_path:
self._graph.finalize()
self.saver.restore(self.session, init_model_path)
else:
init = tf.global_variables_initializer()
self._graph.finalize()
self.session.run(init)
return self.session
def restore(self, model_path):
self.saver.restore(self.session, model_path)
def save(self, model_path, global_step):
return self.saver.save(self.session, model_path, global_step)
def run(self, fetch_list, feed_dict, writer=None):
"""Main interface to interact with the tensorflow graph.
Args:
fetch_list: a list of names (strings) indicating
the name of result operations.
feed_dict: a dictionary with the names of the nodes as keys
and the corresponding values that are fed as values.
writer: a tensorflow summary writer
Returns:
outputs: a dictionary with the names in the fetch_list as
keys, and the outputs from the executing graph as values.
"""
fetch_dict = dict([(name, self.node_dict[name])
for name in fetch_list if name in self.node_dict])
if writer is not None:
fetch_dict['summaries'] = self.node_dict['summaries']
fetch_dict['global_step'] = self.node_dict['global_step']
outputs = self.session.run(
fetch_dict, map_dict(self.node_dict, feed_dict))
if writer is not None:
writer.add_summary(
outputs['summaries'], outputs['global_step'])
writer.flush()
return outputs
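    # Typical call (illustrative): graph.run(['loss', 'train'], feed_dict, writer=summary_writer)
    # returns a dict keyed by the requested fetch names; summaries are written when a writer is given.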
@with_graph_variable_scope
def add_train(self, aux_loss_list=None, optimizer='adam',
learning_rate=0.01, max_grad_norm=5.0,
decay_after_n_steps=1000, decay_every_n_steps=1000,
lr_decay_factor=1.0, avg_loss_by_n=False, debug=True,
l2_coeff=0.0,
adam_beta1=0.9,
name='Training'):
"Construct part of the graph that controlls training (SGD optimization)."
self.node_dict['max_batch_size'] = tf.placeholder(tf.int32, None)
with tf.variable_scope(name):
all_summaries = []
batch_size = tf.cast(self.node_dict['max_batch_size'],
dtype=tf.float32)
loss = self.node_dict['loss'] / batch_size
all_summaries.append(
tf.summary.scalar(self.vs_name + '/' + 'loss', loss))
total_loss = loss
if aux_loss_list is not None:
for loss_name, w in aux_loss_list:
if w > 0.0:
aux_loss = self.node_dict[loss_name]
total_loss += aux_loss * w / batch_size
aux_loss_summary = tf.summary.scalar(
self.vs_name + '/' + loss_name, aux_loss)
all_summaries.append(aux_loss_summary)
if debug:
total_loss= tf.Print(
total_loss, [self.node_dict['sequence_loss']], message='seq_loss:', summarize=10000)
total_loss= tf.Print(
total_loss, [self.node_dict['weights'].tensor], message='weights:', summarize=10000)
total_loss= tf.Print(
total_loss, [self.node_dict['targets'].tensor], message='targets:', summarize=10000)
total_loss= tf.Print(
total_loss, [self.node_dict['probs'].tensor], message='probs:', summarize=10000)
total_loss= tf.Print(
total_loss, [self.node_dict['step_loss'].tensor],
message='step_loss:', summarize=10000)
total_loss= tf.Print(
total_loss, [self.node_dict['logits'].tensor], message='logits:', summarize=10000)
total_loss_summary = tf.summary.scalar(
self.vs_name + '/' + 'total_loss', total_loss)
all_summaries.append(total_loss_summary)
lr = tf.Variable(
float(learning_rate), trainable=False)
new_lr = tf.placeholder(dtype=tf.float32, shape=None, name='new_lr')
update_lr = lr.assign(new_lr)
global_step = tf.Variable(0, trainable=False)
decay_step = tf.maximum(
0, global_step - decay_after_n_steps)
decay_exponent = (tf.cast(decay_step, tf.float32) /
tf.cast(decay_every_n_steps, tf.float32))
decay = lr_decay_factor ** decay_exponent
decayed_lr = lr * decay
lr_summary = tf.summary.scalar(self.vs_name + '/' + 'learning_rate', decayed_lr)
all_summaries.append(lr_summary)
params = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.vs_name)
n_params = 0
tf.logging.info('trainable parameters:')
for tv in params:
n_tv_params = np.product(tv.get_shape().as_list())
n_params += n_tv_params
tf.logging.info('{}: {}'.format(tv.name, n_tv_params))
if 'weights' in tv.name or 'kernel' in tv.name:
total_loss += tf.reduce_sum(tf.nn.l2_loss(tv)) * l2_coeff
tf.logging.info('total number of trainable parameters {}'.format(n_params))
if optimizer == 'adam':
tf.logging.info('adam beta1: {}'.format(adam_beta1))
opt = OPTIMIZER_DICT[optimizer](decayed_lr, beta1=adam_beta1)
else:
opt = OPTIMIZER_DICT[optimizer](decayed_lr)
gradients = tf.gradients(total_loss, params)
clipped_gradients, grad_norm = tf.clip_by_global_norm(
gradients, max_grad_norm)
update = opt.apply_gradients(
zip(clipped_gradients, params), global_step=global_step)
grad_norm_summary = tf.summary.scalar(
self.vs_name + '/' + 'grad_norm', grad_norm)
all_summaries.append(grad_norm_summary)
if debug:
_, clipped_grad_norm = tf.clip_by_global_norm(
clipped_gradients, max_grad_norm)
clipped_grad_norm_summary = tf.summary.scalar(
self.vs_name + '/' + 'clipped_grad_norm', clipped_grad_norm)
n_summary = tf.summary.scalar(
self.vs_name + '/' + 'n', self.node_dict['n'])
seq_loss_summary = tf.summary.histogram(
self.vs_name + '/' + 'seq_loss', self.node_dict['sequence_loss'])
step_loss_summary = tf.summary.histogram(
self.vs_name + '/' + 'step_loss', self.node_dict['step_loss'].tensor)
weights_summary = tf.summary.histogram(
self.vs_name + '/' + 'weights', self.node_dict['weights'].tensor)
all_summaries += [
clipped_grad_norm_summary, n_summary,
step_loss_summary, seq_loss_summary, weights_summary]
batch_size_summary = tf.summary.scalar(
self.vs_name + '/' + 'batch_size', self.node_dict['batch_size'])
all_summaries.append(batch_size_summary)
if 'ent_reg' in self.node_dict:
if avg_loss_by_n:
ent_reg = (self.node_dict['ent_reg'] /
tf.cast(self.node_dict['n'],
dtype=tf.float32))
else:
ent_reg = self.node_dict['ent_reg'] / batch_size
ent_reg_summary = tf.summary.scalar(
self.vs_name + '/' + 'polic_entropy',
(self.node_dict['ent_reg'] /
tf.cast(self.node_dict['n'], tf.float32)))
ent_reg_ppl_summary = tf.summary.scalar(
self.vs_name + '/' + 'policy_entropy_ppl',
tf.exp(self.node_dict['ent_reg'] /
tf.cast(self.node_dict['n'], tf.float32)))
all_summaries.append(ent_reg_summary)
all_summaries.append(ent_reg_ppl_summary)
for s in self.node_dict['summaries']:
all_summaries.append(s)
merged = tf.summary.merge(inputs=all_summaries)
self.node_dict.update(
train=update, global_step=global_step,
summaries=merged,
update_lr=update_lr, new_lr=new_lr)
@property
def final_state(self):
return 'final_state'
@property
def outputs(self):
return 'outputs'
@property
def initial_state(self):
return 'initial_state'
@property
def en_outputs(self):
return 'en_outputs'
@property
def n_examples(self):
return 'n_examples'
@property
def prediction_probs(self):
return 'probs'
@property
def samples(self):
return 'samples'
@property
def predictions(self):
return 'predictions'
@property
def en_initial_state(self):
return 'en_initial_state'
def add_outputs(self, output_type, output_config):
"Create part of the graph that compute final outputs from the RNN output."
if output_type == 'softmax':
self.add_softmax_outputs(**output_config)
elif output_type == 'regression':
self.add_regression_outputs(**output_config)
else:
raise NotImplementedError('Output type {} not supported!'.format(
output_type))
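# Illustrative call (hypothetical values, not from the original code):
# add_outputs('softmax', {'output_vocab_size': 128, 'use_logits': False})
# attaches a softmax head, while
# add_outputs('regression', {'hidden_sizes': (64,), 'stop_gradient': True})
# attaches a linear regression head trained with an MSE loss.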
@with_graph_variable_scope
def add_softmax_outputs(self, output_vocab_size=None, use_logits=None, name='Softmax'):
"Add softmax layer on top of RNN outputs."
with tf.variable_scope(name):
seq_targets = create_seq_inputs(
shape=tf.TensorShape([None, None]), dtype=tf.int32)
seq_weights = create_seq_inputs(
shape=tf.TensorShape([None, None]), dtype=tf.float32)
if use_logits:
# Feeding logits instead of outputs (thus no linear transformation needed).
logits, probs, predictions, samples, temperature = create_softmax_from_logits(
self.node_dict['outputs'].tensor)
sequence_length = self.node_dict['outputs'].sequence_length
else:
logits, probs, predictions, samples, temperature = create_softmax(
self.node_dict['outputs'].tensor, output_vocab_size=output_vocab_size)
sequence_length = self.node_dict['outputs'].sequence_length
# From OpenAI Baselines, to avoid numerical issues.
a0 = logits - tf.reduce_max(logits, axis=-1, keepdims=True)
ea0 = tf.exp(a0)
z0 = tf.reduce_sum(ea0, axis=-1, keepdims=True)
p0 = ea0 / z0
clipped_entropy = p0 * (tf.log(z0) - a0)
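# Why this form is stable (sketch): with a0 = logits - max(logits),
# p0 = exp(a0) / z0 and log p0 = a0 - log z0, so the entropy
# -sum(p0 * log p0) = sum(p0 * (log z0 - a0)), which never takes the log
# of a probability that may underflow to zero.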
seq_entropy = SeqTensor(
tf.reduce_sum(clipped_entropy, axis=-1) *
tf.sequence_mask(
sequence_length, dtype=tf.float32),
sequence_length)
policy_entropy = tf.reduce_sum(
tf.reduce_sum(clipped_entropy, axis=-1) *
tf.sequence_mask(
sequence_length, dtype=tf.float32))
# Compute sequence cross entropy loss.
seq_logits, seq_probs, seq_predictions, seq_samples = [
SeqTensor(x, sequence_length)
for x in (logits, probs, predictions, samples)]
xent_loss, sequence_loss, step_loss = create_seq_xent_loss(
seq_logits.tensor,
seq_targets.tensor,
seq_weights.tensor,
sequence_length)
seq_step_loss = SeqTensor(step_loss, sequence_length)
# Add new nodes to the node_dict.
self.node_dict.update(
targets=seq_targets, weights=seq_weights,
temperature=temperature,
sequence_loss=sequence_loss,
step_loss=seq_step_loss,
loss=xent_loss,
ent_reg=policy_entropy,
seq_entropy=seq_entropy,
probs=seq_probs,
samples=seq_samples,
predictions=seq_predictions, logits=seq_logits)
@with_graph_variable_scope
def add_regression_outputs(self, hidden_sizes=(), activation='relu',
stop_gradient=False, name='Regression'):
"Add regression layer (linear transformation and MSE loss)."
with tf.variable_scope(name):
seq_targets = create_seq_inputs(
shape=tf.TensorShape([None, None]), dtype=tf.float32)
seq_weights = create_seq_inputs(
shape=tf.TensorShape([None, None]), dtype=tf.float32)
dropout = tf.placeholder_with_default(
0.0, shape=None, name='regression_dropout')
# Last dimension is the output, thus only one dimension.
output_tensor = self.node_dict['outputs'].tensor
if stop_gradient:
output_tensor = tf.stop_gradient(output_tensor)
if hidden_sizes:
h = create_multilayer_fnn(
output_tensor, dropout, list(hidden_sizes),
activation=activation)
else:
h = output_tensor
predictions = tf.layers.dense(inputs=h, units=1, name='regression_final')
# Turn shape from (batch_size, timesteps, 1) to (batch_size, timesteps).
predictions = tf.squeeze(predictions, axis=-1)
sequence_length = self.node_dict['outputs'].sequence_length
seq_predictions = SeqTensor(predictions, sequence_length)
mse_loss = create_seq_mse_loss(
seq_predictions.tensor, seq_targets.tensor, seq_weights.tensor, sequence_length)
self.node_dict.update(
targets=seq_targets, weights=seq_weights, regression_dropout=dropout,
loss=mse_loss, predictions=seq_predictions)
class SeqGraph(Graph):
"TensorFlow graph for RNN sequence model."
def __init__(self, graph_config, name='seq_graph'):
super(SeqGraph, self).__init__(name)
self.add_seq(**graph_config['core_config'])
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
self.add_train(**graph_config['train_config'])
@with_graph_variable_scope
def add_seq(self, input_shape, input_vocab_size=None,
hidden_size=128, n_layers=2,
cell_type='lstm', bidirectional=False,
dropout=0.0, use_embeddings=True,
embedding_size=64, name='Sequence'):
with tf.variable_scope(name):
batch_size = tf.placeholder(
dtype=tf.int32, shape=(), name='batch_size')
if use_embeddings:
embeddings = tf.get_variable(
'embeddings', shape=(input_vocab_size, embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
embeddings = None
(seq_inputs, initial_state, seq_outputs, final_state,
input_dropout, rnn_dropout, _) = create_seq_graph(
input_shape, batch_size=batch_size,
hidden_size=hidden_size, n_layers=n_layers,
cell_type=cell_type, bidirectional=bidirectional,
embeddings=embeddings)
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(inputs=seq_inputs,
rnn_dropout=rnn_dropout,
input_dropout=input_dropout,
embeddings=embeddings,
batch_size=batch_size,
final_state=final_state,
outputs=seq_outputs, n=n,
initial_state=initial_state)
class Seq2seqGraph(Graph):
"""TensorFlow graph for seq2seq model.
A basic seq2seq model with attention. The model supports
all the common specifications for a seq2seq model such as
number of layers, whether to use bidirectional encoder,
attention type, etc.
"""
def __init__(self, graph_config, name='seq2seq_graph'):
super(Seq2seqGraph, self).__init__(name)
self.add_seq2seq(**graph_config['core_config'])
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
self.add_train(**graph_config['train_config'])
@with_graph_variable_scope
def add_seq2seq(self, en_input_shape, input_shape,
use_attn=True,
attn_size=128, attn_vec_size=128,
en_input_vocab_size=None,
input_vocab_size=None,
en_hidden_size=128, en_n_layers=2,
hidden_size=128, n_layers=2,
cell_type='lstm',
en_bidirectional=False,
en_use_embeddings=True,
use_embeddings=True,
en_embedding_size=64,
embedding_size=64, name='Seq2seq'):
with tf.variable_scope(name) as scope:
batch_size = tf.placeholder(
dtype=tf.int32, shape=[], name='batch_size')
# Create encoder (en -- encoder, de -- decoder).
with tf.variable_scope('Encoder'):
if en_use_embeddings:
en_embeddings = tf.get_variable(
'embeddings', shape=(en_input_vocab_size, en_embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
en_embeddings = None
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, _) = create_seq_graph(
en_input_shape, batch_size=batch_size,
hidden_size=en_hidden_size, n_layers=en_n_layers,
cell_type=cell_type, bidirectional=en_bidirectional,
embeddings=en_embeddings, output_proj_size=en_hidden_size)
if use_attn:
attn_inputs = en_seq_outputs.tensor
else:
attn_inputs = None
if en_bidirectional:
en_final_state = en_final_state[0]
# Create decoder.
with tf.variable_scope('Decoder'):
if use_embeddings:
embeddings = tf.get_variable(
'embeddings', shape=(input_vocab_size, embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
embeddings = None
(seq_inputs, initial_state, seq_outputs, final_state,
input_dropout, rnn_dropout, _) = create_seq_graph(
input_shape, batch_size=batch_size,
hidden_size=hidden_size, n_layers=n_layers,
cell_type=cell_type, bidirectional=False,
embeddings=embeddings,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
attn_inputs=attn_inputs,
initial_state=en_final_state)
# Count number of steps.
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(
en_inputs=en_seq_inputs,
en_rnn_dropout=en_rnn_dropout,
en_input_dropout=en_input_dropout,
en_outputs=en_seq_outputs,
en_initial_state=en_initial_state,
en_final_state=en_final_state,
inputs=seq_inputs,
rnn_dropout=rnn_dropout,
input_dropout=input_dropout,
outputs=seq_outputs,
batch_size=batch_size,
final_state=final_state,
initial_state=initial_state, n=n,
encoded_context=en_seq_outputs, context=en_seq_inputs,
en_embeddings=en_embeddings,
embeddings=embeddings)
if use_attn:
self.node_dict['attn_inputs'] = attn_inputs
class MemorySeq2seqGraph(Graph):
def __init__(self, graph_config, name='memory_seq2seq_graph'):
super(MemorySeq2seqGraph, self).__init__(name)
self.use_gpu = graph_config['use_gpu']
if self.use_gpu:
os.environ["CUDA_VISIBLE_DEVICES"] = graph_config['gpu_id']
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ''
self.add_memory_seq2seq(**graph_config['core_config'])
self.add_outputs(graph_config['output_type'], graph_config['output_config'])
self.add_train(**graph_config['train_config'])
self.config = graph_config
@with_graph_variable_scope
def add_memory_seq2seq(
self, max_n_valid_indices=None,
n_mem=None,
n_builtin=None,
use_attn=True,
attn_size=128, attn_vec_size=128,
en_input_vocab_size=None,
input_vocab_size=None,
en_hidden_size=128, en_n_layers=2,
hidden_size=128, n_layers=2,
cell_type='lstm',
en_bidirectional=False,
en_use_embeddings=True,
en_embedding_size=4,
value_embedding_size=128,
en_pretrained_vocab_size=None,
en_pretrained_embedding_size=-1,
tie_en_embeddings=True,
add_lm_loss=False,
n_en_input_features=1,
n_de_output_features=1,
en_attn_on_constants=False,
name='MemorySeq2seq'):
"""Create seq2seq with key variable memory.
Seq2seq with key variable memory is used for semantic
parsing (generating programs from natural language
instructions/questions).
A MemorySeq2seq Model uses a memory cell in decoder.
There are 3 types of tokens in a program:
1) constants that are provided before the program is
generated (added before decoding, different for
different examples); 2) variables that save the results
from executing past expressions (added during decoding,
different for different examples); 3) language
primitives such as built-in functions and reserved
tokens, for example "(" and ")" (the same for all
examples).
There are two kinds of constants: 1) constants from the
question, whose representation comes from the span on
which the constant is annotated; 2) constants from the
context, whose representation comes from the constant
value embeddings, for example, table columns.
So the decoder vocab is organized as
[primitives, constants, variables].
For a constant, its embedding is computed as sum of two
parts: 1) embedding of the span (from encoder) on which
the constant is annotated with, for example the span
"barack obama" in "who is barack obama's wife" or the
span "one" in "what is one plus one"; 2) embedding of
the constant, for example, the embedding of the entity
Obama or the embedding of the number one.
For a variable, its embedding is the decoder RNN output
at the step where the variable is created.
For a primitive, its embedding is initialized randomly
and tuned by SGD.
Inspired by the code assistance (such as autocompletion)
in modern IDEs, we also apply semantic and syntactic
constraints on the decoder vocabulary so that at each
step, only some of the tokens are valid. So the decoder
has a dynamic vocabulary that changes across steps.
"""
input_shape = tf_utils.MemoryInputTuple(
tf.TensorShape([None, None]),
tf.TensorShape([None, None]),
tf.TensorShape([None, None, max_n_valid_indices]))
input_dtype = tf_utils.MemoryInputTuple(
tf.int32, tf.int32, tf.int32)
en_input_shape = tf.TensorShape([None, None])
constant_span_shape = tf.TensorShape([None, n_mem, 2])
constant_value_embedding_shape = tf.TensorShape(
[None, n_mem, value_embedding_size])
builtin_de_embeddings_shape = tf.TensorShape([n_builtin, hidden_size])
with tf.variable_scope('Constant_encoder'):
# constant_span_embedding encodes the information
# from the span where the constant is referred to,
# for example the span "obama" in "who is the wife
# of obama".
# constant_value_embedding encodes the information
# from the value of the constant, for example, the
# embedding of the entity Obama.
# constant_span: (B, n_mem, 2)
constant_spans_placeholder = tf.placeholder(tf.int32, constant_span_shape)
constant_spans = constant_spans_placeholder
n_constants_placeholder = tf.placeholder(tf.int32, [None, 1])
n_constants = tf.squeeze(n_constants_placeholder, [-1])
# constant_span_masks: (B, n_mem, 1);
# 0.0 if the span is [-1, -1], else 1.0.
constant_span_masks = tf.cast(
tf.greater(
tf.reduce_sum(constant_spans, axis=2), 0), tf.float32)
constant_span_masks = tf.expand_dims(constant_span_masks, -1)
# constant_spans: (B, n_mem, 2, 1)
constant_spans = tf.maximum(constant_spans, 0)
constant_spans = tf.expand_dims(constant_spans, axis=-1)
if constant_value_embedding_shape is not None:
constant_value_embeddings_placeholder = tf.placeholder(
tf.float32, shape=constant_value_embedding_shape)
constant_value_embeddings = constant_value_embeddings_placeholder
constant_value_embeddings = tf.layers.dense(
constant_value_embeddings, hidden_size, use_bias=True)
constant_value_masks = tf.squeeze(1 - constant_span_masks, [-1])
if n_en_input_features > 0:
en_input_features_shape = tf.TensorShape([None, None, n_en_input_features])
else:
en_input_features_shape = None
with tf.variable_scope(name) as scope:
batch_size = tf.placeholder(
dtype=tf.int32, shape=[], name='batch_size')
with tf.variable_scope('Encoder'):
if en_use_embeddings:
if en_pretrained_embedding_size < 0:
en_embeddings = tf.get_variable(
'embeddings', shape=(en_input_vocab_size, en_embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
else:
en_embeddings = tf.get_variable(
'embeddings', shape=(
en_input_vocab_size - en_pretrained_vocab_size, en_embedding_size),
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
en_pretrained_embeddings = tf.get_variable(
'pretrained_embeddings', shape=(
en_pretrained_vocab_size, en_pretrained_embedding_size),
trainable=False,
initializer=tf.zeros_initializer())
en_pretrained_embeddings_placeholder = tf.placeholder(
tf.float32, [en_pretrained_vocab_size, en_pretrained_embedding_size])
en_pretrained_embeddings_init = en_pretrained_embeddings.assign(
en_pretrained_embeddings_placeholder)
en_pretrained_embeddings = tf.layers.dense(
inputs=en_pretrained_embeddings, units=en_embedding_size,
use_bias=True)
en_embeddings = tf.concat(
values=[en_embeddings, en_pretrained_embeddings], axis=0)
else:
en_embeddings = None
# using attention
if en_attn_on_constants:
tf.logging.info('Using attention in encoder!!!')
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, en_rnn_outputs) = create_seq_graph(
en_input_shape, batch_size=batch_size,
hidden_size=en_hidden_size, n_layers=en_n_layers,
cell_type=cell_type, bidirectional=en_bidirectional,
embeddings=en_embeddings, output_proj_size=en_hidden_size,
input_features_shape=en_input_features_shape,
attn_inputs=constant_value_embeddings,
attn_masks=constant_value_masks,
attn_size=attn_size, attn_vec_size=attn_vec_size)
else:
(en_seq_inputs, en_initial_state, en_seq_outputs, en_final_state,
en_input_dropout, en_rnn_dropout, en_rnn_outputs) = create_seq_graph(
en_input_shape, batch_size=batch_size,
hidden_size=en_hidden_size, n_layers=en_n_layers,
cell_type=cell_type, bidirectional=en_bidirectional,
embeddings=en_embeddings, output_proj_size=en_hidden_size,
input_features_shape=en_input_features_shape)
if n_en_input_features > 0:
en_seq_input_features = SeqTensor(
en_seq_inputs.tensor[1], tf.placeholder(tf.int32, [None]))
en_seq_inputs = SeqTensor(
en_seq_inputs.tensor[0], en_seq_inputs.sequence_length)
if add_lm_loss:
sequence_length = tf.maximum(en_seq_inputs.sequence_length - 1, 0)
en_n = tf.cast(tf.reduce_sum(sequence_length), tf.float32)
mask = tf.sequence_mask(sequence_length, dtype=tf.float32)
if en_bidirectional:
en_fw_outputs = en_rnn_outputs[0]
en_bw_outputs = en_rnn_outputs[1]
if tie_en_embeddings:
en_fw_logits = tf_utils.tensormul(
en_fw_outputs[:, :-1, :], tf.transpose(en_embeddings))
en_bw_logits = tf_utils.tensormul(
en_bw_outputs[:, 1:, :], tf.transpose(en_embeddings))
else:
# Use 0 to n-2 to compute logits.
en_fw_logits = tf.layers.dense(
en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)
en_bw_logits = tf.layers.dense(
en_bw_outputs[:, 1:, :], en_input_vocab_size, use_bias=True)
# Use 1 to n-1 as targets.
en_fw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, 1:], logits=en_fw_logits) * mask
en_bw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, :-1], logits=en_bw_logits) * mask
en_lm_loss = tf.reduce_sum(en_fw_lm_loss + en_bw_lm_loss) / en_n
else:
en_fw_outputs = en_rnn_outputs
if tie_en_embeddings:
en_fw_logits = tf_utils.tensormul(
en_fw_outputs[:, :-1, :], tf.transpose(en_embeddings))
else:
en_fw_logits = tf.layers.dense(
en_fw_outputs[:, :-1, :], en_input_vocab_size, use_bias=True)
en_fw_lm_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=en_seq_inputs.tensor[:, 1:], logits=en_fw_logits) * mask
en_lm_step_loss = en_fw_lm_loss
en_lm_loss = tf.reduce_sum(en_lm_step_loss) / en_n
if use_attn:
attn_inputs = en_seq_outputs.tensor
attn_masks = tf.sequence_mask(
en_seq_outputs.sequence_length, dtype=tf.float32)
else:
attn_inputs = None
attn_masks = None
with tf.variable_scope('Constant_encoder'):
batch_ind = tf.range(batch_size)
# batch_ind: (B, 1, 1, 1)
for i in range(3):
batch_ind = tf.expand_dims(batch_ind, axis=-1)
# batch_ind: (B, n_mem, 2, 1)
batch_ind = tf.tile(batch_ind, [1, n_mem, 2, 1])
# constant_span: (B, n_mem, 2, 2)
constant_spans = tf.concat([batch_ind, constant_spans], axis=-1)
# constant_span_embedding: (B, n_mem, 2, en_output_size)
constant_span_embeddings = tf.gather_nd(en_seq_outputs.tensor, constant_spans)
# constant_embedding: (B, n_mem, en_output_size)
constant_embeddings = tf.reduce_mean(constant_span_embeddings, axis=2)
constant_embeddings = constant_embeddings * constant_span_masks
if constant_value_embedding_shape is not None:
constant_embeddings = constant_embeddings + constant_value_embeddings
# mask out the bad constants.
# constant mask: (B, n_mem)
constant_masks = tf.sequence_mask(
n_constants, maxlen=n_mem, dtype=tf.float32)
# constant mask: (B, n_mem, 1)
constant_masks = tf.expand_dims(constant_masks, -1)
constant_masks = tf.tile(constant_masks, [1, 1, hidden_size])
# constant_embeddings: (B, n_mem, hidden_size)
constant_embeddings = constant_embeddings * constant_masks
# builtin_de_embeddings: (n_builtin, embed_size)
builtin_de_embeddings = tf.get_variable(
'builtin_de_embeddings', builtin_de_embeddings_shape,
initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
# builtin_de_embeddings: (1, n_builtin, embed_size)
builtin_de_embeddings = tf.expand_dims(builtin_de_embeddings, axis=0)
# builtin_de_embeddings: (B, n_builtin, embed_size)
builtin_de_embeddings = tf.tile(
builtin_de_embeddings, [batch_size] + [1] * 2)
# initial_memory: (B, n_builtin + n_mem, embed_size)
initial_memory = tf.concat(
[builtin_de_embeddings, constant_embeddings], axis=1)
# Concatenate the built-in embeddings and the constant embeddings to form
# the initial memory used to create the initial state.
if en_bidirectional:
initial_state = en_final_state[0]
else:
initial_state = en_final_state
with tf.variable_scope('Decoder'):
initial_state = tf_utils.MemoryStateTuple(
initial_memory, initial_state)
seq_inputs = create_seq_inputs(shape=input_shape, dtype=input_dtype)
inputs = seq_inputs.tensor
sequence_length = seq_inputs.sequence_length
rnn_dropout = tf.placeholder_with_default(
0.0, shape=None, name='rnn_dropout')
# Create multilayer attention cell then wrap with memory cell.
cell = multilayer_dropout_cell(
cell_fn=RNN_CELL_DICT[cell_type], hidden_size=hidden_size,
n_layers=n_layers, dropout=rnn_dropout)
if attn_inputs is not None:
cell = tf_utils.SeqAttentionCellWrapper(
cell, attn_inputs=attn_inputs,
attn_size=attn_size, attn_vec_size=attn_vec_size,
output_size=hidden_size, attn_masks=attn_masks)
mem_size = builtin_de_embeddings_shape[0] + constant_span_shape[1]
embed_size = hidden_size
cell = tf_utils.MemoryWrapper(
cell, mem_size, embed_size, max_n_valid_indices)
flat_inputs = data_utils.flatten(inputs)
flat_inputs = [
tf.expand_dims(in_, -1) for in_ in flat_inputs[:2]] + flat_inputs[2:]
inputs = data_utils.pack_sequence_as(inputs, flat_inputs)
outputs, final_state = tf.nn.dynamic_rnn(
cell, inputs, sequence_length=sequence_length,
initial_state=initial_state, dtype=tf.float32)
if n_de_output_features > 0:
de_seq_output_features = create_seq_inputs(
shape=tf.TensorShape(
[None, None, max_n_valid_indices, n_de_output_features]),
dtype=tf.int32, name='de_output_features')
output_feature_weights = tf.get_variable(
'de_output_feature_weights',
shape=tf.TensorShape([n_de_output_features, 1]),
initializer=tf.zeros_initializer())
outputs = outputs + tf.squeeze(tf_utils.tensormul(
tf.cast(de_seq_output_features.tensor, tf.float32),
output_feature_weights), axis=-1)
seq_outputs = SeqTensor(outputs, sequence_length)
n = tf.reduce_sum(seq_inputs.sequence_length)
self.node_dict.update(
en_inputs=en_seq_inputs,
en_rnn_dropout=en_rnn_dropout,
en_input_dropout=en_input_dropout,
en_outputs=en_seq_outputs,
en_initial_state=en_initial_state,
en_final_state=en_final_state,
inputs=seq_inputs,
constant_spans=constant_spans_placeholder,
constant_embeddings=constant_embeddings,
constant_masks=constant_masks,
n_constants=n_constants_placeholder,
rnn_dropout=rnn_dropout,
# input_dropout=input_dropout,
outputs=seq_outputs,
batch_size=batch_size,
final_state=final_state,
initial_state=initial_state, n=n,
encoded_context=en_seq_outputs,
context=en_seq_inputs,
en_embeddings=en_embeddings)
if en_pretrained_embedding_size > 0:
self.node_dict['en_pretrained_embeddings'] = en_pretrained_embeddings_placeholder
self.node_dict['en_pretrained_embeddings_init'] = en_pretrained_embeddings_init
if constant_value_embedding_shape is not None:
self.node_dict['constant_value_embeddings'] = constant_value_embeddings_placeholder
if add_lm_loss:
self.node_dict['en_lm_loss'] = en_lm_loss
# self.node_dict['en_lm_step_loss'] = en_lm_step_loss
if use_attn:
self.node_dict['attn_inputs'] = attn_inputs
if n_en_input_features > 0:
self.node_dict['en_input_features'] = en_seq_input_features
self.node_dict['summaries'].append(
tf.summary.scalar(
self.vs_name + '/' + 'en_input_features_sum',
tf.reduce_sum(en_seq_input_features.tensor)))
if n_de_output_features > 0:
self.node_dict['output_features'] = de_seq_output_features
self.node_dict['output_feature_weights'] = output_feature_weights
self.node_dict['summaries'].append(
tf.summary.scalar(
self.vs_name + '/' + 'output_feature_weights_0',
output_feature_weights[0][0]))
self.node_dict['summaries'].append(
tf.summary.scalar(
self.vs_name + '/' + 'output_features_sum',
tf.reduce_sum(de_seq_output_features.tensor)))
class MonitorGraph(object):
"""A tensorflow graph to monitor some values during training.
Generate tensorflow summaries for the values to monitor
them through tensorboard.
"""
def __init__(self):
self.node_dict = {}
self._graph = tf.Graph()
def launch(self):
with self._graph.as_default():
self.merged = tf.summary.merge_all()
init = tf.global_variables_initializer()
self.session = tf.Session(graph=self._graph)
self.session.run(init)
def add_scalar_monitor(self, name, dtype):
with self._graph.as_default():
x = tf.placeholder(
dtype=dtype, shape=None, name=name)
tf.summary.scalar(name, x)
self.node_dict[name] = x
def generate_summary(self, feed_dict):
summary_str = self.session.run(
self.merged, map_dict(self.node_dict, feed_dict))
return summary_str
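# Usage sketch (hypothetical values, not part of the original code):
# monitor = MonitorGraph()
# monitor.add_scalar_monitor('avg_return', dtype=tf.float32)
# monitor.launch()
# summary_str = monitor.generate_summary({'avg_return': 0.42})
# The returned summary string can then be written with a tf.summary.FileWriter.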
# Utility functions for creating TensorFlow graphs.
# FNN
def create_multilayer_fnn(inputs, dropout, hidden_sizes, activation='relu'):
x = inputs
for size in hidden_sizes:
x = tf.nn.dropout(x, 1 - dropout)
x = tf.layers.dense(inputs=x, units=size, activation=ACTIVATION_DICT[activation])
return x
# Loss
def create_seq_mse_loss(outputs, targets, weights, sequence_length):
mask = tf.sequence_mask(
sequence_length, dtype=tf.float32)
loss = tf.reduce_sum(tf.squared_difference(outputs, targets) * weights * mask)
return loss
# cross entropy
def create_seq_xent_loss(logits, targets, weights, sequence_length):
mask = tf.sequence_mask(
sequence_length, maxlen=tf.reduce_max(sequence_length),
dtype=tf.float32)
step_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits) * weights * mask
sequence_loss = tf.reduce_sum(step_loss, axis=1)
xent_loss = tf.reduce_sum(sequence_loss)
return xent_loss, sequence_loss, step_loss
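# Shape sketch (illustrative): with logits of shape (B, T, V), targets and
# weights of shape (B, T), step_loss is (B, T) with positions beyond each
# sequence_length masked to zero, sequence_loss is (B,) and xent_loss is a
# scalar sum over the batch.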
def create_softmax(
inputs, softmax_w=None,
output_vocab_size=None, use_bias=False, name='Softmax_layer'):
"Create nodes for linear transformation of inputs and softmax computation."
with tf.name_scope(name):
# inputs = tf.nn.dropout(inputs, 1-dropout)
if softmax_w is None:
logits = tf.layers.dense(
inputs=inputs, units=output_vocab_size,
use_bias=use_bias)
else:
logits = tf_utils.tensormul(inputs, softmax_w)
if use_bias:
softmax_b = tf.Variable(
initial_value=np.zeros(
(1, output_vocab_size), dtype=tf.float32),
name='softmax_bias')
logits += softmax_b
return create_softmax_from_logits(logits)
def create_softmax_from_logits(logits):
"Create nodes for softmax computation from logits."
temperature = tf.placeholder_with_default(
1.0, shape=(), name='temperature')
logits = logits / temperature
logits_shape = tf.shape(logits)
logits_dim = logits_shape[-1]
logits_2d = tf.reshape(logits, [-1, logits_dim])
samples = tf.multinomial(logits_2d, 1)
samples = tf.reshape(samples, logits_shape[:-1])
probs = tf.nn.softmax(logits)
predictions = tf.argmax(probs, axis=2)
return logits, probs, predictions, samples, temperature
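# Behaviour sketch: the returned `temperature` placeholder rescales the
# logits before sampling, so values above 1.0 flatten the sampling
# distribution and values near 0 make `samples` close to greedy;
# `predictions` uses argmax and is unaffected by any positive temperature.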
# Embedding
def embed_inputs(
inputs, embeddings, name='Embedding_layer'):
with tf.name_scope(name):
embedded_inputs = tf.nn.embedding_lookup(embeddings, inputs)
return embedded_inputs
# RNN
def create_rnn(
cell, initial_state, inputs, sequence_length,
hidden_size, bidirectional, cell_bw=None,
name='RNN'):
with tf.name_scope(name):
if bidirectional:
# Note that you can't use bidirectional RNN if you
# want to do decoding.
initial_state_fw = initial_state[0]
initial_state_bw = initial_state[1]
outputs, final_state = tf.nn.bidirectional_dynamic_rnn(
cell, cell_bw, inputs, sequence_length=sequence_length,
initial_state_fw=initial_state_fw,
initial_state_bw=initial_state_bw,
dtype=tf.float32)
else:
outputs, final_state = tf.nn.dynamic_rnn(
cell, inputs, sequence_length=sequence_length,
initial_state=initial_state, dtype=tf.float32)
return outputs, final_state
# RNN Cell
def multilayer_dropout_cell(
cell_fn, hidden_size, n_layers, dropout,
use_skip_connection=True):
"""Create multilayer RNN cell with dropout."""
cells = []
for i in xrange(n_layers):
cell = cell_fn(hidden_size)
if i > 0 and use_skip_connection:
cell = tf.nn.rnn_cell.ResidualWrapper(cell)
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=1.0-dropout)
# variational_recurrent=True,
# state_keep_prob = 1.0 - dropout,
# dtype=tf.float32)
cells.append(cell)
final_cell = tf.contrib.rnn.MultiRNNCell(cells)
return final_cell
# Input placeholders.
def create_seq_inputs(shape, dtype=tf.float32, name='inputs'):
with tf.name_scope(name):
if isinstance(shape, tuple):
flat_input_shape = data_utils.flatten(shape)
assert isinstance(dtype, tuple)
flat_dtype = data_utils.flatten(dtype)
flat_inputs = [tf.placeholder(
dt, sh, name='inputs') for dt, sh in zip(flat_dtype, flat_input_shape)]
inputs = data_utils.pack_sequence_as(shape, flat_inputs)
else:
inputs = tf.placeholder(dtype, shape)
sequence_length = tf.placeholder(
tf.int32, [None], name='sequence_length')
return SeqTensor(inputs, sequence_length)
def create_tuple_placeholders_with_default(inputs, extra_dims, shape):
if isinstance(shape, int):
result = tf.placeholder_with_default(
inputs, list(extra_dims) + [shape])
else:
subplaceholders = [create_tuple_placeholders_with_default(
subinputs, extra_dims, subshape)
for subinputs, subshape in zip(inputs, shape)]
t = type(shape)
if t == tuple:
result = t(subplaceholders)
else:
result = t(*subplaceholders)
return result
def create_tuple_placeholders(dtype, extra_dims, shape):
if isinstance(shape, int):
result = tf.placeholder(dtype, list(extra_dims) + [shape])
else:
subplaceholders = [create_tuple_placeholders(dtype, extra_dims, subshape)
for subshape in shape]
t = type(shape)
# Handles both tuple and LSTMStateTuple.
if t == tuple:
result = t(subplaceholders)
else:
result = t(*subplaceholders)
return result
# Sequence models.
def create_seq_graph(input_shape, batch_size=None,
# input_vocab_size=None,
attn_inputs=None,
attn_size=128, attn_vec_size=128,
# output_size=128,
input_size=None,
hidden_size=128, n_layers=2,
cell_type='lstm', bidirectional=False,
initial_state=None,
#use_embeddings=True,
#embedding_size=64,
embeddings=None,
output_proj_size=None,
input_features_shape=None,
attn_masks=None):
# Create inputs.
seq_inputs = create_seq_inputs(
shape=input_shape, dtype=tf.int32 if embeddings is not None else tf.float32)
rnn_dropout = tf.placeholder_with_default(
0.0, shape=None, name='rnn_dropout')
# Create embedding layer.
if embeddings is not None:
embedded_inputs = embed_inputs(
seq_inputs.tensor, embeddings=embeddings)
else:
embedded_inputs = seq_inputs.tensor
input_dropout = tf.placeholder_with_default(
0.0, shape=None, name='input_dropout')
embedded_inputs = tf.nn.dropout(
embedded_inputs, 1 - input_dropout)
# If we include features in inputs, then add them here.
if input_features_shape is not None:
seq_input_features = create_seq_inputs(
shape=input_features_shape, dtype=tf.int32)
embedded_inputs = tf.concat(
[embedded_inputs, tf.cast(seq_input_features.tensor, tf.float32)],
axis=-1)
seq_inputs = SeqTensor(
(seq_inputs.tensor, seq_input_features.tensor), seq_inputs.sequence_length)
else:
seq_input_features = None
embedded_seq_inputs = SeqTensor(embedded_inputs, seq_inputs.sequence_length)
# Create RNN cell
cell = multilayer_dropout_cell(
RNN_CELL_DICT[cell_type],
hidden_size, n_layers, rnn_dropout)
if bidirectional:
cell_bw = multilayer_dropout_cell(
RNN_CELL_DICT[cell_type],
hidden_size, n_layers, rnn_dropout)
else:
cell_bw = None
# Add attention.
if attn_inputs is not None:
cell = tf_utils.SeqAttentionCellWrapper(
cell, attn_inputs=attn_inputs,
attn_size=attn_size, attn_vec_size=attn_vec_size,
output_size=hidden_size, attn_masks=attn_masks)
if bidirectional:
cell_bw = tf_utils.SeqAttentionCellWrapper(
cell_bw, attn_inputs=attn_inputs,
attn_size=attn_size, attn_vec_size=attn_vec_size,
output_size=hidden_size, attn_masks=attn_masks)
if initial_state is None:
# Create zero state.
zero_state = cell.zero_state(batch_size, tf.float32)
if bidirectional:
zero_state_bw = cell_bw.zero_state(batch_size, tf.float32)
zero_state = (zero_state, zero_state_bw)
initial_state = zero_state
# Create RNN.
outputs, final_state = create_rnn(
cell, initial_state, embedded_seq_inputs.tensor,
embedded_seq_inputs.sequence_length,
hidden_size=hidden_size,
bidirectional=bidirectional, cell_bw=cell_bw)
rnn_outputs = outputs
if bidirectional:
outputs = tf.concat(outputs, axis=2)
hidden_size *= 2
# Whether to add linear transformation to outputs.
if output_proj_size is not None:
outputs = tf.layers.dense(
inputs=outputs, units=output_proj_size, use_bias=True)
seq_outputs = SeqTensor(
outputs,
tf.placeholder_with_default(
seq_inputs.sequence_length, shape=[None]))
return (seq_inputs, initial_state, seq_outputs,
final_state, input_dropout, rnn_dropout,
rnn_outputs)
# General utility functions.
def map_dict(dict_1, main_dict):
new_dict = {}
for k, v in main_dict.iteritems():
if k in dict_1:
new_dict[dict_1[k]] = main_dict[k]
return new_dict
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | tests~test_bunka.py | import sys
sys.path.append("../")
import unittest
from bunkatopics import Bunka
import pandas as pd
import plotly.graph_objects as go
import random
from datasets import load_dataset
from langchain.llms import LlamaCpp
import os
from dotenv import load_dotenv
load_dotenv()
random.seed(42)
generative_model = LlamaCpp(
model_path=os.getenv("MODEL_PATH"),
n_ctx=2048,
temperature=0.75,
max_tokens=2000,
top_p=1,
verbose=False,
)
generative_model.client.verbose = False
class BunkaTestCase(unittest.TestCase):
def setUp(self):
docs = load_dataset("rguo123/trump_tweets")["train"]["content"]
docs = random.sample(docs, 100)
self.bunka = Bunka()
self.bunka.fit(docs)
def test_pipeline(self):
# test Topic Modeling
n_clusters = 2
df_topics = self.bunka.get_topics(n_clusters=n_clusters)
self.assertEqual(len(df_topics), n_clusters)
self.assertIsInstance(df_topics, pd.DataFrame)
topic_fig = self.bunka.visualize_topics(width=800, height=800)
self.assertIsInstance(topic_fig, go.Figure)
# test Bourdieu Map
bourdieu_fig = self.bunka.visualize_bourdieu(
generative_model=generative_model,
x_left_words=["past"],
x_right_words=["future", "futuristic"],
y_top_words=["politics", "Government"],
y_bottom_words=["cultural phenomenons"],
height=2000,
width=2000,
clustering=True,
topic_gen_name=True,
topic_n_clusters=2,
)
self.assertIsInstance(bourdieu_fig, go.Figure)
# test Undimentional Map
fig_solo = self.bunka.visualize_bourdieu_one_dimension(
left=["negative", "bad"],
right=["positive"],
width=600,
height=600,
explainer=False,
)
self.assertIsInstance(fig_solo, go.Figure)
# test RAG
top_doc_len = 3
res = self.bunka.rag_query(
query="What are the main fight of Donald Trump ?",
generative_model=generative_model,
top_doc=top_doc_len,
)
result = res["result"]
self.assertIsInstance(result, str)
document_sources = res["source_documents"]
self.assertEqual(len(document_sources), top_doc_len)
if __name__ == "__main__":
unittest.main()
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | bunkatopics~bunkatopics.py | import warnings
from numba.core.errors import NumbaDeprecationWarning
warnings.filterwarnings("ignore", category=NumbaDeprecationWarning)
import os
import random
import string
import typing as t
import uuid
import warnings
import subprocess
import json
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import umap
from langchain.chains import RetrievalQA
from langchain.document_loaders import DataFrameLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from sklearn.preprocessing import MinMaxScaler
from tqdm import tqdm
from bunkatopics.bunka_logger import logger
from bunkatopics.datamodel import (
DOC_ID,
TERM_ID,
TOPIC_ID,
BourdieuQuery,
Document,
Term,
Topic,
TopicGenParam,
TopicParam,
)
from bunkatopics.functions.bourdieu_api import bourdieu_api
from bunkatopics.functions.coherence import get_coherence
from bunkatopics.functions.extract_terms import extract_terms_df
from bunkatopics.functions.topic_document import get_top_documents
from bunkatopics.functions.topic_gen_representation import get_clean_topic_all
from bunkatopics.functions.topic_utils import get_topic_repartition
from bunkatopics.functions.topics_modeling import get_topics
from bunkatopics.visualisation.bourdieu_visu import visualize_bourdieu_one_dimension
from bunkatopics.visualisation.new_bourdieu_visu import visualize_bourdieu
from bunkatopics.visualisation.query_visualisation import plot_query
from bunkatopics.visualisation.topic_visualization import visualize_topics
from bunkatopics.serveur.utils import is_server_running, kill_server
os.environ["TOKENIZERS_PARALLELISM"] = "true"
class Bunka:
def __init__(self, embedding_model=None, language: str = "en_core_web_sm"):
if embedding_model is None:
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
self.embedding_model = embedding_model
self.language = language
def fit(
self,
docs: t.List[str],
ids: t.List[DOC_ID] = None,
) -> None:
df = pd.DataFrame(docs, columns=["content"])
if ids is not None:
df["doc_id"] = ids
else:
df["doc_id"] = [str(uuid.uuid4())[:8] for _ in range(len(df))]
df = df[~df["content"].isna()]
df = df.reset_index(drop=True)
self.docs = [Document(**row) for row in df.to_dict(orient="records")]
sentences = [doc.content for doc in self.docs]
ids = [doc.doc_id for doc in self.docs]
df = pd.DataFrame.from_records([doc.dict() for doc in self.docs])
logger.info("Extracting Terms")
df_terms, df_terms_indexed = extract_terms_df(
df,
text_var="content",
index_var="doc_id",
ngs=True,
ents=True,
ncs=True,
sample_size=100000,
drop_emoji=True,
ngrams=(1, 2, 3),
remove_punctuation=True,
include_pos=["NOUN"],
include_types=["PERSON", "ORG"],
language=self.language,
)
df_terms = df_terms.reset_index()
df_terms = df_terms.rename(columns={"terms_indexed": "term_id"})
terms = [Term(**row) for row in df_terms.to_dict(orient="records")]
self.terms: t.List[Term] = terms
df_terms_indexed = df_terms_indexed.reset_index()
df_terms_indexed.columns = ["doc_id", "indexed_terms"]
indexed_terms_dict = df_terms_indexed.set_index("doc_id")[
"indexed_terms"
].to_dict()
# add to the docs object
for doc in self.docs:
doc.term_id = indexed_terms_dict.get(doc.doc_id, [])
# Embed sentences
logger.info("Embedding Documents, this may take few minutes")
# Using FAISS to index and embed the documents
df_temporary = pd.DataFrame(sentences)
loader = DataFrameLoader(df_temporary, page_content_column=0)
documents_langchain = loader.load()
characters = string.ascii_letters + string.digits
random_string = "".join(random.choice(characters) for _ in range(20))
self.vectorstore = Chroma(
embedding_function=self.embedding_model, collection_name=random_string
)
self.vectorstore.add_texts(texts=sentences, ids=ids)
# self.vectorstore.add_documents(documents_langchain)
# Get all embeddings
embeddings = self.vectorstore._collection.get(include=["embeddings"])[
"embeddings"
]
# final_ids = vectorstore._collection.get(include=["embeddings"])["ids"]
df_embeddings = pd.DataFrame(embeddings)
df_embeddings.index = ids
emb_doc_dict = {x: y for x, y in zip(ids, embeddings)}
for doc in self.docs:
doc.embedding = emb_doc_dict.get(doc.doc_id, [])
logger.info("Reducing Dimensions")
reducer = umap.UMAP(n_components=2, random_state=42)
embeddings_2D = reducer.fit_transform(embeddings)
df_embeddings_2D = pd.DataFrame(embeddings_2D)
df_embeddings_2D.columns = ["x", "y"]
df_embeddings_2D["doc_id"] = ids
xy_dict = df_embeddings_2D.set_index("doc_id")[["x", "y"]].to_dict("index")
# Update the documents with the x and y values from the DataFrame
for doc in self.docs:
doc.x = xy_dict[doc.doc_id]["x"]
doc.y = xy_dict[doc.doc_id]["y"]
def fit_transform(self, docs: t.List[Document], n_clusters=40) -> pd.DataFrame:
self.fit(docs)
df_topics = self.get_topics(n_clusters=n_clusters)
return df_topics
def get_topics(
self,
n_clusters=5,
ngrams=[1, 2],
name_lenght=15,
top_terms_overall=2000,
min_count_terms=1,
) -> pd.DataFrame:
self.topics: t.List[Topic] = get_topics(
docs=self.docs,
terms=self.terms,
n_clusters=n_clusters,
ngrams=ngrams,
name_lenght=name_lenght,
x_column="x",
y_column="y",
top_terms_overall=top_terms_overall,
min_count_terms=min_count_terms,
)
self.docs, self.topics = get_top_documents(
self.docs, self.topics, ranking_terms=20
)
df_topics = pd.DataFrame.from_records([topic.dict() for topic in self.topics])
return df_topics
def rag_query(self, query: str, generative_model, top_doc: int = 2):
logger.info("Answering your query, please wait a few seconds")
# Build the retrieval QA chain: retriever over the vector store + the LLM
qa_with_sources_chain = RetrievalQA.from_chain_type(
llm=generative_model,
retriever=self.vectorstore.as_retriever(search_kwargs={"k": top_doc}),
# chain_type_kwargs=chain_type_kwargs,
return_source_documents=True,
)
response = qa_with_sources_chain({"query": query})
return response
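# Usage sketch (hypothetical query, mirrors tests/test_bunka.py):
# res = bunka.rag_query(query="What is discussed?", generative_model=llm, top_doc=2)
# answer, sources = res["result"], res["source_documents"]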
def get_clean_topic_name(
self, generative_model, language="english", use_doc=False, context="everything"
) -> pd.DataFrame:
"""
Get the topic name using Generative AI
"""
self.topics: t.List[Topic] = get_clean_topic_all(
generative_model,
self.topics,
self.docs,
language=language,
use_doc=use_doc,
context=context,
)
df_topics = pd.DataFrame.from_records([topic.dict() for topic in self.topics])
return df_topics
def search(self, user_input: str, top_doc: int = 3) -> pd.DataFrame:
res = self.vectorstore.similarity_search_with_score(user_input, k=top_doc)
# res = vector_search(self.docs, self.embedding_model, user_input=user_input)
return res
def get_topic_coherence(self, topic_terms_n=10):
texts = [doc.term_id for doc in self.docs]
res = get_coherence(self.topics, texts, topic_terms_n=topic_terms_n)
return res
def get_topic_repartition(self, width=1200, height=800) -> go.Figure:
fig = get_topic_repartition(self.topics, width=width, height=height)
return fig
def visualize_bourdieu(
self,
generative_model=None,
x_left_words=["war"],
x_right_words=["peace"],
y_top_words=["men"],
y_bottom_words=["women"],
height=1500,
width=1500,
display_percent=True,
clustering=False,
topic_n_clusters=10,
topic_terms=2,
topic_ngrams=[1, 2],
topic_top_terms_overall=500,
gen_topic_language="english",
topic_gen_name=False,
manual_axis_name=None,
use_doc_gen_topic=False,
radius_size: float = 0.3,
convex_hull=True,
) -> go.Figure:
topic_gen_param = TopicGenParam(
language=gen_topic_language,
top_doc=3,
top_terms=10,
use_doc=use_doc_gen_topic,
context="everything",
)
topic_param = TopicParam(
n_clusters=topic_n_clusters,
ngrams=topic_ngrams,
name_lenght=topic_terms,
top_terms_overall=topic_top_terms_overall,
)
self.bourdieu_query = BourdieuQuery(
x_left_words=x_left_words,
x_right_words=x_right_words,
y_top_words=y_top_words,
y_bottom_words=y_bottom_words,
radius_size=radius_size,
)
# Request Bourdieu API
res = bourdieu_api(
generative_model=generative_model,
embedding_model=self.embedding_model,
docs=self.docs,
terms=self.terms,
bourdieu_query=self.bourdieu_query,
generative_ai_name=topic_gen_name,
topic_param=topic_param,
topic_gen_param=topic_gen_param,
)
self.bourdieu_docs = res[0]
self.bourdieu_topics = res[1]
# Visualize The results from the API
fig = visualize_bourdieu(
self.bourdieu_docs,
self.bourdieu_topics,
height=height,
width=width,
display_percent=display_percent,
convex_hull=convex_hull,
clustering=clustering,
manual_axis_name=manual_axis_name,
)
return fig
def start_server_bourdieu(self):
if is_server_running():
print("Server on port 3000 is already running. Killing it...")
kill_server()
try:
file_path = "../web/public" + "/bunka_bourdieu_docs.json"
docs_json = [x.dict() for x in self.bourdieu_docs]
with open(file_path, "w") as json_file:
json.dump(docs_json, json_file)
file_path = "../web/public" + "/bunka_bourdieu_topics.json"
topics_json = [x.dict() for x in self.bourdieu_topics]
with open(file_path, "w") as json_file:
json.dump(topics_json, json_file)
file_path = "../web/public" + "/bunka_bourdieu_query.json"
with open(file_path, "w") as json_file:
json.dump(self.bourdieu_query.dict(), json_file)
subprocess.Popen(["npm", "start"], cwd="../web")
print("NPM server started.")
except Exception as e:
print(f"Error starting NPM server: {e}")
def start_server(self):
if is_server_running():
print("Server on port 3000 is already running. Killing it...")
kill_server()
try:
file_path = "../web/public" + "/bunka_docs.json"
docs_json = [x.dict() for x in self.docs]
with open(file_path, "w") as json_file:
json.dump(docs_json, json_file)
file_path = "../web/public" + "/bunka_topics.json"
topics_json = [x.dict() for x in self.topics]
with open(file_path, "w") as json_file:
json.dump(topics_json, json_file)
subprocess.Popen(["npm", "start"], cwd="../web")
print("NPM server started.")
except Exception as e:
print(f"Error starting NPM server: {e}")
def visualize_topics(
self, add_scatter=False, label_size_ratio=100, width=1000, height=1000
) -> go.Figure:
fig = visualize_topics(
self.docs,
self.topics,
width=width,
height=height,
add_scatter=add_scatter,
label_size_ratio=label_size_ratio,
)
return fig
def visu_query(
self, query="What is firearm?", min_score=0.8, width=600, height=300
):
fig, percent = plot_query(
embedding_model=self.embedding_model,
docs=self.docs,
query=query,
min_score=min_score,
width=width,
height=height,
)
return fig, percent
def visualize_bourdieu_one_dimension(
self,
left=["negative", "bad"],
right=["positive"],
width=1200,
height=1200,
explainer=False,
):
fig = visualize_bourdieu_one_dimension(
docs=self.docs,
embedding_model=self.embedding_model,
left=left,
right=right,
width=width,
height=height,
explainer=explainer,
)
return fig
def get_dimensions(
self, dimensions: t.List[str], width=500, height=500, template="plotly_dark"
) -> go.Figure:
final_df = []
logger.info("Computing Similarities")
scaler = MinMaxScaler(feature_range=(0, 1))
for dim in tqdm(dimensions):
# search() returns (document, score) tuples; convert them to a DataFrame first.
res = self.search(dim)
df_search = pd.DataFrame(
{"cosine_similarity_score": [score for _, score in res]}
)
df_search["score"] = scaler.fit_transform(
df_search[["cosine_similarity_score"]]
)
df_search["source"] = dim
final_df.append(df_search)
final_df = pd.concat([x for x in final_df])
final_df_mean = (
final_df.groupby("source")["score"]
.mean()
.rename("mean_score")
.reset_index()
)
final_df_mean = final_df_mean.sort_values(
"mean_score", ascending=True
).reset_index(drop=True)
final_df_mean["rank"] = final_df_mean.index + 1
self.df_dimensions = final_df_mean
fig = px.line_polar(
final_df_mean,
r="mean_score",
theta="source",
line_close=True,
template=template,
width=width,
height=height,
)
return fig
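# Minimal usage sketch (hypothetical documents, mirrors tests/test_bunka.py):
# bunka = Bunka()
# bunka.fit(["first document", "second document", "third document"])
# df_topics = bunka.get_topics(n_clusters=2)
# fig = bunka.visualize_topics(width=800, height=800)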
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | tests~run_bourdieu.py | import sys
sys.path.append("../")
from bunkatopics import Bunka
from bunkatopics.functions.clean_text import clean_tweet
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import LlamaCpp
import random
from datasets import load_dataset
import os
from dotenv import load_dotenv
load_dotenv()
random.seed(42)
if __name__ == "__main__":
# Social Data
dataset = load_dataset("rguo123/trump_tweets")["train"]["content"]
full_docs = random.sample(dataset, 3000)
full_docs = [clean_tweet(x) for x in full_docs]
# Scientific Litterature Data
# dataset = load_dataset("CShorten/ML-ArXiv-Papers")["train"]["title"]
# full_docs = random.sample(dataset, 500)
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
bunka = Bunka(embedding_model=embedding_model)
generative_model = LlamaCpp(
model_path=os.getenv("MODEL_PATH"),
n_ctx=2048,
temperature=0.75,
max_tokens=2000,
top_p=1,
verbose=False,
)
generative_model.client.verbose = False
bunka.fit(full_docs)
manual_axis_name = {
"x_left_name": "positive",
"x_right_name": "negative",
"y_top_name": "women",
"y_bottom_name": "men",
}
bourdieu_fig = bunka.visualize_bourdieu(
generative_model=generative_model,
x_left_words=["this is a positive content"],
x_right_words=["this is a negative content"],
y_top_words=["this is about women"],
y_bottom_words=["this is about men"],
height=1000,
width=1000,
display_percent=True,
use_doc_gen_topic=True,
clustering=True,
topic_n_clusters=10,
topic_terms=5,
topic_top_terms_overall=500,
topic_gen_name=True,
convex_hull=True,
radius_size=0.5,
manual_axis_name=manual_axis_name,
)
bourdieu_fig.show()
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | bunkatopics~visualisation~bourdieu_visu.py | import random
import typing as t
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from langchain.embeddings import HuggingFaceInstructEmbeddings
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from bunkatopics.datamodel import (BourdieuDimension, ContinuumDimension,
Document, Term)
from bunkatopics.functions.topic_document import get_top_documents
from bunkatopics.functions.topic_gen_representation import get_clean_topic_all
from bunkatopics.functions.topics_modeling import get_topics
from bunkatopics.visualisation.explainer import plot_specific_terms
from bunkatopics.visualisation.visu_utils import wrap_by_word
pd.options.mode.chained_assignment = None
def get_continuum(
embedding_model: HuggingFaceInstructEmbeddings,
docs: t.List[Document],
cont_name: str = "emotion",
left_words: list = ["hate", "pain"],
right_words: list = ["love", "good"],
scale: bool = False,
) -> t.List[Document]:
df_docs = pd.DataFrame.from_records([doc.dict() for doc in docs])
df_emb = df_docs[["doc_id", "embedding"]]
df_emb = df_emb.set_index("doc_id")
df_emb = pd.DataFrame(list(df_emb["embedding"]))
df_emb.index = df_docs["doc_id"]
continuum = ContinuumDimension(
id=cont_name, left_words=left_words, right_words=right_words
)
# Compute the extremity embeddings
left_embedding = embedding_model.embed_documents(continuum.left_words)
right_embedding = embedding_model.embed_documents(continuum.right_words)
left_embedding = pd.DataFrame(left_embedding).mean().values.reshape(1, -1)
right_embedding = pd.DataFrame(right_embedding).mean().values.reshape(1, -1)
# Take the difference between the two poles to get the continuum direction
continuum_embedding = left_embedding - right_embedding
df_continuum = pd.DataFrame(continuum_embedding)
df_continuum.index = ["distance"]
# Compute the Cosine Similarity
full_emb = pd.concat([df_emb, df_continuum])
df_bert = pd.DataFrame(cosine_similarity(full_emb))
df_bert.index = full_emb.index
df_bert.columns = full_emb.index
df_bert = df_bert.iloc[
-1:,
].T
df_bert = df_bert.sort_values("distance", ascending=False).reset_index()
df_bert = df_bert[1:]
df_bert = df_bert.rename(columns={"index": "doc_id"})
final_df = pd.merge(df_bert, df_docs[["doc_id", "content"]], on="doc_id")
if scale:
scaler = MinMaxScaler(feature_range=(-1, 1))
final_df[["distance"]] = scaler.fit_transform(final_df[["distance"]])
final_df = final_df.set_index("doc_id")
final_df = final_df[["distance"]]
distance_dict = final_df.to_dict("index")
new_docs = docs.copy()
for doc in new_docs:
res = BourdieuDimension(
continuum=continuum, distance=distance_dict.get(doc.doc_id)["distance"]
)
doc.bourdieu_dimensions.append(res)
return new_docs
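# Illustrative behaviour (hypothetical words): with left_words=["hate"] and
# right_words=["love"], documents whose embeddings are closer to "hate" get a
# larger `distance` on this continuum than documents closer to "love"; with
# scale=True the values are rescaled to the [-1, 1] range.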
def plot_unique_dimension(
docs: t.List[Document],
id: str = id,
left: list = ["aggressivity"],
right: list = ["peacefulness"],
height=700,
width=600,
explainer: bool = True,
explainer_ngrams: list = [1, 2],
) -> go.Figure:
left = " ".join(left)
right = " ".join(right)
distances = [
x.distance
for doc in docs
for x in doc.bourdieu_dimensions
if x.continuum.id == id
]
doc_id = [x.doc_id for x in docs]
content = [x.content for x in docs]
df_distances = pd.DataFrame(
{"doc_id": doc_id, "distances": distances, "content": content}
)
name = "<" + right + "-" + left + ">"
df_fig = df_distances.rename(columns={"distances": name})
df_fig["content"] = df_fig["content"].apply(lambda x: wrap_by_word(x, 10))
fig = px.box(
df_fig,
y=name,
points="all",
hover_data=["content"],
height=height,
width=width,
template="plotly_white",
)
fig.add_shape(
dict(
type="line",
x0=df_fig[name].min(), # Set the minimum x-coordinate of the line
x1=df_fig[name].max(), # Set the maximum x-coordinate of the line
y0=0,
y1=0,
line=dict(color="red", width=4),
)
)
if explainer:
plot_specific_terms(
docs=docs,
left_words=left,
right_words=right,
id=id,
ngrams=explainer_ngrams,
quantile=0.80,
top_n=20,
)
return fig
def visualize_bourdieu_one_dimension(
docs: t.List[Document],
embedding_model,
left: t.List[str] = ["aggressivity"],
right: t.List[str] = ["peacefulness"],
height=700,
width=600,
explainer: bool = True,
explainer_ngrams: list = [1, 2],
) -> go.Figure:
id = str(random.randint(0, 10000))
new_docs = get_continuum(
embedding_model=embedding_model,
docs=docs,
cont_name=id,
left_words=left,
right_words=right,
scale=False,
)
fig = plot_unique_dimension(
new_docs,
id=id,
left=left,
right=right,
height=height,
width=width,
explainer=explainer,
explainer_ngrams=explainer_ngrams,
)
return fig
def visualize_bourdieu(
embedding_model,
generative_model,
docs: t.List[Document],
terms: t.List[Term],
x_left_words: t.List[str] = ["war"],
x_right_words: t.List[str] = ["peace"],
y_top_words: t.List[str] = ["men"],
y_bottom_words: t.List[str] = ["women"],
height: int = 1500,
width: int = 1500,
clustering: bool = True,
topic_gen_name: bool = False,
topic_n_clusters: int = 5,
topic_terms: int = 2,
topic_ngrams: list = [1, 2],
display_percent: bool = True,
use_doc_gen_topic: bool = False,
gen_topic_language: str = "english",
label_size_ratio_label: int = 50,
topic_top_terms_overall: int = 500,
manual_axis_name: dict = None,
radius_size: float = 0.3,
convex_hull: bool = True,
):
# Reset
for doc in docs:
doc.bourdieu_dimensions = []
# Compute Continuums
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont1",
left_words=x_left_words,
right_words=x_right_words,
)
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont2",
left_words=y_top_words,
right_words=y_bottom_words,
)
df_names = [
{
"names": [y.continuum.id for y in x.bourdieu_dimensions],
"left_words": [y.continuum.left_words for y in x.bourdieu_dimensions],
"right_words": [y.continuum.right_words for y in x.bourdieu_dimensions],
}
for x in new_docs
]
df_names = pd.DataFrame(df_names)
df_names = df_names.explode(["names", "left_words", "right_words"])
df_names["left_words"] = df_names["left_words"].apply(lambda x: "-".join(x))
df_names["right_words"] = df_names["right_words"].apply(lambda x: "-".join(x))
df_names = df_names.drop_duplicates()
df_names = df_names.set_index("names")
dict_bourdieu = df_names.to_dict(orient="index")
df_bourdieu = [
{
"doc_id": x.doc_id,
"coordinates": [y.distance for y in x.bourdieu_dimensions],
"names": [y.continuum.id for y in x.bourdieu_dimensions],
}
for x in new_docs
]
df_bourdieu = pd.DataFrame(df_bourdieu)
df_bourdieu = df_bourdieu.explode(["coordinates", "names"])
# Filter to keep only the top and bottom data, dropping results that are too far from the continuum poles
df_content = [{"doc_id": x.doc_id, "content": x.content} for x in new_docs]
df_content = pd.DataFrame(df_content)
df_fig = df_bourdieu[["doc_id", "coordinates", "names"]]
df_fig = df_fig.pivot(index="doc_id", columns="names", values="coordinates")
df_fig = df_fig.reset_index()
# Remove the data inside the central radius (radius_size * max) because central data does not mean much
df_fig["cont1"] = df_fig["cont1"].astype(float)
df_fig["cont2"] = df_fig["cont2"].astype(float)
x_values = df_fig["cont1"].values
y_values = df_fig["cont2"].values
distances = np.sqrt(x_values**2 + y_values**2)
circle_radius = max(df_fig.cont1) * radius_size
df_fig["distances"] = distances
df_fig["outside"] = "0"
df_fig["outside"][df_fig["distances"] >= circle_radius] = "1"
outside_ids = list(df_fig["doc_id"][df_fig["outside"] == "1"])
df_fig = df_fig[df_fig["doc_id"].isin(outside_ids)]
df_fig = pd.merge(df_content, df_fig, on="doc_id")
df_fig["Text"] = df_fig["content"].apply(lambda x: wrap_by_word(x, 10))
x_axis_name = list(dict_bourdieu.keys())[0]
y_axis_name = list(dict_bourdieu.keys())[1]
x_left_words = dict_bourdieu[x_axis_name]["left_words"]
x_right_words = dict_bourdieu[x_axis_name]["right_words"]
y_top_words = dict_bourdieu[y_axis_name]["left_words"]
y_bottom_words = dict_bourdieu[y_axis_name]["right_words"]
fig = go.Figure(
go.Histogram2dContour(
x=df_fig[x_axis_name],
y=df_fig[y_axis_name],
colorscale="delta",
showscale=False,
),
)
scatter_fig = px.scatter(
df_fig,
x=x_axis_name,
y=y_axis_name,
color="outside",
color_discrete_map={"1": "white", "0": "grey"},
hover_data=["Text"],
template="simple_white",
height=height,
width=width,
opacity=0.3,
# title="Bourdieu Plot"
# color_discrete_sequence=["blue"],
)
for trace in scatter_fig.data:
fig.add_trace(trace)
# Set the axis to the max value to get a square
max_val = max(
abs(min(df_fig[y_axis_name])),
abs(max(df_fig[y_axis_name])),
abs(max(df_fig[x_axis_name])),
abs(min(df_fig[x_axis_name])),
)
# Add axis lines for x=0 and y=0
fig.add_shape(
type="line",
x0=0,
x1=0,
# y0=-max_val,
# y1=max_val,
y0=min(df_fig[y_axis_name]),
y1=max(df_fig[y_axis_name]),
line=dict(color="white", width=3), # Customize line color and width
)
fig.add_shape(
type="line",
x0=min(df_fig[x_axis_name]),
x1=max(df_fig[x_axis_name]),
# x0=-max_val,
# x1=max_val,
y0=0,
y1=0,
line=dict(color="white", width=3), # Customize line color and width
)
fig.update_layout(
font_size=25,
width=width,
height=height,
margin=dict(
t=width / 50,
b=width / 50,
r=width / 50,
l=width / 50,
),
# title=dict(font=dict(size=width / 40)),
)
fig.update_layout(showlegend=False)
"""
histogram2d_contour = go.Figure(
go.Histogram2dContour(
x=df_fig[x_axis_name],
y=df_fig[y_axis_name],
colorscale="delta",
showscale=False,
),
)
fig.add_trace(histogram2d_contour.data[0])
scatter_fig = px.scatter(
df_fig,
x=x_axis_name,
y=y_axis_name,
color="outside",
color_discrete_map={"1": "white", "0": "grey"},
hover_data=["Text"],
template="simple_white",
height=height,
width=width,
opacity=0.3,
# title="Bourdieu Plot"
# color_discrete_sequence=["blue"],
)
for trace in scatter_fig.data:
fig.add_trace(trace)
"""
"""
fig.update_xaxes(
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
fig.update_yaxes(
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
"""
if manual_axis_name is None:
y_top_name = y_top_words
y_bottom_name = y_bottom_words
x_left_name = x_left_words
x_right_name = x_right_words
else:
y_top_name = manual_axis_name["y_top_name"]
y_bottom_name = manual_axis_name["y_bottom_name"]
x_left_name = manual_axis_name["x_left_name"]
x_right_name = manual_axis_name["x_right_name"]
fig.update_layout(
annotations=[
dict(
x=0,
# y=max_val,
y=max(df_fig[y_axis_name]),
xref="x",
yref="y",
text=y_top_name,
showarrow=False,
xanchor="right",
yanchor="top",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=0,
y=min(df_fig[y_axis_name]),
# y=-max_val,
xref="x",
yref="y",
text=y_bottom_name,
showarrow=False,
xanchor="left",
yanchor="bottom",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=max(df_fig[x_axis_name]),
# x=max_val,
y=0,
xref="x",
yref="y",
text=x_left_name,
showarrow=False,
xanchor="right",
yanchor="top",
font=dict(size=width / label_size_ratio_label, color="white"),
),
dict(
x=min(df_fig[x_axis_name]),
# x=-max_val,
y=0,
xref="x",
yref="y",
text=x_right_name,
showarrow=False,
xanchor="left",
yanchor="bottom",
font=dict(size=width / label_size_ratio_label, color="white"),
),
]
)
if clustering:
df_bourdieu_pivot = df_bourdieu.pivot(
index="doc_id", columns="names", values="coordinates"
)
df_bourdieu_pivot = df_bourdieu_pivot.reset_index()
df_bourdieu_pivot.columns = ["doc_id", "x", "y"]
df_bourdieu_pivot = df_bourdieu_pivot.set_index("doc_id")
dict_doc = df_bourdieu_pivot[["x", "y"]].to_dict("index")
for doc in new_docs:
doc.x = dict_doc.get(doc.doc_id)["x"]
doc.y = dict_doc.get(doc.doc_id)["y"]
new_docs = [doc for doc in new_docs if doc.doc_id in outside_ids]
bourdieu_topics = get_topics(
docs=new_docs,
terms=terms,
n_clusters=topic_n_clusters,
ngrams=topic_ngrams,
name_lenght=topic_terms,
top_terms_overall=topic_top_terms_overall,
)
if topic_gen_name:
# Get top documents for the generative AI query
new_docs = get_top_documents(new_docs, bourdieu_topics, ranking_terms=20)
bourdieu_topics = get_clean_topic_all(
generative_model,
language=gen_topic_language,
topics=bourdieu_topics,
docs=new_docs,
use_doc=use_doc_gen_topic,
)
label_size_ratio_clusters = 100
topics_x = [x.x_centroid for x in bourdieu_topics]
topics_y = [x.y_centroid for x in bourdieu_topics]
topic_names = [x.name for x in bourdieu_topics]
topics_name_plotly = [wrap_by_word(x, 7) for x in topic_names]
# Display Topics
for x, y, label in zip(topics_x, topics_y, topics_name_plotly):
fig.add_annotation(
x=x,
y=y,
text=label,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_clusters,
color="red",
),
bordercolor="#c7c7c7",
borderwidth=width / 1000,
borderpad=width / 500,
bgcolor="white",
opacity=1,
)
if convex_hull:
try:
for topic in bourdieu_topics:
# Create a Scatter plot with the convex hull coordinates
trace = go.Scatter(
x=topic.convex_hull.x_coordinates,
y=topic.convex_hull.y_coordinates, # Assuming y=0 for simplicity
mode="lines",
name="Convex Hull",
line=dict(color="grey"),
showlegend=False,
)
fig.add_trace(trace)
        except Exception:
            # Convex hulls can fail for degenerate topics; skip drawing them in that case.
            pass
if display_percent:
# Calculate the percentage for every box
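        # The four counts below correspond to the four quadrants of the plot:
        # (cont1 < 0, cont2 < 0) bottom-left, (cont1 < 0, cont2 > 0) top-left,
        # (cont1 > 0, cont2 < 0) bottom-right, (cont1 > 0, cont2 > 0) top-right.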
df_fig_percent = df_fig[df_fig["doc_id"].isin(outside_ids)]
label_size_ratio_percent = 20
opacity = 0.4
case1_count = len(
df_fig_percent[
(df_fig_percent["cont1"] < 0) & (df_fig_percent["cont2"] < 0)
]
)
total_count = len(df_fig_percent)
case1_percentage = str(round((case1_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=min(df_fig_percent[x_axis_name]),
y=min(df_fig_percent[y_axis_name]),
text=case1_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case2_count = len(
df_fig_percent[
(df_fig_percent["cont1"] < 0) & (df_fig_percent["cont2"] > 0)
]
)
case2_percentage = str(round((case2_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=min(df_fig_percent[x_axis_name]),
y=max(df_fig_percent[y_axis_name]),
text=case2_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case3_count = len(
df_fig_percent[
(df_fig_percent["cont1"] > 0) & (df_fig_percent["cont2"] < 0)
]
)
case3_percentage = str(round((case3_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=max(df_fig_percent[x_axis_name]),
y=min(df_fig_percent[y_axis_name]),
text=case3_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
case4_count = len(
df_fig_percent[
(df_fig_percent["cont1"] > 0) & (df_fig_percent["cont2"] > 0)
]
)
case4_percentage = str(round((case4_count / total_count) * 100, 1)) + "%"
fig.add_annotation(
x=max(df_fig_percent[x_axis_name]),
y=max(df_fig_percent[y_axis_name]),
text=case4_percentage,
font=dict(
family="Courier New, monospace",
size=width / label_size_ratio_percent,
color="grey",
),
opacity=opacity,
xanchor="left",
)
# Update the x-axis and y-axis labels
fig.update_xaxes(
title_text="",
scaleanchor="y",
scaleratio=1,
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
fig.update_yaxes(
title_text="",
scaleanchor="x",
scaleratio=1,
showgrid=False,
showticklabels=False,
zeroline=True,
zerolinecolor="white",
zerolinewidth=2,
)
return fig, df_bourdieu
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | bunkatopics~functions~topic_gen_representation.py | import typing as t
import openai
import pandas as pd
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from tqdm import tqdm
from bunkatopics.datamodel import Document, Topic
from langchain.llms import OpenAI
from .prompts import promp_template_topics_terms, promp_template_topics_terms_no_docs
# Our main prompt with documents ([DOCUMENTS]) and keywords ([KEYWORDS]) tags
TERM_ID = str
def get_clean_topic(
generative_model,
specific_terms: t.List[str],
specific_documents: t.List[str],
language="english",
top_doc: int = 3,
top_terms: int = 10,
use_doc=True,
context: str = "different things",
):
specific_terms = specific_terms[:top_terms]
specific_documents = specific_documents[:top_doc]
if use_doc:
PROMPT_TOPICS = ChatPromptTemplate.from_template(promp_template_topics_terms)
topic_chain = LLMChain(llm=generative_model, prompt=PROMPT_TOPICS)
clean_topic_name = topic_chain(
{
"terms": ", ".join(specific_terms),
"documents": " \n".join(specific_documents),
"context": context,
"language": language,
}
)
else:
PROMPT_TOPICS_NO_DOCS = ChatPromptTemplate.from_template(
promp_template_topics_terms_no_docs
)
topic_chain = LLMChain(llm=generative_model, prompt=PROMPT_TOPICS_NO_DOCS)
clean_topic_name = topic_chain(
{
"terms": ", ".join(specific_terms),
"context": context,
"language": language,
}
)
clean_topic_name = clean_topic_name["text"]
return clean_topic_name
def get_clean_topic_all(
generative_model,
topics: t.List[Topic],
docs: t.List[Document],
language: str = "english",
top_doc: int = 3,
top_terms: int = 10,
use_doc=False,
context: str = "everything",
) -> t.List[Topic]:
df = get_df_prompt(topics, docs)
topic_ids = list(df["topic_id"])
specific_terms = list(df["keywords"])
top_doc_contents = list(df["content"])
final_dict = {}
pbar = tqdm(total=len(topic_ids), desc="Creating new labels for clusters")
for topic_ic, x, y in zip(topic_ids, specific_terms, top_doc_contents):
clean_topic_name = get_clean_topic(
generative_model=generative_model,
language=language,
specific_terms=x,
specific_documents=y,
use_doc=use_doc,
top_terms=top_terms,
top_doc=top_doc,
context=context,
)
final_dict[topic_ic] = clean_topic_name
pbar.update(1)
for topic in topics:
topic.name = final_dict.get(topic.topic_id)
return topics
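# Minimal usage sketch (illustrative only; assumes `topics` and `docs` were produced by
# the Bunka pipeline and `generative_model` is any LangChain LLM):
#   topics = get_clean_topic_all(generative_model, topics=topics, docs=docs, language="english")
#   print([topic.name for topic in topics])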
def get_df_prompt(topics: t.List[Topic], docs: t.List[Document]) -> pd.DataFrame:
"""
    Build the dataframe used to fill the topic-naming prompt (topic_id, keywords, top documents).
"""
docs_with_ranks = [x for x in docs if x.topic_ranking is not None]
df_for_prompt = pd.DataFrame(
{
"topic_id": [x.topic_ranking.topic_id for x in docs_with_ranks],
"rank": [x.topic_ranking.rank for x in docs_with_ranks],
"doc_id": [x.doc_id for x in docs_with_ranks],
}
)
df_for_prompt = df_for_prompt.sort_values(
["topic_id", "rank"], ascending=(False, True)
)
df_for_prompt = df_for_prompt[["topic_id", "doc_id"]]
df_doc = pd.DataFrame(
{
"doc_id": [x.doc_id for x in docs],
"content": [x.content for x in docs],
}
)
df_for_prompt = pd.merge(df_for_prompt, df_doc, on="doc_id")
df_for_prompt = df_for_prompt.groupby("topic_id")["content"].apply(
lambda x: list(x)
)
df_keywords = pd.DataFrame(
{
"topic_id": [x.topic_id for x in topics],
"keywords": [x.name.split(" | ") for x in topics],
}
)
df_for_prompt = pd.merge(df_keywords, df_for_prompt, on="topic_id")
return df_for_prompt
| [
"rank",
"content"
] |
2024-01-10 | charlesdedampierre/BunkaTopics | streamlit~app_full.py | import sys
sys.path.append("../")
from bunkatopics import Bunka
import pandas as pd
import streamlit as st
from langchain.embeddings import HuggingFaceEmbeddings
import random
import os
from dotenv import load_dotenv
load_dotenv()
# Define a Streamlit app
st.title("Topic Modeling with Bunka")
# Upload CSV file
csv_file = st.file_uploader("Upload a CSV file", type=["csv"])
gen_ai = True
from langchain.llms import LlamaCpp
generative_model = LlamaCpp(
model_path=os.getenv("MODEL_PATH"),
n_ctx=2048,
temperature=0.75,
max_tokens=2000,
top_p=1,
verbose=False,
)
generative_model.client.verbose = False
# Initialize Bunka and fit it with the text data (cache the fit operation)
@st.cache_resource
def fit_bunka(full_docs):
bunka.fit(full_docs)
return bunka
if csv_file is not None:
# Read CSV into a DataFrame
df = pd.read_csv(csv_file)
# Extract text from the CSV column named 'text' (adjust column name as needed)
text_data = df["text"].tolist()
# Sample a subset of the text data (you can adjust the sample size)
sample_size = 500
full_docs = random.sample(text_data, sample_size)
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
bunka = Bunka(embedding_model=embedding_model)
bunka = fit_bunka(full_docs)
st.subheader("Topic Modeling Visualization")
num_clusters = 5
df_topics = bunka.get_topics(n_clusters=num_clusters)
if gen_ai:
df_clean_names = bunka.get_clean_topic_name(generative_model=generative_model)
# Visualize topics
topic_fig = bunka.visualize_topics(width=800, height=800)
st.plotly_chart(topic_fig)
# Add a section for customizing the visualize_bourdieu parameters
st.sidebar.title("Customize visualize_bourdieu Parameters")
x_left_words = st.sidebar.text_input("x_left_words (comma-separated)", "war")
x_right_words = st.sidebar.text_input("x_right_words (comma-separated)", "peace")
y_top_words = st.sidebar.text_input("y_top_words (comma-separated)", "men")
y_bottom_words = st.sidebar.text_input("y_bottom_words (comma-separated)", "women")
# Display the visualize_bourdieu results
if st.sidebar.button("Visualize Bourdieu"):
bunka = fit_bunka(full_docs)
bourdieu_fig = bunka.visualize_bourdieu(
generative_model,
x_left_words=x_left_words.split(","),
x_right_words=x_right_words.split(","),
y_top_words=y_top_words.split(","),
y_bottom_words=y_bottom_words.split(","),
height=800,
width=800,
display_percent=True,
clustering=True,
topic_n_clusters=10,
topic_terms=5,
topic_top_terms_overall=500,
topic_gen_name=False,
)
st.subheader("Bourdieu Visualization")
st.plotly_chart(bourdieu_fig)
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | bunkatopics~functions~bourdieu_api.py | import random
import typing as t
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import MinMaxScaler
from bunkatopics.datamodel import (
BourdieuDimension,
BourdieuQuery,
ContinuumDimension,
Document,
Term,
Topic,
TopicGenParam,
TopicParam,
)
from bunkatopics.functions.topic_document import get_top_documents
from bunkatopics.functions.topic_gen_representation import get_clean_topic_all
from bunkatopics.functions.topics_modeling import get_topics
from langchain.llms import OpenAI
import os
from dotenv import load_dotenv
pd.options.mode.chained_assignment = None
def bourdieu_api(
generative_model,
embedding_model,
docs: t.List[Document],
terms: t.List[Term],
bourdieu_query: BourdieuQuery = BourdieuQuery(),
topic_param: TopicParam = TopicParam(),
generative_ai_name=False,
topic_gen_param: TopicGenParam = TopicGenParam(),
) -> (t.List[Document], t.List[Topic]):
# Reset
for doc in docs:
doc.bourdieu_dimensions = []
# Compute Continuums
new_docs = get_continuum(
embedding_model,
docs,
cont_name="cont1",
left_words=bourdieu_query.x_left_words,
right_words=bourdieu_query.x_right_words,
)
bourdieu_docs = get_continuum(
embedding_model,
new_docs,
cont_name="cont2",
left_words=bourdieu_query.y_top_words,
right_words=bourdieu_query.y_bottom_words,
)
# There are two coordinates
df_bourdieu = pd.DataFrame(
[
{
"doc_id": x.doc_id,
"coordinates": [y.distance for y in x.bourdieu_dimensions],
"names": [y.continuum.id for y in x.bourdieu_dimensions],
}
for x in bourdieu_docs
]
)
df_bourdieu = df_bourdieu.explode(["coordinates", "names"])
df_bourdieu_pivot = df_bourdieu[["doc_id", "coordinates", "names"]]
df_bourdieu_pivot = df_bourdieu_pivot.pivot(
index="doc_id", columns="names", values="coordinates"
)
# Add to the bourdieu_docs
df_outsides = df_bourdieu_pivot.reset_index()
df_outsides["cont1"] = df_outsides["cont1"].astype(
float
) # Cont1 is the default name
df_outsides["cont2"] = df_outsides["cont2"].astype(float)
x_values = df_outsides["cont1"].values
y_values = df_outsides["cont2"].values
distances = np.sqrt(x_values**2 + y_values**2)
circle_radius = max(df_outsides.cont1) * bourdieu_query.radius_size
df_outsides["distances"] = distances
df_outsides["outside"] = "0"
    df_outsides.loc[df_outsides["distances"] >= circle_radius, "outside"] = "1"
outside_ids = list(df_outsides["doc_id"][df_outsides["outside"] == "1"])
bourdieu_docs = [x for x in bourdieu_docs if x.doc_id in outside_ids]
bourdieu_dict = df_bourdieu_pivot.to_dict(orient="index")
for doc in bourdieu_docs:
doc.x = bourdieu_dict.get(doc.doc_id)["cont1"]
doc.y = bourdieu_dict.get(doc.doc_id)["cont2"]
bourdieu_topics = get_topics(
docs=bourdieu_docs,
terms=terms,
n_clusters=topic_param.n_clusters,
ngrams=topic_param.ngrams,
name_lenght=topic_param.name_lenght,
top_terms_overall=topic_param.top_terms_overall,
)
bourdieu_docs, bourdieu_topics = get_top_documents(
bourdieu_docs, bourdieu_topics, ranking_terms=20
)
if generative_ai_name:
bourdieu_topics: t.List[Topic] = get_clean_topic_all(
generative_model,
bourdieu_topics,
bourdieu_docs,
language=topic_gen_param.language,
context=topic_gen_param.context,
use_doc=topic_gen_param.use_doc,
)
return (bourdieu_docs, bourdieu_topics)
def get_continuum(
embedding_model,
docs: t.List[Document],
cont_name: str = "emotion",
left_words: list = ["hate", "pain"],
right_words: list = ["love", "good"],
scale: bool = False,
) -> t.List[Document]:
df_docs = pd.DataFrame.from_records([doc.dict() for doc in docs])
df_emb = df_docs[["doc_id", "embedding"]]
df_emb = df_emb.set_index("doc_id")
df_emb = pd.DataFrame(list(df_emb["embedding"]))
df_emb.index = df_docs["doc_id"]
continuum = ContinuumDimension(
id=cont_name, left_words=left_words, right_words=right_words
)
# Compute the extremity embeddings
left_embedding = embedding_model.embed_documents(continuum.left_words)
right_embedding = embedding_model.embed_documents(continuum.right_words)
left_embedding = pd.DataFrame(left_embedding).mean().values.reshape(1, -1)
right_embedding = pd.DataFrame(right_embedding).mean().values.reshape(1, -1)
    # Take the difference to get the continuum
continuum_embedding = left_embedding - right_embedding
df_continuum = pd.DataFrame(continuum_embedding)
df_continuum.index = ["distance"]
# Compute the Cosine Similarity
full_emb = pd.concat([df_emb, df_continuum])
df_bert = pd.DataFrame(cosine_similarity(full_emb))
df_bert.index = full_emb.index
df_bert.columns = full_emb.index
df_bert = df_bert.iloc[-1:,].T
df_bert = df_bert.sort_values("distance", ascending=False).reset_index()
df_bert = df_bert[1:]
df_bert = df_bert.rename(columns={"index": "doc_id"})
final_df = pd.merge(df_bert, df_docs[["doc_id", "content"]], on="doc_id")
if scale:
scaler = MinMaxScaler(feature_range=(-1, 1))
final_df[["distance"]] = scaler.fit_transform(final_df[["distance"]])
final_df = final_df.set_index("doc_id")
final_df = final_df[["distance"]]
distance_dict = final_df.to_dict("index")
bourdieu_docs = docs.copy()
for doc in bourdieu_docs:
res = BourdieuDimension(
continuum=continuum, distance=distance_dict.get(doc.doc_id)["distance"]
)
doc.bourdieu_dimensions.append(res)
return bourdieu_docs
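# Minimal usage sketch (illustrative only; assumes `docs` is a list of embedded Document
# objects and `embedding_model` is a LangChain embeddings object):
#   scored_docs = get_continuum(
#       embedding_model,
#       docs,
#       cont_name="sentiment",
#       left_words=["negative"],
#       right_words=["positive"],
#   )
#   # each returned doc gets an extra BourdieuDimension appended to doc.bourdieu_dimensions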
| [] |
2024-01-10 | charlesdedampierre/BunkaTopics | tests~run_bunka.py | import sys
sys.path.append("../")
from bunkatopics import Bunka
from bunkatopics.functions.clean_text import clean_tweet
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import LlamaCpp
import random
from datasets import load_dataset
import os
from dotenv import load_dotenv
load_dotenv()
random.seed(42)
if __name__ == "__main__":
# Social Data
dataset = load_dataset("rguo123/trump_tweets")["train"]["content"]
full_docs = random.sample(dataset, 500)
full_docs = [clean_tweet(x) for x in full_docs]
# Scientific Litterature Data
# dataset = load_dataset("CShorten/ML-ArXiv-Papers")["train"]["title"]
# full_docs = random.sample(dataset, 500)
embedding_model = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
bunka = Bunka(embedding_model=embedding_model)
generative_model = LlamaCpp(
model_path=os.getenv("MODEL_PATH"),
n_ctx=2048,
temperature=0.75,
max_tokens=2000,
top_p=1,
verbose=False,
)
generative_model.client.verbose = False
bunka.fit(full_docs)
# Topic Modeling
df_topics = bunka.get_topics(n_clusters=2)
topic_fig = bunka.visualize_topics(width=800, height=800)
topic_fig.show()
# Topic Modeling Clean
df_topics = bunka.get_clean_topic_name(
generative_model=generative_model,
use_doc=False,
)
topic_fig_clean = bunka.visualize_topics(width=800, height=800)
topic_fig_clean.show()
fig_solo = bunka.visualize_bourdieu_one_dimension(
left=["negative", "bad"],
right=["positive"],
width=1200,
height=1200,
explainer=False,
)
fig_solo.show()
bourdieu_fig = bunka.visualize_bourdieu(
generative_model=generative_model,
x_left_words=["war"],
x_right_words=["peace"],
y_top_words=["men"],
y_bottom_words=["women"],
height=1500,
width=1500,
display_percent=True,
clustering=True,
topic_n_clusters=3,
topic_terms=5,
topic_top_terms_overall=500,
topic_gen_name=True,
)
bourdieu_fig.show()
| [] |
2024-01-10 | valentinkm/EchoCards | text_splitter.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain.vectorstores import FAISS
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import PyPDFLoader
# Recursive splitting to consider different separators in generic text
r_splitter = RecursiveCharacterTextSplitter(
chunk_size=2000,
chunk_overlap=200,
separators=["\n\n", "\n", " ", "", "."],
length_function = len
)
transcript_file = "files/transcript.txt"
# create function of the above
def load_split_transcript(transcript_file):
loader = TextLoader(transcript_file)
data = loader.load()
docs = r_splitter.split_documents(data)
return docs
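# Minimal usage sketch (illustrative only; assumes files/transcript.txt exists):
#   docs = load_split_transcript(transcript_file)
#   print(f"{len(docs)} chunks; first chunk starts with: {docs[0].page_content[:80]}")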
| [] |
2024-01-10 | valentinkm/EchoCards | text_converter.py | from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from text2qa_prompts import prompt, refine_prompt
_ = load_dotenv()
llm = ChatOpenAI(temperature=0.7, model_name="gpt-3.5-turbo")
chain = load_summarize_chain(llm, chain_type="refine")
chain = load_summarize_chain(
llm=llm,
chain_type="refine",
question_prompt=prompt,
refine_prompt=refine_prompt,
return_intermediate_steps=True,
input_key="input_documents",
output_key="output_text",
)
# generate transcript
def generate_qa_transcript(docs, topic):
result = chain({"input_documents":docs, "topic":topic}, return_only_outputs=False)
    intermediate_text = "\n".join(result["intermediate_steps"])
output_text = result['output_text']
qa_transcript = f"{intermediate_text}\n\n{output_text}"
return qa_transcript | [] |
2024-01-10 | valentinkm/EchoCards | text2qa_prompts.py | from langchain.prompts import PromptTemplate
prompt_template = """You are a diligent research student. You are given a generated transcript of a lecture on {topic}.\n
Transcript:\n
{text}\
Step by step, extract all information from the transcript and convert it into a clean, complete, and concise summary document with highlighted key concepts, theories, and definitions.\
Use Markdown formatting.\n
Do not leave anything out and do not add anything extra.\n
Tone: scientific\n
Task:\n
- Highlight Key Concepts: Emphasize crucial theories, definitions, and concepts using Markdown formatting like bold or italic.\
- Ensure Completeness: Incorporate all bullet points, sub-points, and other nested lists from the transcript without omitting any content.\
- Do Not Add Extra Material: Keep the lecture notes faithful to the original transcript, avoiding any addition, removal, or modification of the substance of the content.\
- Work Step-by-Step: Methodically work through the transcript, slide by slide, to ensure that the final document is both accurate and complete.\
This task is designed to facilitate the creation of a complete set of lecture notes that serve as an effective study and reference tool.\
LECTURE NOTES:"""
prompt = PromptTemplate.from_template(prompt_template)
refine_template = (
"""You are a diligent research student. You are given a generated transcript of a lecture on {topic}.\n
Existing Q&A Notes: {existing_answer}\n
Your goal is to continue supplementing the existing Q&A notes with additional context from the continued lecture transcript provided below.\n
------------\n
{text}\n
------------\n
Task:\n
- Highlight Key Concepts: Emphasize crucial theories, definitions, and concepts using Markdown formatting like bold or italic.\
- Ensure Completeness: Incorporate all bullet points, sub-points, and other nested lists from the transcript without omitting any content.\
- Do Not Add Extra Material: Keep the lecture notes faithful to the original transcript, avoiding any addition, removal, or modification of the substance of the content.\
- Work Step-by-Step: Methodically work through the transcript, slide by slide, to ensure that the final document is both accurate and complete.\
    This task is designed to facilitate the creation of a complete set of lecture notes that serve as an effective study and reference tool."""
)
refine_prompt = PromptTemplate.from_template(refine_template) | [
"You are a diligent research student. You are given a generated transcript of a lecture on {topic}.\n\n Existing Q&A Notes: {existing_answer}\n\n Your goal is to continue supplementing the existing Q&A notes with additional context from the continued lecture transcript provided below.\n\n ------------\n\n {text}\n\n ------------\n\n Task:\n\n - Highlight Key Concepts: Emphasize crucial theories, definitions, and concepts using Markdown formatting like bold or italic. - Ensure Completeness: Incorporate all bullet points, sub-points, and other nested lists from the transcript without omitting any content. - Do Not Add Extra Material: Keep the lecture notes faithful to the original transcript, avoiding any addition, removal, or modification of the substance of the content. - Work Step-by-Step: Methodically work through the transcript, slide by slide, to ensure that the final document is both accurate and complete.This task is designed to facilitate the creation of complete set of lecture notes that serve as an effective study and reference tool.",
"You are an dilligint research student. Your are given a generated transcript of a lecture on {topic}.\n\nTranscript:\n\n{text}Step by step extract all information from the transcript and convert it into a clean and complete concise summary document with highlighted key concepts, theories, and definitions.Use Markdown formatting.\n\nDo not leave anything out and do not add anyting extra.\n\nTone: scientific\n\nTask:\n\n - Highlight Key Concepts: Emphasize crucial theories, definitions, and concepts using Markdown formatting like bold or italic. - Ensure Completeness: Incorporate all bullet points, sub-points, and other nested lists from the transcript without omitting any content. - Do Not Add Extra Material: Keep the lecture notes faithful to the original transcript, avoiding any addition, removal, or modification of the substance of the content. - Work Step-by-Step: Methodically work through the transcript, slide by slide, to ensure that the final document is both accurate and complete.This task is designed to facilitate the creation of complete set of lecture notes that serve as an effective study and reference tool.LECTURE NOTES:"
] |
2024-01-10 | irthomasthomas/label-maker | research~label_maker_multi_function_calling_weather_example.py | from openai import OpenAI
import json, os
client = OpenAI(
api_key=os.environ["OPENAI_API_KEY"],
)
OPENAI_API_KEY = client.api_key
# Example dummy function hard coded to return the same weather
# In production, this could be your backend API or an external API
def get_current_weather(location, unit="fahrenheit"):
"""Get the current weather in a given location"""
if "tokyo" in location.lower():
return json.dumps({"location": "Tokyo", "temperature": "10", "unit": unit})
elif "san francisco" in location.lower():
return json.dumps({"location": "San Francisco", "temperature": "72", "unit": unit})
elif "paris" in location.lower():
return json.dumps({"location": "Paris", "temperature": "22", "unit": unit})
else:
return json.dumps({"location": location, "temperature": "unknown"})
def run_conversation():
# Step 1: send the conversation and available functions to the model
messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
},
}
]
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=messages,
tools=tools,
tool_choice="auto", # auto is default, but we'll be explicit
)
response_message = response.choices[0].message
tool_calls = response_message.tool_calls
# Step 2: check if the model wanted to call a function
if tool_calls:
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
available_functions = {
"get_current_weather": get_current_weather,
} # only one function in this example, but you can have multiple
messages.append(response_message) # extend conversation with assistant's reply
# Step 4: send the info for each function call and function response to the model
print(f"messages:")
for message in messages:
print(f"{message}\n")
for tool_call in tool_calls:
function_name = tool_call.function.name
function_to_call = available_functions[function_name]
function_args = json.loads(tool_call.function.arguments)
function_response = function_to_call(
location=function_args.get("location"),
unit=function_args.get("unit"),
)
messages.append(
{
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
second_response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=messages,
) # get a new response from the model where it can see the function response
return second_response
print(run_conversation()) | [
"What's the weather like in San Francisco, Tokyo, and Paris?"
] |
2024-01-10 | irthomasthomas/label-maker | label_maker.py | import os, json, argparse, subprocess
from openai import OpenAI
client = OpenAI(
api_key=os.environ["OPENAI_API_KEY"],
)
OPENAI_API_KEY = client.api_key
def generate_new_labels(labels, url, title, description):
"""Generate new labels if the existing labels are inadequate."""
messages = [
{"role": "system", "content": """You are a helpful assistant designed to output JSON lists of labels.
Think carefully about the labels you select.
The labels you create should make it easier to organize and search for information."""},
{"role": "user", "content": f"""Think of some keywords for this link.\n
url: {url}\n
title: {title}\n
description: {description}\n
**labels:**
{labels}\n
Write A MAXIMUM OF TWO label,description pairs to describe this link:\n
*IMPORTANT* Make sure the labels are unique and highly descriptive."""}
]
# Step 1: call the model
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
response_format={"type": "json_object"},
temperature=1,
seed=0,
messages=messages,
)
response_message = response.choices[0].message
return response_message
def create_new_labels(repo, label_list):
"""Create new labels for a GitHub repo."""
new_labels_created = []
for label in label_list:
label_name = label["name"]
label_description = label["description"]
command = ["gh", "label", "create", "-R", repo, label_name, "-d", label_description]
result = subprocess.run(command, capture_output=True, text=True, check=True)
if result.stderr:
print("Error:", result.stderr)
else:
print(f"Created label: {label_name}")
new_labels_created.append(label_name)
return new_labels_created
def request_labels_list(repo):
with open('/dev/tty', 'w') as f:
f.write(f"get_issues_labels_list: {repo}\n\n")
per_page = 100
command = ["gh", "label", "list", "-R", repo, "-L", "100", "--json", "name,description,color"]
# Execute the command using subprocess
result = subprocess.run(command, capture_output=True, text=True, check=True)
labels = json.loads(result.stdout)
if labels:
f.write(f"got {len(labels)} labels\n\n")
# Print the information or do further processing if needed
# for label in labels:
# print(f"Label Name: {label['name']}, Color: {label['color']}")
# If an error occurs, print the error message
if result.stderr:
print("Error:", result.stderr)
parsed_labels = ""
label_dict = {}
for label in labels:
parsed_labels += f"{label['name']}: {label['description']}\n"
# label_dict[label['name']] = label['description']
return parsed_labels
def new_labels_needed(labels, url, title, description):
adequate_labels_query = f"""Given the following bookmark:
url: {url}
title: {title}
description: {description}
Are new labels needed to adequately delineate this bookmark? (True) or can you label it accurately with the existing labels? (False)
Only answer True if you are certain that new labels are needed. If you are unsure, then answer False.
Only reply with True or False.
**labels:**
{labels}
**Important**: Say nothing except true or false."""
messages = [
{"role": "system", "content": """You are a helpful assistant designed to answer binary questions with True or False."""},
{"role": "user", "content": adequate_labels_query}
]
# Step 1: call the model
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
temperature=0,
seed=0,
messages=messages,
)
response_message = response.choices[0].message
print(f"New Labels Are Needed: {response_message.content}")
if response_message.content == "True":
return True
else:
return False
def pick_labels(url, title, description, labels):
"""
Choose the labels to assign to a bookmark.
"""
pick_labels_query = f"""Pick A MINIMUM OF THREE (3) labels from the list to describe this link:\n
*IMPORTANT* Only pick from the labels provided. Output a JSON list of labels.
url: {url}\ntitle: {title}\ndescription: {description}\nlabels: {labels}
"""
messages = [
{"role": "system", "content": """You are a helpful assistant designed to output JSON lists of labels.
Think carefully about the labels you select.
The labels you select should make it easier to organize and search for information.
**IMPORTANT** Only pick from the labels provided."""},
{"role": "user", "content": pick_labels_query}
]
# Step 1: call the model
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
response_format={"type": "json_object"},
temperature=1,
seed=0,
messages=messages
)
# return a list of labels
response_message = response.choices[0].message.content
print(f"Labels Picked: {response_message}")
return response_message
parser = argparse.ArgumentParser(description='Generate labels for a given bookmark.')
parser.add_argument('--url', metavar='url', type=str, help='The url of the bookmark.')
parser.add_argument('--title', metavar='title', type=str, help='The title of the bookmark.')
parser.add_argument('--description', metavar='description', type=str, help='The selected text of the bookmark.')
parser.add_argument('--repo', metavar='repo', type=str, help='The repo to get labels from.', default="irthomasthomas/undecidability")
args = parser.parse_args()
# Algorithm:
# 1. Request a list of labels from the repo.
# 2. Check if the existing labels are adequate.
# 3. If not, generate new labels.
# 4. Create the new labels.
# 5. Pick the labels to assign to the bookmark.
# 6. Return the labels.
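# Example invocation (hypothetical values, for illustration only):
#   python label_maker.py \
#     --url "https://example.com/article" \
#     --title "Example article" \
#     --description "Selected text from the page" \
#     --repo "owner/repo"
# The script ends by printing labels_dict with "picked_labels" and "generated_labels" keys.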
labels_dict = {}
if args.url:
labels = request_labels_list(args.repo)
print(f"labels count: {len(labels)}")
if new_labels_needed(labels, args.url, args.title, args.description):
generated_labels = generate_new_labels(labels, args.url, args.title, args.description)
generated_labels_list = json.loads(generated_labels.content)
print(f"LABELS REQUESTED:\n {generated_labels_list}")
picked_labels = json.loads(pick_labels(args.url, args.title, args.description, labels))
# picked_labels = {"labels:":["label1", "label2"]}
if generated_labels:
        # manually add the 'New Label' label to picked_labels
picked_labels["labels"].append("New Label") # TypeError: string indices must be integers, not 'str'
labels_dict["picked_labels"] = picked_labels
# add the generated label's name,description pairs to the picked labels as a list of dicts
labels_dict["generated_labels"] = generated_labels_list
print(f"LABELS PICKED:\n {labels_dict}") | [
"You are a helpful assistant designed to output JSON lists of labels. \n Think carefully about the labels you select. \n The labels you select should make it easier to organize and search for information. \n **IMPORTANT** Only pick from the labels provided.",
"You are a helpful assistant designed to output JSON lists of labels.\n Think carefully about the labels you select.\n The labels you create should make it easier to organize and search for information.",
"You are a helpful assistant designed to answer binary questions with True or False."
] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~vectorstores~supabase.py | from __future__ import annotations
import uuid
from itertools import repeat
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
import numpy as np
from langchain_core.schema.embeddings import Embeddings
from langchain_core.schema.vectorstore import VectorStore
from langchain.docstore.document import Document
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import supabase
class SupabaseVectorStore(VectorStore):
"""`Supabase Postgres` vector store.
It assumes you have the `pgvector`
extension installed and a `match_documents` (or similar) function. For more details:
https://integrations.langchain.com/vectorstores?integration_name=SupabaseVectorStore
You can implement your own `match_documents` function in order to limit the search
space to a subset of documents based on your own authorization or business logic.
Note that the Supabase Python client does not yet support async operations.
If you'd like to use `max_marginal_relevance_search`, please review the instructions
below on modifying the `match_documents` function to return matched embeddings.
Examples:
.. code-block:: python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain_core.schema import Document
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import create_client
docs = [
Document(page_content="foo", metadata={"id": 1}),
]
embeddings = OpenAIEmbeddings()
supabase_client = create_client("my_supabase_url", "my_supabase_key")
vector_store = SupabaseVectorStore.from_documents(
docs,
embeddings,
client=supabase_client,
table_name="documents",
query_name="match_documents",
chunk_size=500,
)
To load from an existing table:
.. code-block:: python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import SupabaseVectorStore
from supabase.client import create_client
embeddings = OpenAIEmbeddings()
supabase_client = create_client("my_supabase_url", "my_supabase_key")
vector_store = SupabaseVectorStore(
client=supabase_client,
embedding=embeddings,
table_name="documents",
query_name="match_documents",
)
"""
def __init__(
self,
client: supabase.client.Client,
embedding: Embeddings,
table_name: str,
chunk_size: int = 500,
query_name: Union[str, None] = None,
) -> None:
"""Initialize with supabase client."""
try:
import supabase # noqa: F401
except ImportError:
raise ImportError(
"Could not import supabase python package. "
"Please install it with `pip install supabase`."
)
self._client = client
self._embedding: Embeddings = embedding
self.table_name = table_name or "documents"
self.query_name = query_name or "match_documents"
self.chunk_size = chunk_size or 500
# According to the SupabaseVectorStore JS implementation, the best chunk size
# is 500. Though for large datasets it can be too large so it is configurable.
@property
def embeddings(self) -> Embeddings:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
ids = ids or [str(uuid.uuid4()) for _ in texts]
docs = self._texts_to_documents(texts, metadatas)
vectors = self._embedding.embed_documents(list(texts))
return self.add_vectors(vectors, docs, ids)
@classmethod
def from_texts(
cls: Type["SupabaseVectorStore"],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
client: Optional[supabase.client.Client] = None,
table_name: Optional[str] = "documents",
query_name: Union[str, None] = "match_documents",
chunk_size: int = 500,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> "SupabaseVectorStore":
"""Return VectorStore initialized from texts and embeddings."""
if not client:
raise ValueError("Supabase client is required.")
if not table_name:
raise ValueError("Supabase document table_name is required.")
embeddings = embedding.embed_documents(texts)
ids = [str(uuid.uuid4()) for _ in texts]
docs = cls._texts_to_documents(texts, metadatas)
cls._add_vectors(client, table_name, embeddings, docs, ids, chunk_size)
return cls(
client=client,
embedding=embedding,
table_name=table_name,
query_name=query_name,
chunk_size=chunk_size,
)
def add_vectors(
self,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
) -> List[str]:
return self._add_vectors(
self._client, self.table_name, vectors, documents, ids, self.chunk_size
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
vector = self._embedding.embed_query(query)
return self.similarity_search_by_vector(vector, k=k, filter=filter, **kwargs)
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Document]:
result = self.similarity_search_by_vector_with_relevance_scores(
embedding, k=k, filter=filter, **kwargs
)
documents = [doc for doc, _ in result]
return documents
def similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
vector = self._embedding.embed_query(query)
return self.similarity_search_by_vector_with_relevance_scores(
vector, k=k, filter=filter
)
def match_args(
self, query: List[float], filter: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
ret: Dict[str, Any] = dict(query_embedding=query)
if filter:
ret["filter"] = filter
return ret
def similarity_search_by_vector_with_relevance_scores(
self,
query: List[float],
k: int,
filter: Optional[Dict[str, Any]] = None,
postgrest_filter: Optional[str] = None,
) -> List[Tuple[Document, float]]:
match_documents_params = self.match_args(query, filter)
query_builder = self._client.rpc(self.query_name, match_documents_params)
if postgrest_filter:
query_builder.params = query_builder.params.set(
"and", f"({postgrest_filter})"
)
query_builder.params = query_builder.params.set("limit", k)
res = query_builder.execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
)
for search in res.data
if search.get("content")
]
return match_result
def similarity_search_by_vector_returning_embeddings(
self,
query: List[float],
k: int,
filter: Optional[Dict[str, Any]] = None,
postgrest_filter: Optional[str] = None,
) -> List[Tuple[Document, float, np.ndarray[np.float32, Any]]]:
match_documents_params = self.match_args(query, filter)
query_builder = self._client.rpc(self.query_name, match_documents_params)
if postgrest_filter:
query_builder.params = query_builder.params.set(
"and", f"({postgrest_filter})"
)
query_builder.params = query_builder.params.set("limit", k)
res = query_builder.execute()
match_result = [
(
Document(
metadata=search.get("metadata", {}), # type: ignore
page_content=search.get("content", ""),
),
search.get("similarity", 0.0),
                # Supabase returns a vector type as its string representation (!).
# This is a hack to convert the string to numpy array.
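                # e.g. "[0.1,0.2,0.3]" -> np.array([0.1, 0.2, 0.3], dtype=np.float32)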
np.fromstring(
search.get("embedding", "").strip("[]"), np.float32, sep=","
),
)
for search in res.data
if search.get("content")
]
return match_result
@staticmethod
def _texts_to_documents(
texts: Iterable[str],
metadatas: Optional[Iterable[Dict[Any, Any]]] = None,
) -> List[Document]:
"""Return list of Documents from list of texts and metadatas."""
if metadatas is None:
metadatas = repeat({})
docs = [
Document(page_content=text, metadata=metadata)
for text, metadata in zip(texts, metadatas)
]
return docs
@staticmethod
def _add_vectors(
client: supabase.client.Client,
table_name: str,
vectors: List[List[float]],
documents: List[Document],
ids: List[str],
chunk_size: int,
) -> List[str]:
"""Add vectors to Supabase table."""
rows: List[Dict[str, Any]] = [
{
"id": ids[idx],
"content": documents[idx].page_content,
"embedding": embedding,
"metadata": documents[idx].metadata, # type: ignore
}
for idx, embedding in enumerate(vectors)
]
id_list: List[str] = []
for i in range(0, len(rows), chunk_size):
chunk = rows[i : i + chunk_size]
result = client.from_(table_name).upsert(chunk).execute() # type: ignore
if len(result.data) == 0:
raise Exception("Error inserting: No rows added")
# VectorStore.add_vectors returns ids as strings
ids = [str(i.get("id")) for i in result.data if i.get("id")]
id_list.extend(ids)
return id_list
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
result = self.similarity_search_by_vector_returning_embeddings(
embedding, fetch_k
)
matched_documents = [doc_tuple[0] for doc_tuple in result]
matched_embeddings = [doc_tuple[2] for doc_tuple in result]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
matched_embeddings,
k=k,
lambda_mult=lambda_mult,
)
filtered_documents = [matched_documents[i] for i in mmr_selected]
return filtered_documents
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
`max_marginal_relevance_search` requires that `query_name` returns matched
embeddings alongside the match documents. The following function
demonstrates how to do this:
```sql
CREATE FUNCTION match_documents_embeddings(query_embedding vector(1536),
match_count int)
RETURNS TABLE(
id uuid,
content text,
metadata jsonb,
embedding vector(1536),
similarity float)
LANGUAGE plpgsql
AS $$
# variable_conflict use_column
BEGIN
RETURN query
SELECT
id,
content,
metadata,
embedding,
1 -(docstore.embedding <=> query_embedding) AS similarity
FROM
docstore
ORDER BY
docstore.embedding <=> query_embedding
LIMIT match_count;
END;
$$;
```
"""
embedding = self._embedding.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
rows: List[Dict[str, Any]] = [
{
"id": id,
}
for id in ids
]
# TODO: Check if this can be done in bulk
for row in rows:
self._client.from_(self.table_name).delete().eq("id", row["id"]).execute()
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~memory~test_cassandra.py | import os
import time
from typing import Optional
from cassandra.cluster import Cluster
from langchain_core.schema.messages import AIMessage, HumanMessage
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories.cassandra import (
CassandraChatMessageHistory,
)
def _chat_message_history(
session_id: str = "test-session",
drop: bool = True,
ttl_seconds: Optional[int] = None,
) -> CassandraChatMessageHistory:
keyspace = "cmh_test_keyspace"
table_name = "cmh_test_table"
# get db connection
if "CASSANDRA_CONTACT_POINTS" in os.environ:
        contact_points = os.environ["CASSANDRA_CONTACT_POINTS"].split(",")
cluster = Cluster(contact_points)
else:
cluster = Cluster()
#
session = cluster.connect()
# ensure keyspace exists
session.execute(
(
f"CREATE KEYSPACE IF NOT EXISTS {keyspace} "
f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
)
)
# drop table if required
if drop:
session.execute(f"DROP TABLE IF EXISTS {keyspace}.{table_name}")
#
return CassandraChatMessageHistory(
session_id=session_id,
session=session,
keyspace=keyspace,
table_name=table_name,
**({} if ttl_seconds is None else {"ttl_seconds": ttl_seconds}),
)
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup cassandra as a message store
message_history = _chat_message_history()
memory = ConversationBufferMemory(
memory_key="baz",
chat_memory=message_history,
return_messages=True,
)
assert memory.chat_memory.messages == []
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
messages = memory.chat_memory.messages
expected = [
AIMessage(content="This is me, the AI"),
HumanMessage(content="This is me, the human"),
]
assert messages == expected
# clear the store
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
def test_memory_separate_session_ids() -> None:
"""Test that separate session IDs do not share entries."""
message_history1 = _chat_message_history(session_id="test-session1")
memory1 = ConversationBufferMemory(
memory_key="mk1",
chat_memory=message_history1,
return_messages=True,
)
message_history2 = _chat_message_history(session_id="test-session2")
memory2 = ConversationBufferMemory(
memory_key="mk2",
chat_memory=message_history2,
return_messages=True,
)
memory1.chat_memory.add_ai_message("Just saying.")
assert memory2.chat_memory.messages == []
memory1.chat_memory.clear()
memory2.chat_memory.clear()
def test_memory_ttl() -> None:
"""Test time-to-live feature of the memory."""
message_history = _chat_message_history(ttl_seconds=5)
memory = ConversationBufferMemory(
memory_key="baz",
chat_memory=message_history,
return_messages=True,
)
#
assert memory.chat_memory.messages == []
memory.chat_memory.add_ai_message("Nothing special here.")
time.sleep(2)
assert memory.chat_memory.messages != []
time.sleep(5)
assert memory.chat_memory.messages == []
| [
"This is me, the AI",
"This is me, the human"
] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~chains~api~openapi~response_chain.py | """Response parser."""
import json
import re
from typing import Any
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.schema import BaseOutputParser
from langchain_core.schema.language_model import BaseLanguageModel
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
class APIResponderOutputParser(BaseOutputParser):
"""Parse the response and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
response_content = json.loads(serialized_block, strict=False)
return response_content.get("response", "ERROR parsing response.")
except json.JSONDecodeError:
return "ERROR parsing response."
except:
raise
def parse(self, llm_output: str) -> str:
"""Parse the response and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f"No response found in output: {llm_output}.")
@property
def _type(self) -> str:
return "api_responder"
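# Example of the LLM output this parser expects (illustrative):
#   ```json
#   {"response": "The API returned 3 results."}
#   ```
# Output without a ```json ... ``` block raises a ValueError.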
class APIResponderChain(LLMChain):
"""Get the response parser."""
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
) -> LLMChain:
"""Get the response parser."""
output_parser = APIResponderOutputParser()
prompt = PromptTemplate(
template=RESPONSE_TEMPLATE,
output_parser=output_parser,
input_variables=["response", "instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
| [
"instructions",
"response"
] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~callbacks~flyte_callback.py | """FlyteKit callback handler."""
from __future__ import annotations
import logging
from copy import deepcopy
from typing import TYPE_CHECKING, Any, Dict, List, Tuple
from langchain_core.schema import AgentAction, AgentFinish, LLMResult
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
import_spacy,
import_textstat,
)
if TYPE_CHECKING:
import flytekit
from flytekitplugins.deck import renderer
logger = logging.getLogger(__name__)
def import_flytekit() -> Tuple[flytekit, renderer]:
"""Import flytekit and flytekitplugins-deck-standard."""
try:
import flytekit # noqa: F401
from flytekitplugins.deck import renderer # noqa: F401
except ImportError:
raise ImportError(
"To use the flyte callback manager you need"
"to have the `flytekit` and `flytekitplugins-deck-standard`"
"packages installed. Please install them with `pip install flytekit`"
"and `pip install flytekitplugins-deck-standard`."
)
return flytekit, renderer
def analyze_text(
text: str,
nlp: Any = None,
textstat: Any = None,
) -> dict:
"""Analyze text using textstat and spacy.
Parameters:
text (str): The text to analyze.
        nlp (spacy.lang): The spacy language model to use for visualization.
        textstat: The textstat module used to compute the text complexity metrics.
Returns:
(dict): A dictionary containing the complexity metrics and visualization
files serialized to HTML string.
"""
resp: Dict[str, Any] = {}
if textstat is not None:
text_complexity_metrics = {
"flesch_reading_ease": textstat.flesch_reading_ease(text),
"flesch_kincaid_grade": textstat.flesch_kincaid_grade(text),
"smog_index": textstat.smog_index(text),
"coleman_liau_index": textstat.coleman_liau_index(text),
"automated_readability_index": textstat.automated_readability_index(text),
"dale_chall_readability_score": textstat.dale_chall_readability_score(text),
"difficult_words": textstat.difficult_words(text),
"linsear_write_formula": textstat.linsear_write_formula(text),
"gunning_fog": textstat.gunning_fog(text),
"fernandez_huerta": textstat.fernandez_huerta(text),
"szigriszt_pazos": textstat.szigriszt_pazos(text),
"gutierrez_polini": textstat.gutierrez_polini(text),
"crawford": textstat.crawford(text),
"gulpease_index": textstat.gulpease_index(text),
"osman": textstat.osman(text),
}
resp.update({"text_complexity_metrics": text_complexity_metrics})
resp.update(text_complexity_metrics)
if nlp is not None:
spacy = import_spacy()
doc = nlp(text)
dep_out = spacy.displacy.render( # type: ignore
doc, style="dep", jupyter=False, page=True
)
ent_out = spacy.displacy.render( # type: ignore
doc, style="ent", jupyter=False, page=True
)
text_visualizations = {
"dependency_tree": dep_out,
"entities": ent_out,
}
resp.update(text_visualizations)
return resp
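# Minimal usage sketch (illustrative only; requires the `textstat` package and the spaCy
# `en_core_web_sm` model to be installed):
#   nlp = import_spacy().load("en_core_web_sm")
#   metrics = analyze_text("Some generated text.", nlp=nlp, textstat=import_textstat())
#   print(metrics["flesch_reading_ease"])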
class FlyteCallbackHandler(BaseMetadataCallbackHandler, BaseCallbackHandler):
"""This callback handler that is used within a Flyte task."""
def __init__(self) -> None:
"""Initialize callback handler."""
flytekit, renderer = import_flytekit()
self.pandas = import_pandas()
self.textstat = None
try:
self.textstat = import_textstat()
except ImportError:
logger.warning(
"Textstat library is not installed. \
It may result in the inability to log \
certain metrics that can be captured with Textstat."
)
spacy = None
try:
spacy = import_spacy()
except ImportError:
logger.warning(
"Spacy library is not installed. \
It may result in the inability to log \
certain metrics that can be captured with Spacy."
)
super().__init__()
self.nlp = None
if spacy:
try:
self.nlp = spacy.load("en_core_web_sm")
except OSError:
logger.warning(
"FlyteCallbackHandler uses spacy's en_core_web_sm model"
" for certain metrics. To download,"
" run the following command in your terminal:"
" `python -m spacy download en_core_web_sm`"
)
self.table_renderer = renderer.TableRenderer
self.markdown_renderer = renderer.MarkdownRenderer
self.deck = flytekit.Deck(
"LangChain Metrics",
self.markdown_renderer().to_html("## LangChain Metrics"),
)
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts."""
self.step += 1
self.llm_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
prompt_responses = []
for prompt in prompts:
prompt_responses.append(prompt)
resp.update({"prompts": prompt_responses})
self.deck.append(self.markdown_renderer().to_html("### LLM Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run when LLM generates a new token."""
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
self.step += 1
self.llm_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_llm_end"})
resp.update(flatten_dict(response.llm_output or {}))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### LLM End"))
self.deck.append(self.table_renderer().to_html(self.pandas.DataFrame([resp])))
for generations in response.generations:
for generation in generations:
generation_resp = deepcopy(resp)
generation_resp.update(flatten_dict(generation.dict()))
if self.nlp or self.textstat:
generation_resp.update(
analyze_text(
generation.text, nlp=self.nlp, textstat=self.textstat
)
)
complexity_metrics: Dict[str, float] = generation_resp.pop(
"text_complexity_metrics"
) # type: ignore # noqa: E501
self.deck.append(
self.markdown_renderer().to_html("#### Text Complexity Metrics")
)
self.deck.append(
self.table_renderer().to_html(
self.pandas.DataFrame([complexity_metrics])
)
+ "\n"
)
dependency_tree = generation_resp["dependency_tree"]
self.deck.append(
self.markdown_renderer().to_html("#### Dependency Tree")
)
self.deck.append(dependency_tree)
entities = generation_resp["entities"]
self.deck.append(self.markdown_renderer().to_html("#### Entities"))
self.deck.append(entities)
else:
self.deck.append(
self.markdown_renderer().to_html("#### Generated Response")
)
self.deck.append(self.markdown_renderer().to_html(generation.text))
def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when LLM errors."""
self.step += 1
self.errors += 1
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
self.step += 1
self.chain_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_chain_start"})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
chain_input = ",".join([f"{k}={v}" for k, v in inputs.items()])
input_resp = deepcopy(resp)
input_resp["inputs"] = chain_input
self.deck.append(self.markdown_renderer().to_html("### Chain Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([input_resp])) + "\n"
)
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
self.step += 1
self.chain_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
chain_output = ",".join([f"{k}={v}" for k, v in outputs.items()])
resp.update({"action": "on_chain_end", "outputs": chain_output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Chain End"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when chain errors."""
self.step += 1
self.errors += 1
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> None:
"""Run when tool starts running."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_start", "input_str": input_str})
resp.update(flatten_dict(serialized))
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Tool Start"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
self.step += 1
self.tool_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_tool_end", "output": output})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Tool End"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
"""Run when tool errors."""
self.step += 1
self.errors += 1
def on_text(self, text: str, **kwargs: Any) -> None:
"""
Run when agent is ending.
"""
self.step += 1
self.text_ctr += 1
resp: Dict[str, Any] = {}
resp.update({"action": "on_text", "text": text})
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### On Text"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run when agent ends running."""
self.step += 1
self.agent_ends += 1
self.ends += 1
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_finish",
"output": finish.return_values["output"],
"log": finish.log,
}
)
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Agent Finish"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run on agent action."""
self.step += 1
self.tool_starts += 1
self.starts += 1
resp: Dict[str, Any] = {}
resp.update(
{
"action": "on_agent_action",
"tool": action.tool,
"tool_input": action.tool_input,
"log": action.log,
}
)
resp.update(self.get_custom_callback_meta())
self.deck.append(self.markdown_renderer().to_html("### Agent Action"))
self.deck.append(
self.table_renderer().to_html(self.pandas.DataFrame([resp])) + "\n"
)
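# Usage sketch: inside a Flyte task, pass the handler as a callback so LLM, chain,
# tool and agent events are rendered to the task's Flyte Deck. The OpenAI LLM and
# prompt below are illustrative assumptions, not part of this module:
#
#     handler = FlyteCallbackHandler()
#     llm = OpenAI(callbacks=[handler])
#     llm.predict("Tell me a joke about data pipelines", callbacks=[handler])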
| [
"[]"
] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~databerry.py | from typing import List, Optional
import aiohttp
import requests
from langchain_core.schema import BaseRetriever, Document
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
class DataberryRetriever(BaseRetriever):
"""`Databerry API` retriever."""
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
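if __name__ == "__main__":
    # Minimal usage sketch. The datastore URL and API key below are placeholder
    # assumptions; point them at a real Databerry datastore to run this.
    retriever = DataberryRetriever(
        datastore_url="https://api.example.com/datastores/query/my-datastore",
        api_key="DATABERRY_API_KEY",
        top_k=3,
    )
    docs = retriever.get_relevant_documents("What is our refund policy?")
    for doc in docs:
        print(doc.metadata["score"], doc.page_content[:80])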
| [] |
2024-01-10 | axgpt/langchain | libs~core~langchain_core~schema~language_model.py | from __future__ import annotations
from abc import ABC, abstractmethod
from functools import lru_cache
from typing import (
TYPE_CHECKING,
Any,
List,
Optional,
Sequence,
Set,
TypeVar,
Union,
)
from typing_extensions import TypeAlias
from langchain_core.runnables import RunnableSerializable
from langchain_core.schema.messages import AnyMessage, BaseMessage, get_buffer_string
from langchain_core.schema.output import LLMResult
from langchain_core.schema.prompt import PromptValue
from langchain_core.utils import get_pydantic_field_names
if TYPE_CHECKING:
from langchain_core.callbacks.manager import Callbacks
@lru_cache(maxsize=None) # Cache the tokenizer
def get_tokenizer() -> Any:
try:
from transformers import GPT2TokenizerFast
except ImportError:
raise ImportError(
"Could not import transformers python package. "
"This is needed in order to calculate get_token_ids. "
"Please install it with `pip install transformers`."
)
# create a GPT-2 tokenizer instance
return GPT2TokenizerFast.from_pretrained("gpt2")
def _get_token_ids_default_method(text: str) -> List[int]:
"""Encode the text into token IDs."""
# get the cached tokenizer
tokenizer = get_tokenizer()
# tokenize the text using the GPT-2 tokenizer
return tokenizer.encode(text)
LanguageModelInput = Union[PromptValue, str, List[BaseMessage]]
LanguageModelOutput = TypeVar("LanguageModelOutput")
class BaseLanguageModel(
RunnableSerializable[LanguageModelInput, LanguageModelOutput], ABC
):
"""Abstract base class for interfacing with language models.
All language model wrappers inherit from BaseLanguageModel.
Exposes three main methods:
- generate_prompt: generate language model outputs for a sequence of prompt
values. A prompt value is a model input that can be converted to any language
model input format (string or messages).
- predict: pass in a single string to a language model and return a string
prediction.
- predict_messages: pass in a sequence of BaseMessages (corresponding to a single
model call) to a language model and return a BaseMessage prediction.
Each of these has an equivalent asynchronous method.
"""
@property
def InputType(self) -> TypeAlias:
"""Get the input type for this runnable."""
from langchain_core.prompts.base import StringPromptValue
from langchain_core.prompts.chat import ChatPromptValueConcrete
# This is a version of LanguageModelInput which replaces the abstract
# base class BaseMessage with a union of its subclasses, which makes
# for a much better schema.
return Union[
str,
Union[StringPromptValue, ChatPromptValueConcrete],
List[AnyMessage],
]
@abstractmethod
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Pass a sequence of prompts to the model and return model generations.
This method should make use of batched calls for models that expose a batched
API.
Use this method when you want to:
1. take advantage of batched calls,
2. need more output from the model than just the top generated value,
3. are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
"""
@abstractmethod
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> LLMResult:
"""Asynchronously pass a sequence of prompts and return model generations.
This method should make use of batched calls for models that expose a batched
API.
Use this method when you want to:
1. take advantage of batched calls,
2. need more output from the model than just the top generated value,
3. are building chains that are agnostic to the underlying language model
type (e.g., pure text completion models vs chat models).
Args:
prompts: List of PromptValues. A PromptValue is an object that can be
converted to match the format of any language model (string for pure
text generation models and BaseMessages for chat models).
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
callbacks: Callbacks to pass through. Used for executing additional
functionality, such as logging or streaming, throughout generation.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
An LLMResult, which contains a list of candidate Generations for each input
prompt and additional model provider-specific output.
"""
@abstractmethod
def predict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Pass a single string input to the model and return a string prediction.
Use this method when passing in raw text. If you want to pass in specific
types of chat messages, use predict_messages.
Args:
text: String input to pass to the model.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a string.
"""
@abstractmethod
def predict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
"""Pass a message sequence to the model and return a message prediction.
Use this method when passing in chat messages. If you want to pass in raw text,
use predict.
Args:
messages: A sequence of chat messages corresponding to a single model input.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a message.
"""
@abstractmethod
async def apredict(
self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
) -> str:
"""Asynchronously pass a string to the model and return a string prediction.
Use this method when calling pure text generation models and only the top
candidate generation is needed.
Args:
text: String input to pass to the model.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a string.
"""
@abstractmethod
async def apredict_messages(
self,
messages: List[BaseMessage],
*,
stop: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> BaseMessage:
"""Asynchronously pass messages to the model and return a message prediction.
Use this method when calling chat models and only the top
candidate generation is needed.
Args:
messages: A sequence of chat messages corresponding to a single model input.
stop: Stop words to use when generating. Model output is cut off at the
first occurrence of any of these substrings.
**kwargs: Arbitrary additional keyword arguments. These are usually passed
to the model provider API call.
Returns:
Top model prediction as a message.
"""
def get_token_ids(self, text: str) -> List[int]:
"""Return the ordered ids of the tokens in a text.
Args:
text: The string input to tokenize.
Returns:
A list of ids corresponding to the tokens in the text, in order they occur
in the text.
"""
return _get_token_ids_default_method(text)
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text.
Useful for checking if an input will fit in a model's context window.
Args:
text: The string input to tokenize.
Returns:
The integer number of tokens in the text.
"""
return len(self.get_token_ids(text))
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
"""Get the number of tokens in the messages.
Useful for checking if an input will fit in a model's context window.
Args:
messages: The message inputs to tokenize.
Returns:
The sum of the number of tokens across the messages.
"""
return sum([self.get_num_tokens(get_buffer_string([m])) for m in messages])
@classmethod
def _all_required_field_names(cls) -> Set:
"""DEPRECATED: Kept for backwards compatibility.
Use get_pydantic_field_names.
"""
return get_pydantic_field_names(cls)
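if __name__ == "__main__":
    # Quick sketch of the default tokenization helpers defined above (assumes the
    # optional `transformers` package is installed so the GPT-2 tokenizer returned
    # by get_tokenizer() can be loaded).
    token_ids = _get_token_ids_default_method("hello world")
    print(f"{len(token_ids)} tokens: {token_ids}")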
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~vectorstores~annoy.py | from __future__ import annotations
import os
import pickle
import uuid
from configparser import ConfigParser
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain_core.schema.embeddings import Embeddings
from langchain_core.schema.vectorstore import VectorStore
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.vectorstores.utils import maximal_marginal_relevance
INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"])
DEFAULT_METRIC = "angular"
def dependable_annoy_import() -> Any:
"""Import annoy if available, otherwise raise error."""
try:
import annoy
except ImportError:
raise ImportError(
"Could not import annoy python package. "
"Please install it with `pip install --user annoy` "
)
return annoy
class Annoy(VectorStore):
"""`Annoy` vector store.
To use, you should have the ``annoy`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Annoy
db = Annoy(embedding_function, index, docstore, index_to_docstore_id)
"""
def __init__(
self,
embedding_function: Callable,
index: Any,
metric: str,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
self.index = index
self.metric = metric
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
@property
def embeddings(self) -> Optional[Embeddings]:
# TODO: Accept embedding object directly
return None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
raise NotImplementedError(
"Annoy does not allow to add new data once the index is build."
)
def process_index_results(
self, idxs: List[int], dists: List[float]
) -> List[Tuple[Document, float]]:
"""Turns annoy results into a list of documents and scores.
Args:
idxs: List of indices of the documents in the index.
dists: List of distances of the documents in the index.
Returns:
List of Documents and scores.
"""
docs = []
for idx, dist in zip(idxs, dists):
_id = self.index_to_docstore_id[idx]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append((doc, dist))
return docs
def similarity_search_with_score_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_vector(
embedding, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
def similarity_search_with_score_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
idxs, dists = self.index.get_nns_by_item(
docstore_index, k, search_k=search_k, include_distances=True
)
return self.process_index_results(idxs, dists)
def similarity_search_with_score(
self, query: str, k: int = 4, search_k: int = -1
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function(query)
docs = self.similarity_search_with_score_by_vector(embedding, k, search_k)
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding, k, search_k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_index(
self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to docstore_index.
Args:
docstore_index: Index of document in docstore
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_index(
docstore_index, k, search_k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_k: inspect up to search_k nodes which defaults
to n_trees * n if not provided
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, search_k)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
k: Number of Documents to return. Defaults to 4.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
idxs = self.index.get_nns_by_vector(
embedding, fetch_k, search_k=-1, include_distances=False
)
embeddings = [self.index.get_item_vector(i) for i in idxs]
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
embeddings,
k=k,
lambda_mult=lambda_mult,
)
# ignore the -1's if not enough docs are returned/indexed
selected_indices = [idxs[i] for i in mmr_selected if i != -1]
docs = []
for i in selected_indices:
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
docs.append(doc)
return docs
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
if metric not in INDEX_METRICS:
raise ValueError(
(
f"Unsupported distance metric: {metric}. "
f"Expected one of {list(INDEX_METRICS)}"
)
)
annoy = dependable_annoy_import()
if not embeddings:
raise ValueError("embeddings must be provided to build AnnoyIndex")
f = len(embeddings[0])
index = annoy.AnnoyIndex(f, metric=metric)
for i, emb in enumerate(embeddings):
index.add_item(i, emb)
index.build(trees, n_jobs=n_jobs)
documents = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
docstore = InMemoryDocstore(
{index_to_id[i]: doc for i, doc in enumerate(documents)}
)
return cls(embedding.embed_query, index, metric, docstore, index_to_id)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from raw documents.
Args:
texts: List of documents to index.
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
index = Annoy.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
metric: str = DEFAULT_METRIC,
trees: int = 100,
n_jobs: int = -1,
**kwargs: Any,
) -> Annoy:
"""Construct Annoy wrapper from embeddings.
Args:
text_embeddings: List of tuples of (text, embedding)
embedding: Embedding function to use.
metadatas: List of metadata dictionaries to associate with documents.
metric: Metric to use for indexing. Defaults to "angular".
trees: Number of trees to use for indexing. Defaults to 100.
            n_jobs: Number of jobs to use for indexing. Defaults to -1.
This is a user friendly interface that:
1. Creates an in memory docstore with provided embeddings
2. Initializes the Annoy database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Annoy
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
db = Annoy.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs
)
def save_local(self, folder_path: str, prefault: bool = False) -> None:
"""Save Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
prefault: Whether to pre-load the index into memory.
"""
path = Path(folder_path)
os.makedirs(path, exist_ok=True)
# save index, index config, docstore and index_to_docstore_id
config_object = ConfigParser()
config_object["ANNOY"] = {
"f": self.index.f,
"metric": self.metric,
}
self.index.save(str(path / "index.annoy"), prefault=prefault)
with open(path / "index.pkl", "wb") as file:
pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)
@classmethod
def load_local(
cls,
folder_path: str,
embeddings: Embeddings,
) -> Annoy:
"""Load Annoy index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries.
"""
path = Path(folder_path)
# load index separately since it is not picklable
annoy = dependable_annoy_import()
# load docstore and index_to_docstore_id
with open(path / "index.pkl", "rb") as file:
docstore, index_to_docstore_id, config_object = pickle.load(file)
f = int(config_object["ANNOY"]["f"])
metric = config_object["ANNOY"]["metric"]
index = annoy.AnnoyIndex(f, metric=metric)
index.load(str(path / "index.annoy"))
return cls(
embeddings.embed_query, index, metric, docstore, index_to_docstore_id
)
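if __name__ == "__main__":
    # Persistence sketch (assumes the `annoy` package is installed). FakeEmbeddings
    # is used only so the example needs no external API; any Embeddings implementation works.
    from langchain.embeddings import FakeEmbeddings
    emb = FakeEmbeddings(size=32)
    db = Annoy.from_texts(["hello", "world"], emb)
    db.save_local("annoy_index")
    reloaded = Annoy.load_local("annoy_index", emb)
    print(reloaded.similarity_search("hello", k=1))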
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~zep.py | from __future__ import annotations
from enum import Enum
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from langchain_core.pydantic_v1 import root_validator
from langchain_core.schema import BaseRetriever, Document
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
if TYPE_CHECKING:
from zep_python.memory import MemorySearchResult
class SearchScope(str, Enum):
"""Which documents to search. Messages or Summaries?"""
messages = "messages"
"""Search chat history messages."""
summary = "summary"
"""Search chat history summaries."""
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
"""Similarity search."""
mmr = "mmr"
"""Maximal Marginal Relevance reranking of similarity search."""
class ZepRetriever(BaseRetriever):
"""`Zep` MemoryStore Retriever.
Search your user's long-term chat history with Zep.
Zep offers both simple semantic search and Maximal Marginal Relevance (MMR)
reranking of search results.
Note: You will need to provide the user's `session_id` to use this retriever.
Args:
url: URL of your Zep server (required)
api_key: Your Zep API key (optional)
session_id: Identifies your user or a user's session (required)
top_k: Number of documents to return (default: 3, optional)
search_type: Type of search to perform (similarity / mmr) (default: similarity,
optional)
mmr_lambda: Lambda value for MMR search. Defaults to 0.5 (optional)
Zep - Fast, scalable building blocks for LLM Apps
=========
Zep is an open source platform for productionizing LLM apps. Go from a prototype
built in LangChain or LlamaIndex, or a custom app, to production in minutes without
rewriting code.
For server installation instructions, see:
https://docs.getzep.com/deployment/quickstart/
"""
zep_client: Optional[Any] = None
"""Zep client."""
url: str
"""URL of your Zep server."""
api_key: Optional[str] = None
"""Your Zep API key."""
session_id: str
"""Zep session ID."""
top_k: Optional[int]
"""Number of items to return."""
search_scope: SearchScope = SearchScope.messages
"""Which documents to search. Messages or Summaries?"""
search_type: SearchType = SearchType.similarity
"""Type of search to perform (similarity / mmr)"""
mmr_lambda: Optional[float] = None
"""Lambda value for MMR search."""
@root_validator(pre=True)
def create_client(cls, values: dict) -> dict:
try:
from zep_python import ZepClient
except ImportError:
raise ImportError(
"Could not import zep-python package. "
"Please install it with `pip install zep-python`."
)
values["zep_client"] = values.get(
"zep_client",
ZepClient(base_url=values["url"], api_key=values.get("api_key")),
)
return values
def _messages_search_result_to_doc(
self, results: List[MemorySearchResult]
) -> List[Document]:
return [
Document(
page_content=r.message.pop("content"),
metadata={"score": r.dist, **r.message},
)
for r in results
if r.message
]
def _summary_search_result_to_doc(
self, results: List[MemorySearchResult]
) -> List[Document]:
return [
Document(
page_content=r.summary.content,
metadata={
"score": r.dist,
"uuid": r.summary.uuid,
"created_at": r.summary.created_at,
"token_count": r.summary.token_count,
},
)
for r in results
if r.summary
]
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
metadata: Optional[Dict[str, Any]] = None,
) -> List[Document]:
from zep_python.memory import MemorySearchPayload
if not self.zep_client:
raise RuntimeError("Zep client not initialized.")
payload = MemorySearchPayload(
text=query,
metadata=metadata,
search_scope=self.search_scope,
search_type=self.search_type,
mmr_lambda=self.mmr_lambda,
)
results: List[MemorySearchResult] = self.zep_client.memory.search_memory(
self.session_id, payload, limit=self.top_k
)
if self.search_scope == SearchScope.summary:
return self._summary_search_result_to_doc(results)
return self._messages_search_result_to_doc(results)
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
metadata: Optional[Dict[str, Any]] = None,
) -> List[Document]:
from zep_python.memory import MemorySearchPayload
if not self.zep_client:
raise RuntimeError("Zep client not initialized.")
payload = MemorySearchPayload(
text=query,
metadata=metadata,
search_scope=self.search_scope,
search_type=self.search_type,
mmr_lambda=self.mmr_lambda,
)
results: List[MemorySearchResult] = await self.zep_client.memory.asearch_memory(
self.session_id, payload, limit=self.top_k
)
if self.search_scope == SearchScope.summary:
return self._summary_search_result_to_doc(results)
return self._messages_search_result_to_doc(results)
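if __name__ == "__main__":
    # Usage sketch. The server URL and session id are placeholder assumptions;
    # a running Zep server and an existing chat session are required.
    retriever = ZepRetriever(
        url="http://localhost:8000",
        session_id="user-123-session",
        top_k=5,
        search_type=SearchType.mmr,
        mmr_lambda=0.5,
    )
    docs = retriever.get_relevant_documents("What did we decide about pricing?")
    print(docs)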
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~embeddings~llamacpp.py | from typing import Any, Dict, List, Optional
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain_core.schema.embeddings import Embeddings
class LlamaCppEmbeddings(BaseModel, Embeddings):
"""llama.cpp embedding models.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.embeddings import LlamaCppEmbeddings
llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
"""
client: Any #: :meta private:
model_path: str
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use. If None, the number
of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
n_gpu_layers: Optional[int] = Field(None, alias="n_gpu_layers")
"""Number of layers to be loaded into gpu memory. Default None."""
verbose: bool = Field(True, alias="verbose")
"""Print verbose output to stderr."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"verbose",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, embedding=True, **model_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using the Llama model.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = [self.client.embed(text) for text in texts]
return [list(map(float, e)) for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Embed a query using the Llama model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embedding = self.client.embed(text)
return list(map(float, embedding))
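if __name__ == "__main__":
    # Sketch extending the class docstring example. The model path is an
    # assumption; point it at a local model file supported by llama-cpp-python.
    llama = LlamaCppEmbeddings(model_path="/path/to/model.bin")
    doc_vectors = llama.embed_documents(["first document", "second document"])
    query_vector = llama.embed_query("a question about the documents")
    print(len(doc_vectors), len(query_vector))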
| [] |
2024-01-10 | axgpt/langchain | libs~core~langchain_core~callbacks~tracers~root_listeners.py | from typing import Callable, Optional, Union
from uuid import UUID
from langchain_core.callbacks.tracers.base import BaseTracer
from langchain_core.callbacks.tracers.schemas import Run
from langchain_core.runnables.config import (
RunnableConfig,
call_func_with_variable_args,
)
Listener = Union[Callable[[Run], None], Callable[[Run, RunnableConfig], None]]
class RootListenersTracer(BaseTracer):
def __init__(
self,
*,
config: RunnableConfig,
on_start: Optional[Listener],
on_end: Optional[Listener],
on_error: Optional[Listener],
) -> None:
super().__init__()
self.config = config
self._arg_on_start = on_start
self._arg_on_end = on_end
self._arg_on_error = on_error
self.root_id: Optional[UUID] = None
def _persist_run(self, run: Run) -> None:
# This is a legacy method only called once for an entire run tree
# therefore not useful here
pass
def _on_run_create(self, run: Run) -> None:
if self.root_id is not None:
return
self.root_id = run.id
if self._arg_on_start is not None:
call_func_with_variable_args(self._arg_on_start, run, self.config)
def _on_run_update(self, run: Run) -> None:
if run.id != self.root_id:
return
if run.error is None:
if self._arg_on_end is not None:
call_func_with_variable_args(self._arg_on_end, run, self.config)
else:
if self._arg_on_error is not None:
call_func_with_variable_args(self._arg_on_error, run, self.config)
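# Usage sketch: this tracer is normally attached via `Runnable.with_listeners(...)`,
# which wires the callbacks to the root run. A listener may accept either the Run
# alone or (Run, RunnableConfig), matching call_func_with_variable_args above.
# Illustrative only; `chain` stands in for any Runnable:
#
#     chain_with_listeners = chain.with_listeners(
#         on_start=lambda run: print("root run started:", run.id),
#         on_end=lambda run, config: print("root run finished:", run.id),
#     )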
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~memory~chat_message_histories~test_zep.py | from typing import TYPE_CHECKING
import pytest
from langchain_core.schema.messages import AIMessage, HumanMessage, SystemMessage
from pytest_mock import MockerFixture
from langchain.memory.chat_message_histories import ZepChatMessageHistory
if TYPE_CHECKING:
from zep_python import ZepClient
@pytest.fixture
@pytest.mark.requires("zep_python")
def zep_chat(mocker: MockerFixture) -> ZepChatMessageHistory:
mock_zep_client: ZepClient = mocker.patch("zep_python.ZepClient", autospec=True)
mock_zep_client.memory = mocker.patch(
"zep_python.memory.client.MemoryClient", autospec=True
)
zep_chat: ZepChatMessageHistory = ZepChatMessageHistory(
"test_session", "http://localhost:8000"
)
zep_chat.zep_client = mock_zep_client
return zep_chat
@pytest.mark.requires("zep_python")
def test_messages(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
from zep_python import Memory, Message, Summary
mock_memory: Memory = Memory(
summary=Summary(
content="summary",
),
messages=[
Message(content="message", role="ai", metadata={"key": "value"}),
Message(content="message2", role="human", metadata={"key2": "value2"}),
],
)
zep_chat.zep_client.memory.get_memory.return_value = mock_memory # type: ignore
result = zep_chat.messages
assert len(result) == 3
assert isinstance(result[0], SystemMessage) # summary
assert isinstance(result[1], AIMessage)
assert isinstance(result[2], HumanMessage)
@pytest.mark.requires("zep_python")
def test_add_user_message(
mocker: MockerFixture, zep_chat: ZepChatMessageHistory
) -> None:
zep_chat.add_user_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_add_ai_message(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_ai_message("test message")
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_append(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.add_message(AIMessage(content="test message"))
zep_chat.zep_client.memory.add_memory.assert_called_once() # type: ignore
@pytest.mark.requires("zep_python")
def test_search(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.search("test query")
zep_chat.zep_client.memory.search_memory.assert_called_once_with( # type: ignore
"test_session", mocker.ANY, limit=None
)
@pytest.mark.requires("zep_python")
def test_clear(mocker: MockerFixture, zep_chat: ZepChatMessageHistory) -> None:
zep_chat.clear()
zep_chat.zep_client.memory.delete_memory.assert_called_once_with( # type: ignore
"test_session"
)
| [
"message2",
"message",
"test message"
] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~output_parsers~regex_dict.py | from __future__ import annotations
import re
from typing import Dict, Optional
from langchain_core.schema import BaseOutputParser
class RegexDictParser(BaseOutputParser):
"""Parse the output of an LLM call into a Dictionary using a regex."""
regex_pattern: str = r"{}:\s?([^.'\n']*)\.?" # : :meta private:
"""The regex pattern to use to parse the output."""
output_key_to_format: Dict[str, str]
"""The keys to use for the output."""
no_update_value: Optional[str] = None
"""The default key to use for the output."""
@property
def _type(self) -> str:
"""Return the type key."""
return "regex_dict_parser"
def parse(self, text: str) -> Dict[str, str]:
"""Parse the output of an LLM call."""
result = {}
for output_key, expected_format in self.output_key_to_format.items():
specific_regex = self.regex_pattern.format(re.escape(expected_format))
matches = re.findall(specific_regex, text)
if not matches:
raise ValueError(
f"No match found for output key: {output_key} with expected format \
{expected_format} on text {text}"
)
elif len(matches) > 1:
raise ValueError(
f"Multiple matches found for output key: {output_key} with \
expected format {expected_format} on text {text}"
)
elif (
self.no_update_value is not None and matches[0] == self.no_update_value
):
continue
else:
result[output_key] = matches[0]
return result
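if __name__ == "__main__":
    # Small illustration with hypothetical keys; each expected format string must
    # appear (followed by a colon) exactly once in the text being parsed.
    parser = RegexDictParser(
        output_key_to_format={"action": "Action", "action_input": "Action Input"},
        no_update_value="N/A",
    )
    text = "Action: search\nAction Input: weather in SF\n"
    print(parser.parse(text))  # {'action': 'search', 'action_input': 'weather in SF'}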
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~document_loaders~parsers~audio.py | import logging
import time
from typing import Dict, Iterator, Optional, Tuple
from langchain_core.schema import Document
from langchain.document_loaders.base import BaseBlobParser
from langchain.document_loaders.blob_loaders import Blob
logger = logging.getLogger(__name__)
class OpenAIWhisperParser(BaseBlobParser):
"""Transcribe and parse audio files.
    Audio transcription is done with the OpenAI Whisper API."""
def __init__(self, api_key: Optional[str] = None):
self.api_key = api_key
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import io
try:
import openai
except ImportError:
raise ImportError(
"openai package not found, please install it with "
"`pip install openai`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
# Set the API key if provided
if self.api_key:
openai.api_key = self.api_key
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
# Define the duration of each chunk in minutes
# Need to meet 25MB size limit for Whisper API
chunk_duration = 20
chunk_duration_ms = chunk_duration * 60 * 1000
# Split the audio into chunk_duration_ms chunks
for split_number, i in enumerate(range(0, len(audio), chunk_duration_ms)):
# Audio chunk
chunk = audio[i : i + chunk_duration_ms]
file_obj = io.BytesIO(chunk.export(format="mp3").read())
if blob.source is not None:
file_obj.name = blob.source + f"_part_{split_number}.mp3"
else:
file_obj.name = f"part_{split_number}.mp3"
# Transcribe
print(f"Transcribing part {split_number+1}!")
attempts = 0
while attempts < 3:
try:
transcript = openai.Audio.transcribe("whisper-1", file_obj)
break
except Exception as e:
attempts += 1
print(f"Attempt {attempts} failed. Exception: {str(e)}")
time.sleep(5)
else:
print("Failed to transcribe after 3 attempts.")
continue
yield Document(
page_content=transcript.text,
metadata={"source": blob.source, "chunk": split_number},
)
class OpenAIWhisperParserLocal(BaseBlobParser):
"""Transcribe and parse audio files with OpenAI Whisper model.
    Audio transcription is run locally with an OpenAI Whisper model loaded via the ``transformers`` pipeline.
Parameters:
device - device to use
    NOTE: by default the GPU is used if one is available;
    set device = "cpu" to force CPU inference.
lang_model - whisper model to use, for example "openai/whisper-medium"
forced_decoder_ids - id states for decoder in multilanguage model,
usage example:
from transformers import WhisperProcessor
processor = WhisperProcessor.from_pretrained("openai/whisper-medium")
forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french",
task="transcribe")
forced_decoder_ids = WhisperProcessor.get_decoder_prompt_ids(language="french",
task="translate")
"""
def __init__(
self,
device: str = "0",
lang_model: Optional[str] = None,
forced_decoder_ids: Optional[Tuple[Dict]] = None,
):
"""Initialize the parser.
Args:
device: device to use.
lang_model: whisper model to use, for example "openai/whisper-medium".
Defaults to None.
forced_decoder_ids: id states for decoder in a multilanguage model.
Defaults to None.
"""
try:
from transformers import pipeline
except ImportError:
raise ImportError(
"transformers package not found, please install it with "
"`pip install transformers`"
)
try:
import torch
except ImportError:
raise ImportError(
"torch package not found, please install it with " "`pip install torch`"
)
        # set the device: use CPU if requested, otherwise pick a GPU when available
if device == "cpu":
self.device = "cpu"
if lang_model is not None:
self.lang_model = lang_model
print("WARNING! Model override. Using model: ", self.lang_model)
else:
# unless overridden, use the small base model on cpu
self.lang_model = "openai/whisper-base"
else:
if torch.cuda.is_available():
self.device = "cuda:0"
# check GPU memory and select automatically the model
mem = torch.cuda.get_device_properties(self.device).total_memory / (
1024**2
)
if mem < 5000:
rec_model = "openai/whisper-base"
elif mem < 7000:
rec_model = "openai/whisper-small"
elif mem < 12000:
rec_model = "openai/whisper-medium"
else:
rec_model = "openai/whisper-large"
# check if model is overridden
if lang_model is not None:
self.lang_model = lang_model
print("WARNING! Model override. Might not fit in your GPU")
else:
self.lang_model = rec_model
else:
"cpu"
print("Using the following model: ", self.lang_model)
# load model for inference
self.pipe = pipeline(
"automatic-speech-recognition",
model=self.lang_model,
chunk_length_s=30,
device=self.device,
)
if forced_decoder_ids is not None:
try:
self.pipe.model.config.forced_decoder_ids = forced_decoder_ids
except Exception as exception_text:
logger.info(
"Unable to set forced_decoder_ids parameter for whisper model"
f"Text of exception: {exception_text}"
"Therefore whisper model will use default mode for decoder"
)
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
import io
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with `pip install pydub`"
)
try:
import librosa
except ImportError:
raise ImportError(
"librosa package not found, please install it with "
"`pip install librosa`"
)
# Audio file from disk
audio = AudioSegment.from_file(blob.path)
file_obj = io.BytesIO(audio.export(format="mp3").read())
# Transcribe
print(f"Transcribing part {blob.path}!")
y, sr = librosa.load(file_obj, sr=16000)
prediction = self.pipe(y.copy(), batch_size=8)["text"]
yield Document(
page_content=prediction,
metadata={"source": blob.source},
)
class YandexSTTParser(BaseBlobParser):
"""Transcribe and parse audio files.
Audio transcription is with OpenAI Whisper model."""
def __init__(
self,
*,
api_key: Optional[str] = None,
iam_token: Optional[str] = None,
model: str = "general",
language: str = "auto",
):
"""Initialize the parser.
Args:
api_key: API key for a service account
with the `ai.speechkit-stt.user` role.
iam_token: IAM token for a service account
with the `ai.speechkit-stt.user` role.
model: Recognition model name.
Defaults to general.
language: The language in ISO 639-1 format.
Defaults to automatic language recognition.
Either `api_key` or `iam_token` must be provided, but not both.
"""
if (api_key is None) == (iam_token is None):
raise ValueError(
"Either 'api_key' or 'iam_token' must be provided, but not both."
)
self.api_key = api_key
self.iam_token = iam_token
self.model = model
self.language = language
def lazy_parse(self, blob: Blob) -> Iterator[Document]:
"""Lazily parse the blob."""
try:
from speechkit import configure_credentials, creds, model_repository
from speechkit.stt import AudioProcessingType
except ImportError:
raise ImportError(
"yandex-speechkit package not found, please install it with "
"`pip install yandex-speechkit`"
)
try:
from pydub import AudioSegment
except ImportError:
raise ImportError(
"pydub package not found, please install it with " "`pip install pydub`"
)
if self.api_key:
configure_credentials(
yandex_credentials=creds.YandexCredentials(api_key=self.api_key)
)
else:
configure_credentials(
yandex_credentials=creds.YandexCredentials(iam_token=self.iam_token)
)
audio = AudioSegment.from_file(blob.path)
model = model_repository.recognition_model()
model.model = self.model
model.language = self.language
model.audio_processing_type = AudioProcessingType.Full
result = model.transcribe(audio)
for res in result:
yield Document(
page_content=res.normalized_text,
metadata={"source": blob.source},
)
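if __name__ == "__main__":
    # Sketch: pairing OpenAIWhisperParser with GenericLoader to transcribe a folder
    # of mp3 files. The "./podcasts" path is a placeholder assumption and a valid
    # OpenAI API key must be configured.
    from langchain.document_loaders.blob_loaders import FileSystemBlobLoader
    from langchain.document_loaders.generic import GenericLoader
    loader = GenericLoader(
        FileSystemBlobLoader("./podcasts", glob="*.mp3"),
        OpenAIWhisperParser(),
    )
    docs = loader.load()
    print(len(docs))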
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~document_transformers~google_translate.py | from typing import Any, Optional, Sequence
from langchain_core.schema import BaseDocumentTransformer, Document
from langchain.utilities.vertexai import get_client_info
class GoogleTranslateTransformer(BaseDocumentTransformer):
"""Translate text documents using Google Cloud Translation."""
def __init__(
self,
project_id: str,
*,
location: str = "global",
model_id: Optional[str] = None,
glossary_id: Optional[str] = None,
api_endpoint: Optional[str] = None,
) -> None:
"""
Arguments:
project_id: Google Cloud Project ID.
location: (Optional) Translate model location.
model_id: (Optional) Translate model ID to use.
glossary_id: (Optional) Translate glossary ID to use.
api_endpoint: (Optional) Regional endpoint to use.
"""
try:
from google.api_core.client_options import ClientOptions
from google.cloud import translate
except ImportError as exc:
raise ImportError(
"Install Google Cloud Translate to use this parser."
"(pip install google-cloud-translate)"
) from exc
self.project_id = project_id
self.location = location
self.model_id = model_id
self.glossary_id = glossary_id
self._client = translate.TranslationServiceClient(
client_info=get_client_info("translate"),
client_options=(
ClientOptions(api_endpoint=api_endpoint) if api_endpoint else None
),
)
self._parent_path = self._client.common_location_path(project_id, location)
# For some reason, there's no `model_path()` method for the client.
self._model_path = (
f"{self._parent_path}/models/{model_id}" if model_id else None
)
self._glossary_path = (
self._client.glossary_path(project_id, location, glossary_id)
if glossary_id
else None
)
def transform_documents(
self, documents: Sequence[Document], **kwargs: Any
) -> Sequence[Document]:
"""Translate text documents using Google Translate.
Arguments:
source_language_code: ISO 639 language code of the input document.
target_language_code: ISO 639 language code of the output document.
For supported languages, refer to:
https://cloud.google.com/translate/docs/languages
mime_type: (Optional) Media Type of input text.
Options: `text/plain`, `text/html`
"""
try:
from google.cloud import translate
except ImportError as exc:
raise ImportError(
"Install Google Cloud Translate to use this parser."
"(pip install google-cloud-translate)"
) from exc
response = self._client.translate_text(
request=translate.TranslateTextRequest(
contents=[doc.page_content for doc in documents],
parent=self._parent_path,
model=self._model_path,
glossary_config=translate.TranslateTextGlossaryConfig(
glossary=self._glossary_path
),
source_language_code=kwargs.get("source_language_code", None),
target_language_code=kwargs.get("target_language_code"),
mime_type=kwargs.get("mime_type", "text/plain"),
)
)
# If using a glossary, the translations will be in `glossary_translations`.
translations = response.glossary_translations or response.translations
return [
Document(
page_content=translation.translated_text,
metadata={
**doc.metadata,
"model": translation.model,
"detected_language_code": translation.detected_language_code,
},
)
for doc, translation in zip(documents, translations)
]
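if __name__ == "__main__":
    # Sketch: translating documents to French. The project id is a placeholder
    # assumption and Google Cloud credentials must be configured in the environment.
    translator = GoogleTranslateTransformer(project_id="my-gcp-project")
    docs = [Document(page_content="Hello, how are you?")]
    translated = translator.transform_documents(docs, target_language_code="fr")
    print(translated[0].page_content)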
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~unit_tests~output_parsers~test_enum_parser.py | from enum import Enum
from langchain_core.schema import OutputParserException
from langchain.output_parsers.enum import EnumOutputParser
class Colors(Enum):
RED = "red"
GREEN = "green"
BLUE = "blue"
def test_enum_output_parser_parse() -> None:
parser = EnumOutputParser(enum=Colors)
# Test valid inputs
result = parser.parse("red")
assert result == Colors.RED
result = parser.parse("green")
assert result == Colors.GREEN
result = parser.parse("blue")
assert result == Colors.BLUE
# Test invalid input
try:
parser.parse("INVALID")
assert False, "Should have raised OutputParserException"
except OutputParserException:
pass
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~embeddings~minimax.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
import requests
from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from langchain_core.schema.embeddings import Embeddings
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator() -> Callable[[Any], Any]:
"""Returns a tenacity retry decorator."""
multiplier = 1
min_seconds = 1
max_seconds = 4
max_retries = 6
return retry(
reraise=True,
stop=stop_after_attempt(max_retries),
wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def embed_with_retry(embeddings: MiniMaxEmbeddings, *args: Any, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator()
@retry_decorator
def _embed_with_retry(*args: Any, **kwargs: Any) -> Any:
return embeddings.embed(*args, **kwargs)
return _embed_with_retry(*args, **kwargs)
class MiniMaxEmbeddings(BaseModel, Embeddings):
"""MiniMax's embedding service.
    To use, you should have the environment variables ``MINIMAX_GROUP_ID`` and
    ``MINIMAX_API_KEY`` set with your group id and API key, or pass them as named
    parameters to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import MiniMaxEmbeddings
embeddings = MiniMaxEmbeddings()
query_text = "This is a test query."
query_result = embeddings.embed_query(query_text)
document_text = "This is a test document."
document_result = embeddings.embed_documents([document_text])
"""
endpoint_url: str = "https://api.minimax.chat/v1/embeddings"
"""Endpoint URL to use."""
model: str = "embo-01"
"""Embeddings model name to use."""
embed_type_db: str = "db"
"""For embed_documents"""
embed_type_query: str = "query"
"""For embed_query"""
minimax_group_id: Optional[str] = None
"""Group ID for MiniMax API."""
minimax_api_key: Optional[str] = None
"""API Key for MiniMax API."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that group id and api key exists in environment."""
minimax_group_id = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
minimax_api_key = get_from_dict_or_env(
values, "minimax_api_key", "MINIMAX_API_KEY"
)
values["minimax_group_id"] = minimax_group_id
values["minimax_api_key"] = minimax_api_key
return values
def embed(
self,
texts: List[str],
embed_type: str,
) -> List[List[float]]:
payload = {
"model": self.model,
"type": embed_type,
"texts": texts,
}
# HTTP headers for authorization
headers = {
"Authorization": f"Bearer {self.minimax_api_key}",
"Content-Type": "application/json",
}
params = {
"GroupId": self.minimax_group_id,
}
# send request
response = requests.post(
self.endpoint_url, params=params, headers=headers, json=payload
)
parsed_response = response.json()
# check for errors
if parsed_response["base_resp"]["status_code"] != 0:
raise ValueError(
f"MiniMax API returned an error: {parsed_response['base_resp']}"
)
embeddings = parsed_response["vectors"]
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed documents using a MiniMax embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = embed_with_retry(self, texts=texts, embed_type=self.embed_type_db)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Embed a query using a MiniMax embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = embed_with_retry(
self, texts=[text], embed_type=self.embed_type_query
)
return embeddings[0]
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~agents~output_parsers~self_ask.py | from typing import Sequence, Union
from langchain_core.schema import AgentAction, AgentFinish, OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
        if not any(follow in last_line for follow in self.followups):
if self.finish_string not in last_line:
raise OutputParserException(f"Could not parse output: {text}")
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
| [] |
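A minimal usage sketch for the parser above; the thought strings are illustrative only and follow the two formats described in the class docstring:

```python
from langchain.agents.output_parsers.self_ask import SelfAskOutputParser

parser = SelfAskOutputParser()

# A follow-up question becomes an AgentAction on the "Intermediate Answer" tool.
action = parser.parse("Thoughts go here...\nFollow up: what is the temperature in SF?")
print(action.tool)        # Intermediate Answer
print(action.tool_input)  # what is the temperature in SF?

# A final answer becomes an AgentFinish carrying the text after the marker.
finish = parser.parse(
    "Thoughts go here...\nSo the final answer is: The temperature is 100 degrees"
)
print(finish.return_values["output"])  # The temperature is 100 degrees
```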
2024-01-10 | axgpt/langchain | libs~langchain~langchain~schema~callbacks~tracers~stdout.py | from langchain_core.callbacks.tracers.stdout import (
ConsoleCallbackHandler,
FunctionCallbackHandler,
elapsed,
try_json_stringify,
)
__all__ = [
"try_json_stringify",
"elapsed",
"FunctionCallbackHandler",
"ConsoleCallbackHandler",
]
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~document_loaders~joplin.py | import json
import urllib.request
from datetime import datetime
from typing import Iterator, List, Optional
from langchain_core.schema import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utils import get_from_env
LINK_NOTE_TEMPLATE = "joplin://x-callback-url/openNote?id={id}"
class JoplinLoader(BaseLoader):
"""Load notes from `Joplin`.
In order to use this loader, you need to have Joplin running with the
Web Clipper enabled (look for "Web Clipper" in the app settings).
To get the access token, you need to go to the Web Clipper options and
under "Advanced Options" you will find the access token.
You can find more information about the Web Clipper service here:
https://joplinapp.org/clipper/
"""
def __init__(
self,
access_token: Optional[str] = None,
port: int = 41184,
host: str = "localhost",
) -> None:
"""
Args:
access_token: The access token to use.
port: The port where the Web Clipper service is running. Default is 41184.
host: The host where the Web Clipper service is running.
Default is localhost.
"""
access_token = access_token or get_from_env(
"access_token", "JOPLIN_ACCESS_TOKEN"
)
base_url = f"http://{host}:{port}"
self._get_note_url = (
f"{base_url}/notes?token={access_token}"
f"&fields=id,parent_id,title,body,created_time,updated_time&page={{page}}"
)
self._get_folder_url = (
f"{base_url}/folders/{{id}}?token={access_token}&fields=title"
)
self._get_tag_url = (
f"{base_url}/notes/{{id}}/tags?token={access_token}&fields=title"
)
def _get_notes(self) -> Iterator[Document]:
has_more = True
page = 1
while has_more:
req_note = urllib.request.Request(self._get_note_url.format(page=page))
with urllib.request.urlopen(req_note) as response:
json_data = json.loads(response.read().decode())
for note in json_data["items"]:
metadata = {
"source": LINK_NOTE_TEMPLATE.format(id=note["id"]),
"folder": self._get_folder(note["parent_id"]),
"tags": self._get_tags(note["id"]),
"title": note["title"],
"created_time": self._convert_date(note["created_time"]),
"updated_time": self._convert_date(note["updated_time"]),
}
yield Document(page_content=note["body"], metadata=metadata)
has_more = json_data["has_more"]
page += 1
def _get_folder(self, folder_id: str) -> str:
req_folder = urllib.request.Request(self._get_folder_url.format(id=folder_id))
with urllib.request.urlopen(req_folder) as response:
json_data = json.loads(response.read().decode())
return json_data["title"]
def _get_tags(self, note_id: str) -> List[str]:
req_tag = urllib.request.Request(self._get_tag_url.format(id=note_id))
with urllib.request.urlopen(req_tag) as response:
json_data = json.loads(response.read().decode())
return [tag["title"] for tag in json_data["items"]]
def _convert_date(self, date: int) -> str:
return datetime.fromtimestamp(date / 1000).strftime("%Y-%m-%d %H:%M:%S")
def lazy_load(self) -> Iterator[Document]:
yield from self._get_notes()
def load(self) -> List[Document]:
return list(self.lazy_load())
| [
"joplin://x-callback-url/openNote?id={id}"
] |
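A short, hedged example of driving the loader above; the access token is a placeholder and it assumes the Joplin Web Clipper service is running locally on the default port:

```python
from langchain.document_loaders.joplin import JoplinLoader

# Requires Joplin running with the Web Clipper service enabled; the token
# comes from the Web Clipper "Advanced Options" panel (placeholder below).
loader = JoplinLoader(access_token="<your-joplin-token>", port=41184, host="localhost")

docs = loader.load()  # one Document per note, with folder/tags/title metadata
for doc in docs[:3]:
    print(doc.metadata["title"], "-", doc.metadata["folder"])
```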
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~vectorstores~docarray~test_in_memory.py | from pathlib import Path
from typing import List
import numpy as np
import pytest
from langchain_core.schema import Document
from langchain.vectorstores.docarray import DocArrayInMemorySearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def texts() -> List[str]:
return ["foo", "bar", "baz"]
def test_from_texts(texts: List[str]) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayInMemorySearch.from_texts(
texts,
FakeEmbeddings(),
)
assert isinstance(docsearch, DocArrayInMemorySearch)
assert docsearch.doc_index.num_docs() == 3
def test_add_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayInMemorySearch.from_params(FakeEmbeddings())
assert isinstance(docsearch, DocArrayInMemorySearch)
assert docsearch.doc_index.num_docs() == 0
docsearch.add_texts(texts=texts)
assert docsearch.doc_index.num_docs() == 3
@pytest.mark.parametrize("metric", ["cosine_sim", "euclidean_dist", "sqeuclidean_dist"])
def test_sim_search(metric: str, texts: List[str]) -> None:
"""Test end to end construction and simple similarity search."""
texts = ["foo", "bar", "baz"]
in_memory_vec_store = DocArrayInMemorySearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
metric=metric,
)
output = in_memory_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine_sim", "euclidean_dist", "sqeuclidean_dist"])
def test_sim_search_with_score(metric: str, texts: List[str]) -> None:
"""Test end to end construction and similarity search with score."""
in_memory_vec_store = DocArrayInMemorySearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
metric=metric,
)
output = in_memory_vec_store.similarity_search_with_score("foo", k=1)
out_doc, out_score = output[0]
assert out_doc == Document(page_content="foo")
expected_score = 0.0 if "dist" in metric else 1.0
assert np.isclose(out_score, expected_score, atol=1.0e-6)
@pytest.mark.parametrize("metric", ["cosine_sim", "euclidean_dist", "sqeuclidean_dist"])
def test_sim_search_by_vector(metric: str, texts: List[str]) -> None:
"""Test end to end construction and similarity search by vector."""
in_memory_vec_store = DocArrayInMemorySearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
metric=metric,
)
embedding = [1.0] * 10
output = in_memory_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content="bar")]
@pytest.mark.parametrize("metric", ["cosine_sim", "euclidean_dist", "sqeuclidean_dist"])
def test_max_marginal_relevance_search(metric: str, texts: List[str]) -> None:
"""Test MRR search."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = DocArrayInMemorySearch.from_texts(
texts, FakeEmbeddings(), metadatas=metadatas, metric=metric
)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~kay.py | from __future__ import annotations
from typing import Any, List
from langchain_core.schema import BaseRetriever, Document
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
class KayAiRetriever(BaseRetriever):
"""
Retriever for Kay.ai datasets.
To work properly, expects you to have KAY_API_KEY env variable set.
You can get one for free at https://kay.ai/.
"""
client: Any
num_contexts: int
@classmethod
def create(
cls,
dataset_id: str,
data_types: List[str],
num_contexts: int = 6,
) -> KayAiRetriever:
"""
Create a KayRetriever given a Kay dataset id and a list of datasources.
Args:
dataset_id: A dataset id category in Kay, like "company"
data_types: A list of datasources present within a dataset. For
"company" the corresponding datasources could be
["10-K", "10-Q", "8-K", "PressRelease"].
num_contexts: The number of documents to retrieve on each query.
Defaults to 6.
"""
try:
from kay.rag.retrievers import KayRetriever
except ImportError:
raise ImportError(
"Could not import kay python package. Please install it with "
"`pip install kay`.",
)
client = KayRetriever(dataset_id, data_types)
return cls(client=client, num_contexts=num_contexts)
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
ctxs = self.client.query(query=query, num_context=self.num_contexts)
docs = []
for ctx in ctxs:
page_content = ctx.pop("chunk_embed_text", None)
if page_content is None:
continue
docs.append(Document(page_content=page_content, metadata={**ctx}))
return docs
| [] |
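A usage sketch for the retriever above, assuming `KAY_API_KEY` is set and the `kay` package is installed; the dataset id and data types mirror the docstring example:

```python
from langchain.retrievers.kay import KayAiRetriever

retriever = KayAiRetriever.create(
    dataset_id="company",
    data_types=["10-K", "10-Q", "8-K", "PressRelease"],
    num_contexts=3,
)
docs = retriever.get_relevant_documents("What did the latest 10-K say about revenue?")
for doc in docs:
    print(doc.page_content[:80])
```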
2024-01-10 | axgpt/langchain | libs~langchain~langchain~prompts~example_selector~length_based.py | from langchain_core.prompts.example_selector.length_based import (
LengthBasedExampleSelector,
)
__all__ = ["LengthBasedExampleSelector"]
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~elastic_search_bm25.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain_core.schema import BaseRetriever
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
class ElasticSearchBM25Retriever(BaseRetriever):
"""`Elasticsearch` retriever that uses `BM25`.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
"""
client: Any
"""Elasticsearch client."""
index_name: str
"""Name of the index to use in Elasticsearch."""
@classmethod
def create(
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
"""
        Create an ElasticSearchBM25Retriever backed by a newly created Elasticsearch index.
Args:
elasticsearch_url: URL of the Elasticsearch instance to connect to.
index_name: Name of the index to use in Elasticsearch.
k1: BM25 parameter k1.
b: BM25 parameter b.
        Returns:
            An ElasticSearchBM25Retriever instance using the newly created index.
        """
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25", # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(client=es, index_name=index_name)
def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
| [
"{'type': 'text', 'similarity': 'custom_bm25'}"
] |
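A minimal sketch of the create/add/query flow above, assuming a reachable local Elasticsearch instance; the URL and index name are placeholders:

```python
from langchain.retrievers.elastic_search_bm25 import ElasticSearchBM25Retriever

retriever = ElasticSearchBM25Retriever.create(
    elasticsearch_url="http://localhost:9200",  # placeholder URL
    index_name="langchain-bm25-demo",
)
retriever.add_texts(["foo bar", "hello world", "foo baz"])

docs = retriever.get_relevant_documents("foo")
print([d.page_content for d in docs])
```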
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~web_research.py | import logging
import re
from typing import List, Optional
from langchain_core.prompts import BasePromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.schema import BaseRetriever, Document
from langchain_core.schema.vectorstore import VectorStore
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
from langchain.llms import LlamaCpp
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
logger = logging.getLogger(__name__)
class SearchQueries(BaseModel):
"""Search queries to research for the user's goal."""
queries: List[str] = Field(
..., description="List of search queries to look up on Google"
)
DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)
DEFAULT_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an assistant tasked with improving Google search \
results. Generate THREE Google search queries that are similar to \
this question. The output should be a numbered list of questions and each \
should have a question mark at the end: {question}""",
)
class LineList(BaseModel):
"""List of questions."""
lines: List[str] = Field(description="Questions")
class QuestionListOutputParser(PydanticOutputParser):
"""Output parser for a list of numbered questions."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = re.findall(r"\d+\..*?(?:\n|$)", text)
return LineList(lines=lines)
class WebResearchRetriever(BaseRetriever):
"""`Google Search API` retriever."""
# Inputs
vectorstore: VectorStore = Field(
..., description="Vector store for storing web pages"
)
llm_chain: LLMChain
search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper")
num_search_results: int = Field(1, description="Number of pages per Google search")
text_splitter: TextSplitter = Field(
RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
description="Text splitter for splitting web pages into chunks",
)
url_database: List[str] = Field(
default_factory=list, description="List of processed URLs"
)
@classmethod
def from_llm(
cls,
vectorstore: VectorStore,
llm: BaseLLM,
search: GoogleSearchAPIWrapper,
prompt: Optional[BasePromptTemplate] = None,
num_search_results: int = 1,
text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
chunk_size=1500, chunk_overlap=150
),
) -> "WebResearchRetriever":
"""Initialize from llm using default template.
Args:
vectorstore: Vector store for storing web pages
llm: llm for search question generation
search: GoogleSearchAPIWrapper
prompt: prompt to generating search questions
num_search_results: Number of pages per Google search
text_splitter: Text splitter for splitting web pages into chunks
Returns:
WebResearchRetriever
"""
if not prompt:
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_SEARCH_PROMPT,
conditionals=[
(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)
],
)
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)
# Use chat model prompt
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
output_parser=QuestionListOutputParser(),
)
return cls(
vectorstore=vectorstore,
llm_chain=llm_chain,
search=search,
num_search_results=num_search_results,
text_splitter=text_splitter,
)
def clean_search_query(self, query: str) -> str:
# Some search tools (e.g., Google) will
# fail to return results if query has a
# leading digit: 1. "LangCh..."
# Check if the first character is a digit
if query[0].isdigit():
# Find the position of the first quote
first_quote_pos = query.find('"')
if first_quote_pos != -1:
# Extract the part of the string after the quote
query = query[first_quote_pos + 1 :]
# Remove the trailing quote if present
if query.endswith('"'):
query = query[:-1]
return query.strip()
def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]:
"""Returns num_search_results pages per Google search."""
query_clean = self.clean_search_query(query)
result = self.search.results(query_clean, num_search_results)
return result
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Search Google for documents related to the query input.
Args:
query: user query
Returns:
Relevant documents from all various urls.
"""
# Get search questions
logger.info("Generating questions for Google Search ...")
result = self.llm_chain({"question": query})
logger.info(f"Questions for Google Search (raw): {result}")
questions = getattr(result["text"], "lines", [])
logger.info(f"Questions for Google Search: {questions}")
# Get urls
logger.info("Searching for relevant urls...")
urls_to_look = []
for query in questions:
# Google search
search_results = self.search_tool(query, self.num_search_results)
logger.info("Searching for relevant urls...")
logger.info(f"Search results: {search_results}")
for res in search_results:
if res.get("link", None):
urls_to_look.append(res["link"])
# Relevant urls
urls = set(urls_to_look)
# Check for any new urls that we have not processed
new_urls = list(urls.difference(self.url_database))
logger.info(f"New URLs to load: {new_urls}")
# Load, split, and add new urls to vectorstore
if new_urls:
loader = AsyncHtmlLoader(new_urls, ignore_load_errors=True)
html2text = Html2TextTransformer()
logger.info("Indexing new urls...")
docs = loader.load()
docs = list(html2text.transform_documents(docs))
docs = self.text_splitter.split_documents(docs)
self.vectorstore.add_documents(docs)
self.url_database.extend(new_urls)
# Search for relevant splits
# TODO: make this async
logger.info("Grabbing most relevant splits from urls...")
docs = []
for query in questions:
docs.extend(self.vectorstore.similarity_search(query))
# Get unique docs
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc for doc in docs
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError
| [
"<<SYS>> \n You are an assistant tasked with improving Google search results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \n\n {question} [/INST]",
"question",
"You are an assistant tasked with improving Google search results. Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: {question}"
] |
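A hedged end-to-end sketch of wiring up the retriever above; it assumes `OPENAI_API_KEY`, `GOOGLE_API_KEY` and `GOOGLE_CSE_ID` are set and that `chromadb` is installed, and the vector store choice is illustrative (any `VectorStore` works):

```python
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.retrievers.web_research import WebResearchRetriever
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.vectorstores import Chroma

# Vector store for caching fetched web pages (directory name is a placeholder).
vectorstore = Chroma(
    embedding_function=OpenAIEmbeddings(), persist_directory="./web_research_db"
)
retriever = WebResearchRetriever.from_llm(
    vectorstore=vectorstore,
    llm=OpenAI(temperature=0),
    search=GoogleSearchAPIWrapper(),
    num_search_results=2,
)
docs = retriever.get_relevant_documents("How do LLM-powered autonomous agents work?")
```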
2024-01-10 | axgpt/langchain | libs~langchain~langchain~embeddings~javelin_ai_gateway.py | from __future__ import annotations
from typing import Any, Iterator, List, Optional
from langchain_core.pydantic_v1 import BaseModel
from langchain_core.schema.embeddings import Embeddings
def _chunk(texts: List[str], size: int) -> Iterator[List[str]]:
for i in range(0, len(texts), size):
yield texts[i : i + size]
class JavelinAIGatewayEmbeddings(Embeddings, BaseModel):
"""
    Wrapper around embedding models served by the Javelin AI Gateway.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain.embeddings import JavelinAIGatewayEmbeddings
embeddings = JavelinAIGatewayEmbeddings(
gateway_uri="<javelin-ai-gateway-uri>",
route="<your-javelin-gateway-embeddings-route>"
)
"""
client: Any
"""javelin client."""
route: str
"""The route to use for the Javelin AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the Javelin AI Gateway API."""
javelin_api_key: Optional[str] = None
"""The API key for the Javelin AI Gateway API."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri, api_key=self.javelin_api_key
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
def _query(self, texts: List[str]) -> List[List[float]]:
embeddings = []
for txt in _chunk(texts, 20):
try:
resp = self.client.query_route(self.route, query_body={"input": txt})
resp_dict = resp.dict()
embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
for item in embeddings_chunk:
if "embedding" in item:
embeddings.append(item["embedding"])
except ValueError as e:
print("Failed to query route: " + str(e))
return embeddings
async def _aquery(self, texts: List[str]) -> List[List[float]]:
embeddings = []
for txt in _chunk(texts, 20):
try:
resp = await self.client.aquery_route(
self.route, query_body={"input": txt}
)
resp_dict = resp.dict()
embeddings_chunk = resp_dict.get("llm_response", {}).get("data", [])
for item in embeddings_chunk:
if "embedding" in item:
embeddings.append(item["embedding"])
except ValueError as e:
print("Failed to query route: " + str(e))
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
return self._query(texts)
def embed_query(self, text: str) -> List[float]:
return self._query([text])[0]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
return await self._aquery(texts)
async def aembed_query(self, text: str) -> List[float]:
result = await self._aquery([text])
return result[0]
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~vectorstores~docarray~test_hnsw.py | from pathlib import Path
from typing import List
import numpy as np
import pytest
from langchain_core.schema import Document
from langchain.vectorstores.docarray import DocArrayHnswSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def texts() -> List[str]:
return ["foo", "bar", "baz"]
def test_from_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
)
assert docsearch.doc_index.num_docs() == 3
def test_add_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayHnswSearch.from_params(
work_dir=str(tmp_path),
n_dim=10,
embedding=FakeEmbeddings(),
)
docsearch.add_texts(texts=texts)
assert docsearch.doc_index.num_docs() == 3
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search(metric: str, texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
index=True,
)
output = hnsw_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_all_configurations(
metric: str, texts: List[str], tmp_path: Path
) -> None:
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
dist_metric=metric,
n_dim=10,
max_elements=8,
ef_construction=300,
ef=20,
M=8,
allow_replace_deleted=False,
num_threads=2,
)
output = hnsw_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_by_vector(metric: str, texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and similarity search by vector."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
)
embedding = [1.0] * 10
output = hnsw_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content="bar")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_with_score(metric: str, tmp_path: Path) -> None:
"""Test end to end construction and similarity search with score."""
texts = ["foo", "bar", "baz"]
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
)
output = hnsw_vec_store.similarity_search_with_score("foo", k=1)
assert len(output) == 1
out_doc, out_score = output[0]
assert out_doc == Document(page_content="foo")
assert np.isclose(out_score, 0.0, atol=1.0e-6)
def test_sim_search_with_score_for_ip_metric(texts: List[str], tmp_path: Path) -> None:
"""
Test end to end construction and similarity search with score for ip
(inner-product) metric.
"""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric="ip",
)
output = hnsw_vec_store.similarity_search_with_score("foo", k=3)
assert len(output) == 3
for result in output:
assert result[1] == -8.0
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_max_marginal_relevance_search(
metric: str, texts: List[str], tmp_path: Path
) -> None:
"""Test MRR search."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
dist_metric=metric,
work_dir=str(tmp_path),
n_dim=10,
)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~chat_models~javelin_ai_gateway.py | import logging
from typing import Any, Dict, List, Mapping, Optional, cast
from langchain_core.pydantic_v1 import BaseModel, Extra, SecretStr
from langchain_core.schema import (
ChatGeneration,
ChatResult,
)
from langchain_core.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
logger = logging.getLogger(__name__)
# Ignoring type because below is valid pydantic code
# Unexpected keyword argument "extra" for "__init_subclass__" of "object" [call-arg]
class ChatParams(BaseModel, extra=Extra.allow): # type: ignore[call-arg]
"""Parameters for the `Javelin AI Gateway` LLM."""
temperature: float = 0.0
stop: Optional[List[str]] = None
max_tokens: Optional[int] = None
class ChatJavelinAIGateway(BaseChatModel):
"""`Javelin AI Gateway` chat models API.
To use, you should have the ``javelin_sdk`` python package installed.
For more information, see https://docs.getjavelin.io
Example:
.. code-block:: python
from langchain.chat_models import ChatJavelinAIGateway
chat = ChatJavelinAIGateway(
gateway_uri="<javelin-ai-gateway-uri>",
route="<javelin-ai-gateway-chat-route>",
params={
"temperature": 0.1
}
)
"""
route: str
"""The route to use for the Javelin AI Gateway API."""
gateway_uri: Optional[str] = None
"""The URI for the Javelin AI Gateway API."""
params: Optional[ChatParams] = None
"""Parameters for the Javelin AI Gateway LLM."""
client: Any
"""javelin client."""
javelin_api_key: Optional[SecretStr] = None
"""The API key for the Javelin AI Gateway."""
def __init__(self, **kwargs: Any):
try:
from javelin_sdk import (
JavelinClient,
UnauthorizedError,
)
except ImportError:
raise ImportError(
"Could not import javelin_sdk python package. "
"Please install it with `pip install javelin_sdk`."
)
super().__init__(**kwargs)
if self.gateway_uri:
try:
self.client = JavelinClient(
base_url=self.gateway_uri,
api_key=cast(SecretStr, self.javelin_api_key).get_secret_value(),
)
except UnauthorizedError as e:
raise ValueError("Javelin: Incorrect API Key.") from e
@property
def _default_params(self) -> Dict[str, Any]:
params: Dict[str, Any] = {
"gateway_uri": self.gateway_uri,
"javelin_api_key": cast(SecretStr, self.javelin_api_key).get_secret_value(),
"route": self.route,
**(self.params.dict() if self.params else {}),
}
return params
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = self.client.query_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
message_dicts = [
ChatJavelinAIGateway._convert_message_to_dict(message)
for message in messages
]
data: Dict[str, Any] = {
"messages": message_dicts,
**(self.params.dict() if self.params else {}),
}
resp = await self.client.aquery_route(self.route, query_body=data)
return ChatJavelinAIGateway._create_chat_result(resp.dict())
@property
def _identifying_params(self) -> Dict[str, Any]:
return self._default_params
def _get_invocation_params(
self, stop: Optional[List[str]] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Get the parameters used to invoke the model FOR THE CALLBACKS."""
return {
**self._default_params,
**super()._get_invocation_params(stop=stop, **kwargs),
}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "javelin-ai-gateway-chat"
@staticmethod
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
content = _dict["content"]
if role == "user":
return HumanMessage(content=content)
elif role == "assistant":
return AIMessage(content=content)
elif role == "system":
return SystemMessage(content=content)
else:
return ChatMessage(content=content, role=role)
@staticmethod
def _raise_functions_not_supported() -> None:
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
@staticmethod
def _convert_message_to_dict(message: BaseMessage) -> dict:
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, FunctionMessage):
raise ValueError(
"Function messages are not supported by the Javelin AI Gateway. Please"
" create a feature request at https://docs.getjavelin.io"
)
else:
raise ValueError(f"Got unknown message type: {message}")
if "function_call" in message.additional_kwargs:
ChatJavelinAIGateway._raise_functions_not_supported()
if message.additional_kwargs:
logger.warning(
"Additional message arguments are unsupported by Javelin AI Gateway "
" and will be ignored: %s",
message.additional_kwargs,
)
return message_dict
@staticmethod
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for candidate in response["llm_response"]["choices"]:
message = ChatJavelinAIGateway._convert_dict_to_message(
candidate["message"]
)
message_metadata = candidate.get("metadata", {})
gen = ChatGeneration(
message=message,
generation_info=dict(message_metadata),
)
generations.append(gen)
response_metadata = response.get("metadata", {})
return ChatResult(generations=generations, llm_output=response_metadata)
| [] |
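A small usage sketch for the chat model above; the gateway URI, route name and API key are placeholders for a deployed Javelin AI Gateway:

```python
from langchain.chat_models.javelin_ai_gateway import ChatJavelinAIGateway
from langchain_core.schema.messages import HumanMessage, SystemMessage

chat = ChatJavelinAIGateway(
    gateway_uri="http://localhost:8000",       # placeholder gateway URI
    route="my-chat-route",                     # placeholder route name
    javelin_api_key="<your-javelin-api-key>",  # placeholder key
    params={"temperature": 0.1},
)
response = chat(
    [
        SystemMessage(content="You are a helpful assistant."),
        HumanMessage(content="Summarize what an API gateway does."),
    ]
)
print(response.content)
```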
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~azure_cognitive_search.py | from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from langchain_core.pydantic_v1 import Extra, root_validator
from langchain_core.schema import BaseRetriever, Document
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.utils import get_from_dict_or_env
class AzureCognitiveSearchRetriever(BaseRetriever):
"""`Azure Cognitive Search` service retriever."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
top_k: Optional[int] = None
"""Number of results to retrieve. Set to None to retrieve all results."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
)
values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
top_param = f"&$top={self.top_k}" if self.top_k else ""
return base_url + endpoint_path + f"&search={query}" + top_param
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
| [] |
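A brief sketch of using the retriever above, assuming the three `AZURE_COGNITIVE_SEARCH_*` environment variables checked by the validator are set:

```python
from langchain.retrievers.azure_cognitive_search import AzureCognitiveSearchRetriever

# Service name, index name and API key are read from
# AZURE_COGNITIVE_SEARCH_SERVICE_NAME / _INDEX_NAME / _API_KEY.
retriever = AzureCognitiveSearchRetriever(content_key="content", top_k=5)

docs = retriever.get_relevant_documents("what is langchain?")
for doc in docs:
    print(doc.page_content[:100])
```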
2024-01-10 | axgpt/langchain | libs~langchain~langchain~agents~agent_toolkits~vectorstore~toolkit.py | """Toolkit for interacting with a vector store."""
from typing import List
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.schema.language_model import BaseLanguageModel
from langchain_core.schema.vectorstore import VectorStore
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.llms.openai import OpenAI
from langchain.tools import BaseTool
from langchain.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: List[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
| [] |
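A hedged sketch of building the toolkit above; it assumes `OPENAI_API_KEY` is set and `faiss-cpu` is installed, with FAISS used purely as an illustrative vector store:

```python
from langchain.agents.agent_toolkits.vectorstore.toolkit import (
    VectorStoreInfo,
    VectorStoreToolkit,
)
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.vectorstores import FAISS

# Build a tiny illustrative vector store.
store = FAISS.from_texts(
    ["LangChain is a framework for building LLM applications."], OpenAIEmbeddings()
)
info = VectorStoreInfo(
    vectorstore=store, name="langchain_docs", description="Notes about LangChain"
)
toolkit = VectorStoreToolkit(vectorstore_info=info, llm=OpenAI(temperature=0))
tools = toolkit.get_tools()  # a QA tool plus a QA-with-sources tool
print([t.name for t in tools])
```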
2024-01-10 | axgpt/langchain | libs~langchain~langchain~embeddings~edenai.py | from typing import Any, Dict, List, Optional
from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from langchain_core.schema.embeddings import Embeddings
from langchain.utilities.requests import Requests
from langchain.utils import get_from_dict_or_env
class EdenAiEmbeddings(BaseModel, Embeddings):
"""EdenAI embedding.
    To use, you should have the environment variable ``EDENAI_API_KEY`` set
    with your API key, or pass it as a named parameter.
"""
edenai_api_key: Optional[str] = Field(None, description="EdenAI API Token")
provider: str = "openai"
"""embedding provider to use (eg: openai,google etc.)"""
model: Optional[str] = None
"""
model name for above provider (eg: 'text-davinci-003' for openai)
available models are shown on https://docs.edenai.co/ under 'available providers'
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@staticmethod
def get_user_agent() -> str:
from langchain import __version__
return f"langchain/{__version__}"
def _generate_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Compute embeddings using EdenAi api."""
url = "https://api.edenai.run/v2/text/embeddings"
headers = {
"accept": "application/json",
"content-type": "application/json",
"authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {"texts": texts, "providers": self.provider}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
temp = response.json()
provider_response = temp[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
embeddings = []
for embed_item in temp[self.provider]["items"]:
embedding = embed_item["embedding"]
embeddings.append(embedding)
return embeddings
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed a list of documents using EdenAI.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
return self._generate_embeddings(texts)
def embed_query(self, text: str) -> List[float]:
"""Embed a query using EdenAI.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._generate_embeddings([text])[0]
| [
"application/json"
] |
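A minimal usage sketch for the embeddings wrapper above, assuming `EDENAI_API_KEY` is set; the provider name is illustrative:

```python
from langchain.embeddings.edenai import EdenAiEmbeddings

embeddings = EdenAiEmbeddings(provider="openai")  # provider name is illustrative

query_vector = embeddings.embed_query("Hello, world!")
doc_vectors = embeddings.embed_documents(["first document", "second document"])
print(len(query_vector), len(doc_vectors))
```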
2024-01-10 | axgpt/langchain | libs~langchain~langchain~chat_loaders~telegram.py | import json
import logging
import os
import tempfile
import zipfile
from pathlib import Path
from typing import Iterator, List, Union
from langchain_core.schema import AIMessage, BaseMessage, HumanMessage
from langchain_core.schema.chat import ChatSession
from langchain.chat_loaders.base import BaseChatLoader
logger = logging.getLogger(__name__)
class TelegramChatLoader(BaseChatLoader):
"""Load `telegram` conversations to LangChain chat messages.
To export, use the Telegram Desktop app from
https://desktop.telegram.org/, select a conversation, click the three dots
in the top right corner, and select "Export chat history". Then select
"Machine-readable JSON" (preferred) to export. Note: the 'lite' versions of
the desktop app (like "Telegram for MacOS") do not support exporting chat
history.
"""
def __init__(
self,
path: Union[str, Path],
):
"""Initialize the TelegramChatLoader.
Args:
path (Union[str, Path]): Path to the exported Telegram chat zip,
directory, json, or HTML file.
"""
self.path = path if isinstance(path, str) else str(path)
def _load_single_chat_session_html(self, file_path: str) -> ChatSession:
"""Load a single chat session from an HTML file.
Args:
file_path (str): Path to the HTML file.
Returns:
ChatSession: The loaded chat session.
"""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ImportError(
"Please install the 'beautifulsoup4' package to load"
" Telegram HTML files. You can do this by running"
"'pip install beautifulsoup4' in your terminal."
)
with open(file_path, "r", encoding="utf-8") as file:
soup = BeautifulSoup(file, "html.parser")
results: List[Union[HumanMessage, AIMessage]] = []
previous_sender = None
for message in soup.select(".message.default"):
timestamp = message.select_one(".pull_right.date.details")["title"]
from_name_element = message.select_one(".from_name")
if from_name_element is None and previous_sender is None:
logger.debug("from_name not found in message")
continue
elif from_name_element is None:
from_name = previous_sender
else:
from_name = from_name_element.text.strip()
text = message.select_one(".text").text.strip()
results.append(
HumanMessage(
content=text,
additional_kwargs={
"sender": from_name,
"events": [{"message_time": timestamp}],
},
)
)
previous_sender = from_name
return ChatSession(messages=results)
def _load_single_chat_session_json(self, file_path: str) -> ChatSession:
"""Load a single chat session from a JSON file.
Args:
file_path (str): Path to the JSON file.
Returns:
ChatSession: The loaded chat session.
"""
with open(file_path, "r", encoding="utf-8") as file:
data = json.load(file)
messages = data.get("messages", [])
results: List[BaseMessage] = []
for message in messages:
text = message.get("text", "")
timestamp = message.get("date", "")
from_name = message.get("from", "")
results.append(
HumanMessage(
content=text,
additional_kwargs={
"sender": from_name,
"events": [{"message_time": timestamp}],
},
)
)
return ChatSession(messages=results)
def _iterate_files(self, path: str) -> Iterator[str]:
"""Iterate over files in a directory or zip file.
Args:
path (str): Path to the directory or zip file.
Yields:
str: Path to each file.
"""
if os.path.isfile(path) and path.endswith((".html", ".json")):
yield path
elif os.path.isdir(path):
for root, _, files in os.walk(path):
for file in files:
if file.endswith((".html", ".json")):
yield os.path.join(root, file)
elif zipfile.is_zipfile(path):
with zipfile.ZipFile(path) as zip_file:
for file in zip_file.namelist():
if file.endswith((".html", ".json")):
with tempfile.TemporaryDirectory() as temp_dir:
yield zip_file.extract(file, path=temp_dir)
def lazy_load(self) -> Iterator[ChatSession]:
"""Lazy load the messages from the chat file and yield them
in as chat sessions.
Yields:
ChatSession: The loaded chat session.
"""
for file_path in self._iterate_files(self.path):
if file_path.endswith(".html"):
yield self._load_single_chat_session_html(file_path)
elif file_path.endswith(".json"):
yield self._load_single_chat_session_json(file_path)
| [] |
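A short sketch of loading an exported chat with the loader above; the export path is a placeholder and may point at a zip, directory, `.json` or `.html` export:

```python
from langchain.chat_loaders.telegram import TelegramChatLoader

loader = TelegramChatLoader(path="./telegram_export/result.json")  # placeholder path

for session in loader.lazy_load():
    for message in session["messages"]:
        sender = message.additional_kwargs.get("sender")
        print(f"{sender}: {message.content}")
```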
2024-01-10 | axgpt/langchain | libs~langchain~tests~unit_tests~retrievers~test_multi_query.py | from typing import List
import pytest as pytest
from langchain_core.schema import Document
from langchain.retrievers.multi_query import _unique_documents
@pytest.mark.parametrize(
"documents,expected",
[
([], []),
([Document(page_content="foo")], [Document(page_content="foo")]),
([Document(page_content="foo")] * 2, [Document(page_content="foo")]),
(
[Document(page_content="foo", metadata={"bar": "baz"})] * 2,
[Document(page_content="foo", metadata={"bar": "baz"})],
),
(
[Document(page_content="foo", metadata={"bar": [1, 2]})] * 2,
[Document(page_content="foo", metadata={"bar": [1, 2]})],
),
(
[Document(page_content="foo", metadata={"bar": {1, 2}})] * 2,
[Document(page_content="foo", metadata={"bar": {1, 2}})],
),
(
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
),
],
)
def test__unique_documents(documents: List[Document], expected: List[Document]) -> None:
assert _unique_documents(documents) == expected
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~vectorstores~dingo.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Iterable, List, Optional, Tuple
import numpy as np
from langchain_core.schema.embeddings import Embeddings
from langchain_core.schema.vectorstore import VectorStore
from langchain.docstore.document import Document
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class Dingo(VectorStore):
"""`Dingo` vector store.
To use, you should have the ``dingodb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Dingo
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
dingo = Dingo(embeddings, "text")
"""
def __init__(
self,
embedding: Embeddings,
text_key: str,
*,
client: Any = None,
index_name: Optional[str] = None,
dimension: int = 1024,
host: Optional[List[str]] = None,
user: str = "root",
password: str = "123123",
self_id: bool = False,
):
"""Initialize with Dingo client."""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb."
)
host = host if host is not None else ["172.20.31.10:13000"]
# collection
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
self._text_key = text_key
self._client = dingo_client
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
if self_id is True:
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
dingo_client.create_index(index_name, dimension=dimension)
self._index_name = index_name
self._embedding = embedding
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
batch_size: int = 500,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = self._embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[self._text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = self._client.vector_add(
self._index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return ids
def similarity_search(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, search_params=search_params
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
search_params: Optional[dict] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return Dingo documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
search_params: Dictionary of argument(s) to filter on metadata
Returns:
List of Documents most similar to the query and score for each
"""
docs = []
query_obj = self._embedding.embed_query(query)
results = self._client.vector_search(
self._index_name, xq=query_obj, top_k=k, search_params=search_params
)
if not results:
return []
for res in results[0]["vectorWithDistances"]:
metadatas = res["scalarData"]
id = res["id"]
score = res["distance"]
text = metadatas[self._text_key]["fields"][0]["data"]
metadata = {"id": id, "text": text, "score": score}
for meta_key in metadatas.keys():
metadata[meta_key] = metadatas[meta_key]["fields"][0]["data"]
docs.append((Document(page_content=text, metadata=metadata), score))
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self._client.vector_search(
self._index_name, [embedding], search_params=search_params, top_k=k
)
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
[
item["vector"]["floatValues"]
for item in results[0]["vectorWithDistances"]
],
k=k,
lambda_mult=lambda_mult,
)
selected = []
for i in mmr_selected:
meta_data = {}
            for key, value in results[0]["vectorWithDistances"][i]["scalarData"].items():
                meta_data.update({str(key): value["fields"][0]["data"]})
selected.append(meta_data)
return [
Document(page_content=metadata.pop(self._text_key), metadata=metadata)
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
search_params: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embedding.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, search_params
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
text_key: str = "text",
index_name: Optional[str] = None,
dimension: int = 1024,
client: Any = None,
host: List[str] = ["172.20.31.10:13000"],
user: str = "root",
password: str = "123123",
batch_size: int = 500,
**kwargs: Any,
) -> Dingo:
"""Construct Dingo wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Dingo index
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain.vectorstores import Dingo
from langchain.embeddings import OpenAIEmbeddings
import dingodb
embeddings = OpenAIEmbeddings()
dingo = Dingo.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
try:
import dingodb
except ImportError:
raise ImportError(
"Could not import dingo python package. "
"Please install it with `pip install dingodb`."
)
if client is not None:
dingo_client = client
else:
try:
# connect to dingo db
dingo_client = dingodb.DingoDB(user, password, host)
except ValueError as e:
raise ValueError(f"Dingo failed to connect: {e}")
if kwargs is not None and kwargs.get("self_id") is True:
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(
index_name, dimension=dimension, auto_id=False
)
else:
if (
index_name is not None
and index_name not in dingo_client.get_index()
and index_name.upper() not in dingo_client.get_index()
):
dingo_client.create_index(index_name, dimension=dimension)
# Embed and create the documents
ids = ids or [str(uuid.uuid1().int)[:13] for _ in texts]
metadatas_list = []
texts = list(texts)
embeds = embedding.embed_documents(texts)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
metadata[text_key] = text
metadatas_list.append(metadata)
# upsert to Dingo
for i in range(0, len(list(texts)), batch_size):
j = i + batch_size
add_res = dingo_client.vector_add(
index_name, metadatas_list[i:j], embeds[i:j], ids[i:j]
)
if not add_res:
raise Exception("vector add fail")
return cls(embedding, text_key, client=dingo_client, index_name=index_name)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
return self._client.vector_delete(self._index_name, ids=ids)
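# --- Usage sketch (illustrative, not part of the library source) ------------
# Assumes a reachable DingoDB instance and an OpenAI API key; the host address,
# index name and sample texts are placeholders.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    store = Dingo.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=OpenAIEmbeddings(),
        index_name="langchain-demo",
        host=["127.0.0.1:13000"],
    )
    # Fetch a few candidates and reorder them by maximal marginal relevance.
    docs = store.max_marginal_relevance_search("foo", k=2, fetch_k=3)
    print(docs)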
| [] |
2024-01-10 | axgpt/langchain | libs~core~langchain_core~schema~chat_history.py | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List
from langchain_core.schema.messages import AIMessage, BaseMessage, HumanMessage
class BaseChatMessageHistory(ABC):
"""Abstract base class for storing chat message history.
See `ChatMessageHistory` for default implementation.
Example:
.. code-block:: python
class FileChatMessageHistory(BaseChatMessageHistory):
storage_path: str
session_id: str
@property
def messages(self):
with open(os.path.join(storage_path, session_id), 'r:utf-8') as f:
messages = json.loads(f.read())
return messages_from_dict(messages)
def add_message(self, message: BaseMessage) -> None:
messages = self.messages.append(_message_to_dict(message))
with open(os.path.join(storage_path, session_id), 'w') as f:
json.dump(f, messages)
def clear(self):
with open(os.path.join(storage_path, session_id), 'w') as f:
f.write("[]")
"""
messages: List[BaseMessage]
"""A list of Messages stored in-memory."""
def add_user_message(self, message: str) -> None:
"""Convenience method for adding a human message string to the store.
Args:
message: The string contents of a human message.
"""
self.add_message(HumanMessage(content=message))
def add_ai_message(self, message: str) -> None:
"""Convenience method for adding an AI message string to the store.
Args:
message: The string contents of an AI message.
"""
self.add_message(AIMessage(content=message))
@abstractmethod
def add_message(self, message: BaseMessage) -> None:
"""Add a Message object to the store.
Args:
message: A BaseMessage object to store.
"""
raise NotImplementedError()
@abstractmethod
def clear(self) -> None:
"""Remove all messages from the store"""
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~embeddings~cohere.py | from typing import Any, Dict, List, Optional
from langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from langchain_core.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
class CohereEmbeddings(BaseModel, Embeddings):
"""Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v3.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
"""Cohere client."""
async_client: Any #: :meta private:
"""Cohere async client."""
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
max_retries: Optional[int] = None
"""Maximum number of retries to make when generating."""
request_timeout: Optional[float] = None
"""Timeout in seconds for the Cohere API request."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
max_retries = values.get("max_retries")
request_timeout = values.get("request_timeout")
try:
import cohere
client_name = values["user_agent"]
values["client"] = cohere.Client(
cohere_api_key,
max_retries=max_retries,
timeout=request_timeout,
client_name=client_name,
)
values["async_client"] = cohere.AsyncClient(
cohere_api_key,
max_retries=max_retries,
timeout=request_timeout,
client_name=client_name,
)
except ImportError:
            raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model,
texts=texts,
input_type="search_document",
truncate=self.truncate,
).embeddings
return [list(map(float, e)) for e in embeddings]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Async call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = await self.async_client.embed(
model=self.model,
texts=texts,
input_type="search_document",
truncate=self.truncate,
)
return [list(map(float, e)) for e in embeddings.embeddings]
def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = self.client.embed(
model=self.model,
texts=[text],
input_type="search_query",
truncate=self.truncate,
).embeddings
return [list(map(float, e)) for e in embeddings][0]
async def aembed_query(self, text: str) -> List[float]:
"""Async call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = await self.async_client.embed(
model=self.model,
texts=[text],
input_type="search_query",
truncate=self.truncate,
)
return [list(map(float, e)) for e in embeddings.embeddings][0]
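# Usage sketch (illustrative): assumes the `cohere` package is installed and a
# valid key is available via the COHERE_API_KEY environment variable.
if __name__ == "__main__":
    embedder = CohereEmbeddings(model="embed-english-light-v3.0")
    doc_vectors = embedder.embed_documents(["hello world", "goodbye world"])
    query_vector = embedder.embed_query("hello")
    print(len(doc_vectors), len(query_vector))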
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~retrievers~chaindesk.py | from typing import Any, List, Optional
import aiohttp
import requests
from langchain_core.schema import BaseRetriever, Document
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
class ChaindeskRetriever(BaseRetriever):
"""`Chaindesk API` retriever."""
datastore_url: str
top_k: Optional[int]
api_key: Optional[str]
    def __init__(
        self,
        datastore_url: str,
        top_k: Optional[int] = None,
        api_key: Optional[str] = None,
    ):
        # Initialize the pydantic fields through the parent constructor.
        super().__init__(
            datastore_url=datastore_url, top_k=top_k, api_key=api_key
        )
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
response = requests.post(
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
)
data = response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
**kwargs: Any,
) -> List[Document]:
async with aiohttp.ClientSession() as session:
async with session.request(
"POST",
self.datastore_url,
json={
"query": query,
**({"topK": self.top_k} if self.top_k is not None else {}),
},
headers={
"Content-Type": "application/json",
**(
{"Authorization": f"Bearer {self.api_key}"}
if self.api_key is not None
else {}
),
},
) as response:
data = await response.json()
return [
Document(
page_content=r["text"],
metadata={"source": r["source"], "score": r["score"]},
)
for r in data["results"]
]
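# Usage sketch (illustrative): the datastore URL below is a placeholder for a
# real Chaindesk datastore query endpoint; the API key is optional.
if __name__ == "__main__":
    retriever = ChaindeskRetriever(
        datastore_url="https://app.chaindesk.ai/api/datastores/<datastore-id>/query",
        top_k=3,
    )
    for doc in retriever.get_relevant_documents("What is LangChain?"):
        print(doc.metadata["score"], doc.page_content[:80])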
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~utilities~apify.py | from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
from langchain_core.pydantic_v1 import BaseModel, root_validator
from langchain_core.schema.document import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from langchain.document_loaders import ApifyDatasetLoader
class ApifyWrapper(BaseModel):
"""Wrapper around Apify.
To use, you should have the ``apify-client`` python package installed,
and the environment variable ``APIFY_API_TOKEN`` set with your API key, or pass
`apify_api_token` as a named parameter to the constructor.
"""
apify_client: Any
apify_client_async: Any
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate environment.
Validate that an Apify API token is set and the apify-client
Python package exists in the current environment.
"""
apify_api_token = get_from_dict_or_env(
values, "apify_api_token", "APIFY_API_TOKEN"
)
try:
from apify_client import ApifyClient, ApifyClientAsync
values["apify_client"] = ApifyClient(apify_api_token)
values["apify_client_async"] = ApifyClientAsync(apify_api_token)
except ImportError:
raise ImportError(
"Could not import apify-client Python package. "
"Please install it with `pip install apify-client`."
)
return values
def call_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
actor_call = self.apify_client.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
async def acall_actor(
self,
actor_id: str,
run_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run an Actor on the Apify platform and wait for results to be ready.
Args:
actor_id (str): The ID or name of the Actor on the Apify platform.
run_input (Dict): The input object of the Actor that you're trying to run.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to
an instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
Actor run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
actor_call = await self.apify_client_async.actor(actor_id).call(
run_input=run_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=actor_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
def call_actor_task(
self,
task_id: str,
task_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
task_call = self.apify_client.task(task_id).call(
task_input=task_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=task_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
async def acall_actor_task(
self,
task_id: str,
task_input: Dict,
dataset_mapping_function: Callable[[Dict], Document],
*,
build: Optional[str] = None,
memory_mbytes: Optional[int] = None,
timeout_secs: Optional[int] = None,
) -> "ApifyDatasetLoader":
"""Run a saved Actor task on Apify and wait for results to be ready.
Args:
task_id (str): The ID or name of the task on the Apify platform.
task_input (Dict): The input object of the task that you're trying to run.
Overrides the task's saved input.
dataset_mapping_function (Callable): A function that takes a single
dictionary (an Apify dataset item) and converts it to an
instance of the Document class.
build (str, optional): Optionally specifies the actor build to run.
It can be either a build tag or build number.
memory_mbytes (int, optional): Optional memory limit for the run,
in megabytes.
timeout_secs (int, optional): Optional timeout for the run, in seconds.
Returns:
ApifyDatasetLoader: A loader that will fetch the records from the
task run's default dataset.
"""
from langchain.document_loaders import ApifyDatasetLoader
task_call = await self.apify_client_async.task(task_id).call(
task_input=task_input,
build=build,
memory_mbytes=memory_mbytes,
timeout_secs=timeout_secs,
)
return ApifyDatasetLoader(
dataset_id=task_call["defaultDatasetId"],
dataset_mapping_function=dataset_mapping_function,
)
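# Usage sketch (illustrative): assumes APIFY_API_TOKEN is set and the
# `apify-client` package is installed; the Actor id and its input format are
# assumptions, not guaranteed by this module.
if __name__ == "__main__":
    apify = ApifyWrapper()
    loader = apify.call_actor(
        actor_id="apify/website-content-crawler",
        run_input={"startUrls": [{"url": "https://python.langchain.com"}]},
        dataset_mapping_function=lambda item: Document(
            page_content=item.get("text", ""),
            metadata={"source": item.get("url", "")},
        ),
    )
    documents = loader.load()
    print(len(documents))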
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~prompts~example_selector~ngram_overlap.py | """Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from typing import Dict, List
import numpy as np
from langchain_core.prompts.example_selector.base import BaseExampleSelector
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, root_validator
def ngram_overlap_score(source: List[str], example: List[str]) -> float:
"""Compute ngram overlap score of source and example as sentence_bleu score.
Use sentence_bleu with method1 smoothing function and auto reweighting.
Return float value between 0.0 and 1.0 inclusive.
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
from nltk.translate.bleu_score import (
SmoothingFunction, # type: ignore
sentence_bleu,
)
hypotheses = source[0].split()
references = [s.split() for s in example]
return float(
sentence_bleu(
references,
hypotheses,
smoothing_function=SmoothingFunction().method1,
auto_reweigh=True,
)
)
class NGramOverlapExampleSelector(BaseExampleSelector, BaseModel):
"""Select and order examples based on ngram overlap score (sentence_bleu score).
https://www.nltk.org/_modules/nltk/translate/bleu_score.html
https://aclanthology.org/P02-1040.pdf
"""
examples: List[dict]
"""A list of the examples that the prompt template expects."""
example_prompt: PromptTemplate
"""Prompt template used to format the examples."""
threshold: float = -1.0
"""Threshold at which algorithm stops. Set to -1.0 by default.
For negative threshold:
select_examples sorts examples by ngram_overlap_score, but excludes none.
For threshold greater than 1.0:
select_examples excludes all examples, and returns an empty list.
For threshold equal to 0.0:
select_examples sorts examples by ngram_overlap_score,
and excludes examples with no ngram overlap with input.
"""
@root_validator(pre=True)
def check_dependencies(cls, values: Dict) -> Dict:
"""Check that valid dependencies exist."""
try:
from nltk.translate.bleu_score import ( # noqa: F401
SmoothingFunction,
sentence_bleu,
)
except ImportError as e:
raise ImportError(
"Not all the correct dependencies for this ExampleSelect exist."
"Please install nltk with `pip install nltk`."
) from e
return values
def add_example(self, example: Dict[str, str]) -> None:
"""Add new example to list."""
self.examples.append(example)
def select_examples(self, input_variables: Dict[str, str]) -> List[dict]:
"""Return list of examples sorted by ngram_overlap_score with input.
Descending order.
Excludes any examples with ngram_overlap_score less than or equal to threshold.
"""
inputs = list(input_variables.values())
examples = []
k = len(self.examples)
score = [0.0] * k
first_prompt_template_key = self.example_prompt.input_variables[0]
for i in range(k):
score[i] = ngram_overlap_score(
inputs, [self.examples[i][first_prompt_template_key]]
)
while True:
arg_max = np.argmax(score)
if (score[arg_max] < self.threshold) or abs(
score[arg_max] - self.threshold
) < 1e-9:
break
examples.append(self.examples[arg_max])
score[arg_max] = self.threshold - 1.0
return examples
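# Usage sketch (illustrative example data; requires the `nltk` package).
if __name__ == "__main__":
    example_prompt = PromptTemplate(
        input_variables=["input", "output"],
        template="Input: {input}\nOutput: {output}",
    )
    examples = [
        {"input": "See Spot run.", "output": "Ver correr a Spot."},
        {"input": "My dog barks.", "output": "Mi perro ladra."},
        {"input": "Spot can run.", "output": "Spot puede correr."},
    ]
    selector = NGramOverlapExampleSelector(
        examples=examples, example_prompt=example_prompt, threshold=0.0
    )
    # Examples come back sorted by ngram overlap with the input sentence;
    # examples with no overlap are excluded at threshold 0.0.
    print(selector.select_examples({"sentence": "Spot can run fast."}))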
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~evaluation~agents~trajectory_eval_chain.py | """A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes. It uses a language model
chain (LLMChain) to generate the reasoning and scores.
"""
import re
from typing import (
Any,
Dict,
List,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
cast,
)
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.schema import AgentAction, BaseOutputParser, OutputParserException
from langchain_core.schema.language_model import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.evaluation.agents.trajectory_eval_prompt import (
EVAL_CHAT_PROMPT,
TOOL_FREE_EVAL_CHAT_PROMPT,
)
from langchain.evaluation.schema import AgentTrajectoryEvaluator, LLMEvalChain
from langchain.tools.base import BaseTool
class TrajectoryEval(TypedDict):
"""A named tuple containing the score and reasoning for a trajectory."""
score: float
"""The score for the trajectory, normalized from 0 to 1."""
reasoning: str
"""The reasoning for the score."""
class TrajectoryOutputParser(BaseOutputParser):
"""Trajectory output parser."""
@property
def _type(self) -> str:
return "agent_trajectory"
def parse(self, text: str) -> TrajectoryEval:
"""Parse the output text and extract the score and reasoning.
Args:
text (str): The output text to parse.
Returns:
TrajectoryEval: A named tuple containing the normalized score and reasoning.
Raises:
OutputParserException: If the score is not found in the output text or
if the LLM's score is not a digit in the range 1-5.
"""
if "Score:" not in text:
raise OutputParserException(
f"Could not find score in model eval output: {text}"
)
reasoning, score_str = text.split("Score: ", maxsplit=1)
reasoning, score_str = reasoning.strip(), score_str.strip()
        # Use a regex to extract the score. The pattern matches the first number
        # in the string, whether it is an integer ("Score: 1"), a float
        # ("Score: 3.5"), or a value above 9 ("Score: 10"); anything other than
        # an integer in the range 1-5 is rejected below.
        _score = re.search(r"(\d+(\.\d+)?)", score_str)
        # If the score is missing or is a float, raise an exception.
if _score is None or "." in _score.group(1):
raise OutputParserException(
f"Score is not an integer digit in the range 1-5: {text}"
)
score = int(_score.group(1))
# If the score is not in the range 1-5, raise an exception.
if not 1 <= score <= 5:
raise OutputParserException(
f"Score is not a digit in the range 1-5: {text}"
)
normalized_score = (score - 1) / 4
return TrajectoryEval(score=normalized_score, reasoning=reasoning)
class TrajectoryEvalChain(AgentTrajectoryEvaluator, LLMEvalChain):
"""A chain for evaluating ReAct style agents.
This chain is used to evaluate ReAct style agents by reasoning about
the sequence of actions taken and their outcomes.
Example:
.. code-block:: python
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.evaluation import TrajectoryEvalChain
from langchain.tools import tool
@tool
def geography_answers(country: str, question: str) -> str:
\"\"\"Very helpful answers to geography questions.\"\"\"
return f"{country}? IDK - We may never know {question}."
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
agent = initialize_agent(
tools=[geography_answers],
llm=llm,
agent=AgentType.OPENAI_FUNCTIONS,
return_intermediate_steps=True,
)
question = "How many dwell in the largest minor region in Argentina?"
response = agent(question)
        eval_chain = TrajectoryEvalChain.from_llm(
            llm=llm, agent_tools=[geography_answers]
        )
result = eval_chain.evaluate_agent_trajectory(
input=question,
agent_trajectory=response["intermediate_steps"],
prediction=response["output"],
reference="Paris",
)
print(result["score"])
# 0
""" # noqa: E501
agent_tools: Optional[List[BaseTool]] = None
"""A list of tools available to the agent."""
eval_chain: LLMChain
"""The language model chain used for evaluation."""
output_parser: TrajectoryOutputParser = Field(
default_factory=TrajectoryOutputParser
)
"""The output parser used to parse the output."""
return_reasoning: bool = False # :meta private:
"""DEPRECATED. Reasoning always returned."""
class Config:
"""Configuration for the QAEvalChain."""
extra = Extra.ignore
@property
def requires_reference(self) -> bool:
"""Whether this evaluator requires a reference label."""
return False
@property
def _tools_description(self) -> str:
"""Get the description of the agent tools.
Returns:
str: The description of the agent tools.
"""
if self.agent_tools is None:
return ""
return "\n\n".join(
[
f"""Tool {i}: {tool.name}
Description: {tool.description}"""
for i, tool in enumerate(self.agent_tools, 1)
]
)
@staticmethod
def get_agent_trajectory(
steps: Union[str, Sequence[Tuple[AgentAction, str]]]
) -> str:
"""Get the agent trajectory as a formatted string.
Args:
steps (Union[str, List[Tuple[AgentAction, str]]]): The agent trajectory.
Returns:
str: The formatted agent trajectory.
"""
if isinstance(steps, str):
return steps
return "\n\n".join(
[
f"""Step {i}:
Tool used: {action.tool}
Tool input: {action.tool_input}
Tool output: {output}"""
for i, (action, output) in enumerate(steps, 1)
]
)
@staticmethod
def _format_reference(reference: Optional[str]) -> str:
"""Format the reference text.
Args:
reference (str): The reference text.
Returns:
str: The formatted reference text.
"""
if not reference:
return ""
return f"""
The following is the expected answer. Use this to measure correctness:
[GROUND_TRUTH]
{reference}
[END_GROUND_TRUTH]
"""
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
agent_tools: Optional[Sequence[BaseTool]] = None,
output_parser: Optional[TrajectoryOutputParser] = None,
**kwargs: Any,
) -> "TrajectoryEvalChain":
"""Create a TrajectoryEvalChain object from a language model chain.
Args:
llm (BaseChatModel): The language model chain.
agent_tools (Optional[Sequence[BaseTool]]): A list of tools
available to the agent.
output_parser (Optional[TrajectoryOutputParser]): The output parser
used to parse the chain output into a score.
Returns:
TrajectoryEvalChain: The TrajectoryEvalChain object.
"""
if not isinstance(llm, BaseChatModel):
raise NotImplementedError(
"Only chat models supported by the current trajectory eval"
)
if agent_tools:
prompt = EVAL_CHAT_PROMPT
else:
prompt = TOOL_FREE_EVAL_CHAT_PROMPT
eval_chain = LLMChain(llm=llm, prompt=prompt)
return cls(
agent_tools=agent_tools,
eval_chain=eval_chain,
output_parser=output_parser or TrajectoryOutputParser(),
**kwargs,
)
@property
def input_keys(self) -> List[str]:
"""Get the input keys for the chain.
Returns:
List[str]: The input keys.
"""
return ["question", "agent_trajectory", "answer", "reference"]
@property
def output_keys(self) -> List[str]:
"""Get the output keys for the chain.
Returns:
List[str]: The output keys.
"""
return ["score", "reasoning"]
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if "reference" not in inputs:
inputs["reference"] = self._format_reference(inputs.get("reference"))
return super().prep_inputs(inputs)
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input["tool_descriptions"] = self._tools_description
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
raw_output = self.eval_chain.run(
chain_input, callbacks=_run_manager.get_child()
)
return cast(dict, self.output_parser.parse(raw_output))
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the chain and generate the output.
Args:
inputs (Dict[str, str]): The input values for the chain.
run_manager (Optional[CallbackManagerForChainRun]): The callback
manager for the chain run.
Returns:
Dict[str, Any]: The output values of the chain.
"""
chain_input = {**inputs}
if self.agent_tools:
chain_input["tool_descriptions"] = self._tools_description
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
raw_output = await self.eval_chain.arun(
chain_input, callbacks=_run_manager.get_child()
)
return cast(dict, self.output_parser.parse(raw_output))
def _evaluate_agent_trajectory(
self,
*,
prediction: str,
input: str,
agent_trajectory: Sequence[Tuple[AgentAction, str]],
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Evaluate a trajectory.
Args:
prediction (str): The final predicted response.
input (str): The input to the agent.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
reference (Optional[str]): The reference answer.
callbacks (Callbacks): Callbacks to use for this chain run.
Returns:
dict: The evaluation result, which includes the score and optionally
the reasoning for reaching that.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": reference,
}
return self.__call__(
inputs=inputs,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
return_only_outputs=True,
)
async def _aevaluate_agent_trajectory(
self,
*,
prediction: str,
input: str,
agent_trajectory: Sequence[Tuple[AgentAction, str]],
reference: Optional[str] = None,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
include_run_info: bool = False,
**kwargs: Any,
) -> dict:
"""Asynchronously evaluate a trajectory.
Args:
prediction (str): The final predicted response.
input (str): The input to the agent.
agent_trajectory (List[Tuple[AgentAction, str]]):
The intermediate steps forming the agent trajectory.
reference (Optional[str]): The reference answer.
callbacks (Callbacks): Callbacks to use for this chain run.
Returns:
dict: The evaluation result, which includes the score and optionally
the reasoning for reaching that.
"""
inputs = {
"question": input,
"agent_trajectory": self.get_agent_trajectory(agent_trajectory),
"answer": prediction,
"reference": reference,
}
return await self.acall(
inputs=inputs,
callbacks=callbacks,
tags=tags,
metadata=metadata,
include_run_info=include_run_info,
return_only_outputs=True,
)
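# Output-parser sketch (illustrative): a "Score: N" suffix on the model output
# is normalized to the 0-1 range, so a 5 becomes 1.0.
if __name__ == "__main__":
    parser = TrajectoryOutputParser()
    parsed = parser.parse(
        "The agent chose an appropriate tool and answered correctly.\nScore: 5"
    )
    print(parsed["score"], parsed["reasoning"])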
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~chat_models~hunyuan.py | import base64
import hashlib
import hmac
import json
import logging
import time
from typing import Any, Dict, Iterator, List, Mapping, Optional, Type
from urllib.parse import urlparse
import requests
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.schema import (
AIMessage,
BaseMessage,
ChatGeneration,
ChatMessage,
ChatResult,
HumanMessage,
)
from langchain_core.schema.messages import (
AIMessageChunk,
BaseMessageChunk,
ChatMessageChunk,
HumanMessageChunk,
)
from langchain_core.schema.output import ChatGenerationChunk
from langchain_core.utils import (
convert_to_secret_str,
get_pydantic_field_names,
)
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
DEFAULT_API_BASE = "https://hunyuan.cloud.tencent.com"
DEFAULT_PATH = "/hyllm/v1/chat/completions"
def _convert_message_to_dict(message: BaseMessage) -> dict:
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage:
role = _dict["role"]
if role == "user":
return HumanMessage(content=_dict["content"])
elif role == "assistant":
return AIMessage(content=_dict.get("content", "") or "")
else:
return ChatMessage(content=_dict["content"], role=role)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: Type[BaseMessageChunk]
) -> BaseMessageChunk:
role = _dict.get("role")
content = _dict.get("content") or ""
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(content=content)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role)
else:
return default_class(content=content)
# signature generation
# https://cloud.tencent.com/document/product/1729/97732#532252ce-e960-48a7-8821-940a9ce2ccf3
def _signature(secret_key: SecretStr, url: str, payload: Dict[str, Any]) -> str:
sorted_keys = sorted(payload.keys())
url_info = urlparse(url)
sign_str = url_info.netloc + url_info.path + "?"
for key in sorted_keys:
value = payload[key]
if isinstance(value, list) or isinstance(value, dict):
value = json.dumps(value, separators=(",", ":"))
elif isinstance(value, float):
value = "%g" % value
sign_str = sign_str + key + "=" + str(value) + "&"
sign_str = sign_str[:-1]
hmacstr = hmac.new(
key=secret_key.get_secret_value().encode("utf-8"),
msg=sign_str.encode("utf-8"),
digestmod=hashlib.sha1,
).digest()
return base64.b64encode(hmacstr).decode("utf-8")
def _create_chat_result(response: Mapping[str, Any]) -> ChatResult:
generations = []
for choice in response["choices"]:
message = _convert_dict_to_message(choice["messages"])
generations.append(ChatGeneration(message=message))
token_usage = response["usage"]
llm_output = {"token_usage": token_usage}
return ChatResult(generations=generations, llm_output=llm_output)
class ChatHunyuan(BaseChatModel):
"""Tencent Hunyuan chat models API by Tencent.
For more information, see https://cloud.tencent.com/document/product/1729
"""
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"hunyuan_app_id": "HUNYUAN_APP_ID",
"hunyuan_secret_id": "HUNYUAN_SECRET_ID",
"hunyuan_secret_key": "HUNYUAN_SECRET_KEY",
}
@property
def lc_serializable(self) -> bool:
return True
hunyuan_api_base: str = Field(default=DEFAULT_API_BASE)
"""Hunyuan custom endpoints"""
hunyuan_app_id: Optional[int] = None
"""Hunyuan App ID"""
hunyuan_secret_id: Optional[str] = None
"""Hunyuan Secret ID"""
hunyuan_secret_key: Optional[SecretStr] = None
"""Hunyuan Secret Key"""
streaming: bool = False
"""Whether to stream the results or not."""
request_timeout: int = 60
"""Timeout for requests to Hunyuan API. Default is 60 seconds."""
query_id: Optional[str] = None
"""Query id for troubleshooting"""
temperature: float = 1.0
"""What sampling temperature to use."""
top_p: float = 1.0
"""What probability mass to use."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for API call not explicitly specified."""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["hunyuan_api_base"] = get_from_dict_or_env(
values,
"hunyuan_api_base",
"HUNYUAN_API_BASE",
DEFAULT_API_BASE,
)
values["hunyuan_app_id"] = get_from_dict_or_env(
values,
"hunyuan_app_id",
"HUNYUAN_APP_ID",
)
values["hunyuan_secret_id"] = get_from_dict_or_env(
values,
"hunyuan_secret_id",
"HUNYUAN_SECRET_ID",
)
values["hunyuan_secret_key"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"hunyuan_secret_key",
"HUNYUAN_SECRET_KEY",
)
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Hunyuan API."""
normal_params = {
"app_id": self.hunyuan_app_id,
"secret_id": self.hunyuan_secret_id,
"temperature": self.temperature,
"top_p": self.top_p,
}
if self.query_id is not None:
normal_params["query_id"] = self.query_id
return {**normal_params, **self.model_kwargs}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages=messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
res = self._chat(messages, **kwargs)
response = res.json()
if "error" in response:
raise ValueError(f"Error from Hunyuan api response: {response}")
return _create_chat_result(response)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
res = self._chat(messages, **kwargs)
default_chunk_class = AIMessageChunk
for chunk in res.iter_lines():
response = json.loads(chunk)
if "error" in response:
raise ValueError(f"Error from Hunyuan api response: {response}")
for choice in response["choices"]:
chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
default_chunk_class = chunk.__class__
yield ChatGenerationChunk(message=chunk)
if run_manager:
run_manager.on_llm_new_token(chunk.content)
def _chat(self, messages: List[BaseMessage], **kwargs: Any) -> requests.Response:
if self.hunyuan_secret_key is None:
raise ValueError("Hunyuan secret key is not set.")
parameters = {**self._default_params, **kwargs}
headers = parameters.pop("headers", {})
timestamp = parameters.pop("timestamp", int(time.time()))
expired = parameters.pop("expired", timestamp + 24 * 60 * 60)
payload = {
"timestamp": timestamp,
"expired": expired,
"messages": [_convert_message_to_dict(m) for m in messages],
**parameters,
}
if self.streaming:
payload["stream"] = 1
url = self.hunyuan_api_base + DEFAULT_PATH
res = requests.post(
url=url,
timeout=self.request_timeout,
headers={
"Content-Type": "application/json",
"Authorization": _signature(
secret_key=self.hunyuan_secret_key, url=url, payload=payload
),
**headers,
},
json=payload,
stream=self.streaming,
)
return res
@property
def _llm_type(self) -> str:
return "hunyuan-chat"
| [
"content"
] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~vectorstores~tencentvectordb.py | """Wrapper around the Tencent vector database."""
from __future__ import annotations
import json
import logging
import time
from typing import Any, Dict, Iterable, List, Optional, Tuple
import numpy as np
from langchain_core.schema.embeddings import Embeddings
from langchain_core.schema.vectorstore import VectorStore
from langchain_core.utils import guard_import
from langchain.docstore.document import Document
from langchain.vectorstores.utils import maximal_marginal_relevance
logger = logging.getLogger(__name__)
class ConnectionParams:
"""Tencent vector DB Connection params.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/95820
Attribute:
url (str) : The access address of the vector database server
that the client needs to connect to.
key (str): API key for client to access the vector database server,
which is used for authentication.
username (str) : Account for client to access the vector database server.
timeout (int) : Request Timeout.
"""
def __init__(self, url: str, key: str, username: str = "root", timeout: int = 10):
self.url = url
self.key = key
self.username = username
self.timeout = timeout
class IndexParams:
"""Tencent vector DB Index params.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/95826
"""
def __init__(
self,
dimension: int,
shard: int = 1,
replicas: int = 2,
index_type: str = "HNSW",
metric_type: str = "L2",
params: Optional[Dict] = None,
):
self.dimension = dimension
self.shard = shard
self.replicas = replicas
self.index_type = index_type
self.metric_type = metric_type
self.params = params
class TencentVectorDB(VectorStore):
"""Initialize wrapper around the tencent vector database.
In order to use this you need to have a database instance.
See the following documentation for details:
https://cloud.tencent.com/document/product/1709/94951
"""
field_id: str = "id"
field_vector: str = "vector"
field_text: str = "text"
field_metadata: str = "metadata"
def __init__(
self,
embedding: Embeddings,
connection_params: ConnectionParams,
index_params: IndexParams = IndexParams(128),
database_name: str = "LangChainDatabase",
collection_name: str = "LangChainCollection",
drop_old: Optional[bool] = False,
):
self.document = guard_import("tcvectordb.model.document")
tcvectordb = guard_import("tcvectordb")
self.embedding_func = embedding
self.index_params = index_params
self.vdb_client = tcvectordb.VectorDBClient(
url=connection_params.url,
username=connection_params.username,
key=connection_params.key,
timeout=connection_params.timeout,
)
db_list = self.vdb_client.list_databases()
db_exist: bool = False
for db in db_list:
if database_name == db.database_name:
db_exist = True
break
if db_exist:
self.database = self.vdb_client.database(database_name)
else:
self.database = self.vdb_client.create_database(database_name)
try:
self.collection = self.database.describe_collection(collection_name)
if drop_old:
self.database.drop_collection(collection_name)
self._create_collection(collection_name)
except tcvectordb.exceptions.VectorDBException:
self._create_collection(collection_name)
def _create_collection(self, collection_name: str) -> None:
enum = guard_import("tcvectordb.model.enum")
vdb_index = guard_import("tcvectordb.model.index")
index_type = None
for k, v in enum.IndexType.__members__.items():
if k == self.index_params.index_type:
index_type = v
if index_type is None:
raise ValueError("unsupported index_type")
metric_type = None
for k, v in enum.MetricType.__members__.items():
if k == self.index_params.metric_type:
metric_type = v
if metric_type is None:
raise ValueError("unsupported metric_type")
if self.index_params.params is None:
params = vdb_index.HNSWParams(m=16, efconstruction=200)
else:
params = vdb_index.HNSWParams(
m=self.index_params.params.get("M", 16),
efconstruction=self.index_params.params.get("efConstruction", 200),
)
index = vdb_index.Index(
vdb_index.FilterIndex(
self.field_id, enum.FieldType.String, enum.IndexType.PRIMARY_KEY
),
vdb_index.VectorIndex(
self.field_vector,
self.index_params.dimension,
index_type,
metric_type,
params,
),
vdb_index.FilterIndex(
self.field_text, enum.FieldType.String, enum.IndexType.FILTER
),
vdb_index.FilterIndex(
self.field_metadata, enum.FieldType.String, enum.IndexType.FILTER
),
)
self.collection = self.database.create_collection(
name=collection_name,
shard=self.index_params.shard,
replicas=self.index_params.replicas,
description="Collection for LangChain",
index=index,
)
@property
def embeddings(self) -> Embeddings:
return self.embedding_func
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
connection_params: Optional[ConnectionParams] = None,
index_params: Optional[IndexParams] = None,
database_name: str = "LangChainDatabase",
collection_name: str = "LangChainCollection",
drop_old: Optional[bool] = False,
**kwargs: Any,
) -> TencentVectorDB:
"""Create a collection, indexes it with HNSW, and insert data."""
if len(texts) == 0:
raise ValueError("texts is empty")
if connection_params is None:
raise ValueError("connection_params is empty")
try:
embeddings = embedding.embed_documents(texts[0:1])
except NotImplementedError:
embeddings = [embedding.embed_query(texts[0])]
dimension = len(embeddings[0])
if index_params is None:
index_params = IndexParams(dimension=dimension)
else:
index_params.dimension = dimension
vector_db = cls(
embedding=embedding,
connection_params=connection_params,
index_params=index_params,
database_name=database_name,
collection_name=collection_name,
drop_old=drop_old,
)
vector_db.add_texts(texts=texts, metadatas=metadatas)
return vector_db
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
timeout: Optional[int] = None,
batch_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Insert text data into TencentVectorDB."""
texts = list(texts)
try:
embeddings = self.embedding_func.embed_documents(texts)
except NotImplementedError:
embeddings = [self.embedding_func.embed_query(x) for x in texts]
if len(embeddings) == 0:
logger.debug("Nothing to insert, skipping.")
return []
pks: list[str] = []
total_count = len(embeddings)
for start in range(0, total_count, batch_size):
# Grab end index
docs = []
end = min(start + batch_size, total_count)
for id in range(start, end, 1):
metadata = "{}"
if metadatas is not None:
metadata = json.dumps(metadatas[id])
doc = self.document.Document(
id="{}-{}-{}".format(time.time_ns(), hash(texts[id]), id),
vector=embeddings[id],
text=texts[id],
metadata=metadata,
)
docs.append(doc)
pks.append(str(id))
self.collection.upsert(docs, timeout)
return pks
def similarity_search(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score(
query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score."""
# Embed the query text.
embedding = self.embedding_func.embed_query(query)
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return res
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a similarity search against the query string."""
res = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
)
return [doc for doc, _ in res]
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Perform a search on a query string and return results with score."""
filter = None if expr is None else self.document.Filter(expr)
ef = 10 if param is None else param.get("ef", 10)
res: List[List[Dict]] = self.collection.search(
vectors=[embedding],
filter=filter,
params=self.document.HNSWSearchParams(ef=ef),
retrieve_vector=False,
limit=k,
timeout=timeout,
)
# Organize results.
ret: List[Tuple[Document, float]] = []
if res is None or len(res) == 0:
return ret
for result in res[0]:
meta = result.get(self.field_metadata)
if meta is not None:
meta = json.loads(meta)
doc = Document(page_content=result.get(self.field_text), metadata=meta)
pair = (doc, result.get("score", 0.0))
ret.append(pair)
return ret
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR."""
embedding = self.embedding_func.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
param=param,
expr=expr,
timeout=timeout,
**kwargs,
)
def max_marginal_relevance_search_by_vector(
self,
embedding: list[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
param: Optional[dict] = None,
expr: Optional[str] = None,
timeout: Optional[int] = None,
**kwargs: Any,
) -> List[Document]:
"""Perform a search and return results that are reordered by MMR."""
filter = None if expr is None else self.document.Filter(expr)
ef = 10 if param is None else param.get("ef", 10)
res: List[List[Dict]] = self.collection.search(
vectors=[embedding],
filter=filter,
params=self.document.HNSWSearchParams(ef=ef),
retrieve_vector=True,
limit=fetch_k,
timeout=timeout,
)
# Organize results.
documents = []
ordered_result_embeddings = []
for result in res[0]:
meta = result.get(self.field_metadata)
if meta is not None:
meta = json.loads(meta)
doc = Document(page_content=result.get(self.field_text), metadata=meta)
documents.append(doc)
ordered_result_embeddings.append(result.get(self.field_vector))
# Get the new order of results.
new_ordering = maximal_marginal_relevance(
np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult
)
# Reorder the values and return.
ret = []
for x in new_ordering:
# Function can return -1 index
if x == -1:
break
else:
ret.append(documents[x])
return ret
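# Usage sketch (illustrative): the URL and key are placeholders for a real
# Tencent vector database instance; requires the `tcvectordb` package.
if __name__ == "__main__":
    from langchain.embeddings import OpenAIEmbeddings
    conn = ConnectionParams(url="http://10.0.0.1", key="<api-key>", username="root")
    store = TencentVectorDB.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=OpenAIEmbeddings(),
        connection_params=conn,
        drop_old=True,
    )
    print(store.similarity_search("foo", k=1))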
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~retrievers~test_wikipedia.py | """Integration test for Wikipedia API Wrapper."""
from typing import List
import pytest
from langchain_core.schema import Document
from langchain.retrievers import WikipediaRetriever
@pytest.fixture
def retriever() -> WikipediaRetriever:
return WikipediaRetriever()
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert_docs(docs, all_meta=False)
def test_load_success_all_meta(retriever: WikipediaRetriever) -> None:
retriever.load_all_available_meta = True
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) > 1
assert_docs(docs, all_meta=True)
def test_load_success_init_args() -> None:
retriever = WikipediaRetriever(
lang="en", top_k_results=1, load_all_available_meta=True
)
docs = retriever.get_relevant_documents("HUNTER X HUNTER")
assert len(docs) == 1
assert_docs(docs, all_meta=True)
def test_load_no_result(retriever: WikipediaRetriever) -> None:
docs = retriever.get_relevant_documents(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
assert not docs
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~vectorstores~conftest.py | import os
from typing import Generator, List, Union
import pytest
from langchain_core.schema import Document
from vcr.request import Request
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
# Those environment variables turn on Deep Lake pytest mode.
# It significantly makes tests run much faster.
# Need to run before `import deeplake`
os.environ["BUGGER_OFF"] = "true"
os.environ["DEEPLAKE_DOWNLOAD_PATH"] = "./testing/local_storage"
os.environ["DEEPLAKE_PYTEST_ENABLED"] = "true"
# This fixture returns a dictionary containing filter_headers options
# for replacing certain headers with dummy values during cassette playback
# Specifically, it replaces the authorization header with a dummy value to
# prevent sensitive data from being recorded in the cassette.
# It also filters request to certain hosts (specified in the `ignored_hosts` list)
# to prevent data from being recorded in the cassette.
@pytest.fixture(scope="module")
def vcr_config() -> dict:
skipped_host = ["pinecone.io"]
def before_record_response(response: dict) -> Union[dict, None]:
return response
def before_record_request(request: Request) -> Union[Request, None]:
for host in skipped_host:
if request.host.startswith(host) or request.host.endswith(host):
return None
return request
return {
"before_record_request": before_record_request,
"before_record_response": before_record_response,
"filter_headers": [
("authorization", "authorization-DUMMY"),
("X-OpenAI-Client-User-Agent", "X-OpenAI-Client-User-Agent-DUMMY"),
("Api-Key", "Api-Key-DUMMY"),
("User-Agent", "User-Agent-DUMMY"),
],
"ignore_localhost": True,
}
# Define a fixture that yields a generator object returning a list of documents
@pytest.fixture(scope="function")
def documents() -> Generator[List[Document], None, None]:
"""Return a generator that yields a list of documents."""
# Create a CharacterTextSplitter object for splitting the documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# Load the documents from a file located in the fixtures directory
documents = TextLoader(
os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
).load()
# Yield the documents split into chunks
yield text_splitter.split_documents(documents)
@pytest.fixture(scope="function")
def texts() -> Generator[List[str], None, None]:
# Load the documents from a file located in the fixtures directory
documents = TextLoader(
os.path.join(os.path.dirname(__file__), "fixtures", "sharks.txt")
).load()
yield [doc.page_content for doc in documents]
@pytest.fixture(scope="module")
def embedding_openai() -> OpenAIEmbeddings:
return OpenAIEmbeddings()
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~tests~integration_tests~vectorstores~fake_embeddings.py | """Fake Embedding class for testing purposes."""
import math
from typing import List
from langchain_core.schema.embeddings import Embeddings
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index."""
return [[float(1.0)] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
return self.embed_documents(texts)
def embed_query(self, text: str) -> List[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents."""
return [float(1.0)] * 9 + [float(0.0)]
async def aembed_query(self, text: str) -> List[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts."""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: List[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [float(1.0)] * (self.dimensionality - 1) + [
float(self.known_texts.index(text))
]
out_vectors.append(vector)
return out_vectors
    def embed_query(self, text: str) -> List[float]:
        """Return consistent embeddings for the text; unseen texts are added to
        the known set and embedded by their index."""
        return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""
From angles (as strings in units of pi) to unit embedding vectors on a circle.
"""
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""
Make a list of texts into a list of embedding vectors.
"""
return [self.embed_query(text) for text in texts]
def embed_query(self, text: str) -> List[float]:
"""
Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
        except ValueError:
            # Non-numeric input is treated as a plain test string and mapped to
            # the degenerate zero vector.
            return [0.0, 0.0]
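# --- Editor's addition: a minimal, hedged usage sketch (not part of the original
# test helpers); it only exercises the classes defined above with their defaults.
if __name__ == "__main__":
    consistent = ConsistentFakeEmbeddings()
    first = consistent.embed_documents(["foo", "bar"])
    # "bar" keeps index 1, so a later query returns exactly the same vector.
    assert consistent.embed_query("bar") == first[1]
    angular = AngularTwoDimensionalEmbeddings()
    # "0.5" means half pi, i.e. the unit vector pointing straight up.
    vec = angular.embed_query("0.5")
    assert abs(vec[0]) < 1e-9 and abs(vec[1] - 1.0) < 1e-9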
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~vectorstores~mongodb_atlas.py | from __future__ import annotations
import logging
from typing import (
TYPE_CHECKING,
Any,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
)
import numpy as np
from langchain_core.schema.embeddings import Embeddings
from langchain_core.schema.vectorstore import VectorStore
from langchain.docstore.document import Document
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from pymongo.collection import Collection
MongoDBDocumentType = TypeVar("MongoDBDocumentType", bound=Dict[str, Any])
logger = logging.getLogger(__name__)
DEFAULT_INSERT_BATCH_SIZE = 100
class MongoDBAtlasVectorSearch(VectorStore):
"""`MongoDB Atlas Vector Search` vector store.
To use, you should have both:
- the ``pymongo`` python package installed
- a connection string associated with a MongoDB Atlas Cluster having deployed an
Atlas Search index
Example:
.. code-block:: python
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings.openai import OpenAIEmbeddings
from pymongo import MongoClient
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch(collection, embeddings)
"""
def __init__(
self,
collection: Collection[MongoDBDocumentType],
embedding: Embeddings,
*,
index_name: str = "default",
text_key: str = "text",
embedding_key: str = "embedding",
):
"""
Args:
collection: MongoDB collection to add the texts to.
embedding: Text embedding model to use.
text_key: MongoDB field that will contain the text for each
document.
embedding_key: MongoDB field that will contain the embedding for
each document.
index_name: Name of the Atlas Search index.
"""
self._collection = collection
self._embedding = embedding
self._index_name = index_name
self._text_key = text_key
self._embedding_key = embedding_key
@property
def embeddings(self) -> Embeddings:
return self._embedding
@classmethod
def from_connection_string(
cls,
connection_string: str,
namespace: str,
embedding: Embeddings,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct a `MongoDB Atlas Vector Search` vector store
from a MongoDB connection URI.
Args:
connection_string: A valid MongoDB connection URI.
namespace: A valid MongoDB namespace (database and collection).
embedding: The text embedding model to use for the vector store.
Returns:
A new MongoDBAtlasVectorSearch instance.
"""
try:
from importlib.metadata import version
from pymongo import DriverInfo, MongoClient
except ImportError:
raise ImportError(
"Could not import pymongo, please install it with "
"`pip install pymongo`."
)
client: MongoClient = MongoClient(
connection_string,
driver=DriverInfo(name="Langchain", version=version("langchain")),
)
db_name, collection_name = namespace.split(".")
collection = client[db_name][collection_name]
return cls(collection, embedding, **kwargs)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[str, Any]]] = None,
**kwargs: Any,
) -> List:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
batch_size = kwargs.get("batch_size", DEFAULT_INSERT_BATCH_SIZE)
_metadatas: Union[List, Generator] = metadatas or ({} for _ in texts)
texts_batch = []
metadatas_batch = []
result_ids = []
for i, (text, metadata) in enumerate(zip(texts, _metadatas)):
texts_batch.append(text)
metadatas_batch.append(metadata)
if (i + 1) % batch_size == 0:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
texts_batch = []
metadatas_batch = []
if texts_batch:
result_ids.extend(self._insert_texts(texts_batch, metadatas_batch))
return result_ids
def _insert_texts(self, texts: List[str], metadatas: List[Dict[str, Any]]) -> List:
if not texts:
return []
# Embed and create the documents
embeddings = self._embedding.embed_documents(texts)
to_insert = [
{self._text_key: t, self._embedding_key: embedding, **m}
for t, m, embedding in zip(texts, metadatas, embeddings)
]
# insert the documents in MongoDB Atlas
insert_result = self._collection.insert_many(to_insert) # type: ignore
return insert_result.inserted_ids
def _similarity_search_with_score(
self,
embedding: List[float],
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
params = {
"queryVector": embedding,
"path": self._embedding_key,
"numCandidates": k * 10,
"limit": k,
"index": self._index_name,
}
if pre_filter:
params["filter"] = pre_filter
query = {"$vectorSearch": params}
pipeline = [
query,
{"$set": {"score": {"$meta": "vectorSearchScore"}}},
]
if post_filter_pipeline is not None:
pipeline.extend(post_filter_pipeline)
cursor = self._collection.aggregate(pipeline) # type: ignore[arg-type]
docs = []
for res in cursor:
text = res.pop(self._text_key)
score = res.pop("score")
docs.append((Document(page_content=text, metadata=res), score))
return docs
def similarity_search_with_score(
self,
query: str,
*,
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
) -> List[Tuple[Document, float]]:
"""Return MongoDB documents most similar to the given query and their scores.
        Uses the ``$vectorSearch`` aggregation stage available in MongoDB Atlas
        Vector Search, the successor to the earlier ``knnBeta`` operator.
        For more: https://www.mongodb.com/docs/atlas/atlas-vector-search/
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
pre_filter: (Optional) dictionary of argument(s) to prefilter document
fields on.
            post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
                applied after the vector search.
Returns:
List of documents most similar to the query and their scores.
"""
embedding = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(
embedding,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return docs
def similarity_search(
self,
query: str,
k: int = 4,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return MongoDB documents most similar to the given query.
        Uses the ``$vectorSearch`` aggregation stage available in MongoDB Atlas
        Vector Search, the successor to the earlier ``knnBeta`` operator.
        For more: https://www.mongodb.com/docs/atlas/atlas-vector-search/
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
pre_filter: (Optional) dictionary of argument(s) to prefilter document
fields on.
            post_filter_pipeline: (Optional) Pipeline of MongoDB aggregation stages
                applied after the vector search.
        Returns:
            List of documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
return [doc for doc, _ in docs_and_scores]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
pre_filter: Optional[Dict] = None,
post_filter_pipeline: Optional[List[Dict]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return documents selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: (Optional) number of documents to return. Defaults to 4.
fetch_k: (Optional) number of documents to fetch before passing to MMR
algorithm. Defaults to 20.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
pre_filter: (Optional) dictionary of argument(s) to prefilter on document
fields.
            post_filter_pipeline: (Optional) pipeline of MongoDB aggregation stages
                applied after the vector search.
Returns:
List of documents selected by maximal marginal relevance.
"""
query_embedding = self._embedding.embed_query(query)
docs = self._similarity_search_with_score(
query_embedding,
k=fetch_k,
pre_filter=pre_filter,
post_filter_pipeline=post_filter_pipeline,
)
mmr_doc_indexes = maximal_marginal_relevance(
np.array(query_embedding),
[doc.metadata[self._embedding_key] for doc, _ in docs],
k=k,
lambda_mult=lambda_mult,
)
mmr_docs = [docs[i][0] for i in mmr_doc_indexes]
return mmr_docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict]] = None,
collection: Optional[Collection[MongoDBDocumentType]] = None,
**kwargs: Any,
) -> MongoDBAtlasVectorSearch:
"""Construct a `MongoDB Atlas Vector Search` vector store from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided MongoDB Atlas Vector Search index
(Lucene)
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from pymongo import MongoClient
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.embeddings import OpenAIEmbeddings
mongo_client = MongoClient("<YOUR-CONNECTION-STRING>")
collection = mongo_client["<db_name>"]["<collection_name>"]
embeddings = OpenAIEmbeddings()
vectorstore = MongoDBAtlasVectorSearch.from_texts(
texts,
embeddings,
metadatas=metadatas,
collection=collection
)
"""
if collection is None:
raise ValueError("Must provide 'collection' named parameter.")
vectorstore = cls(collection, embedding, **kwargs)
vectorstore.add_texts(texts, metadatas=metadatas)
return vectorstore
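# --- Editor's addition: a hedged usage sketch (not part of the original module).
# The connection string, namespace and the "year" metadata field are placeholders;
# an OpenAI API key and a deployed Atlas Vector Search index are assumed.
if __name__ == "__main__":
    from langchain.embeddings.openai import OpenAIEmbeddings
    store = MongoDBAtlasVectorSearch.from_connection_string(
        "mongodb+srv://<user>:<password>@<cluster>/",
        "my_db.my_collection",
        OpenAIEmbeddings(),
    )
    docs = store.similarity_search(
        "How do sharks hunt?",
        k=3,
        pre_filter={"year": {"$gte": 2020}},  # assumes a filterable 'year' field
        post_filter_pipeline=[{"$project": {"embedding": 0}}],
    )
    print([d.page_content for d in docs])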
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~embeddings~ernie.py | import asyncio
import logging
import threading
from functools import partial
from typing import Dict, List, Optional
import requests
from langchain_core.pydantic_v1 import BaseModel, root_validator
from langchain_core.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ErnieEmbeddings(BaseModel, Embeddings):
"""`Ernie Embeddings V1` embedding models."""
ernie_api_base: Optional[str] = None
ernie_client_id: Optional[str] = None
ernie_client_secret: Optional[str] = None
access_token: Optional[str] = None
chunk_size: int = 16
model_name = "ErnieBot-Embedding-V1"
_lock = threading.Lock()
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["ernie_api_base"] = get_from_dict_or_env(
values, "ernie_api_base", "ERNIE_API_BASE", "https://aip.baidubce.com"
)
values["ernie_client_id"] = get_from_dict_or_env(
values,
"ernie_client_id",
"ERNIE_CLIENT_ID",
)
values["ernie_client_secret"] = get_from_dict_or_env(
values,
"ernie_client_secret",
"ERNIE_CLIENT_SECRET",
)
return values
def _embedding(self, json: object) -> dict:
base_url = (
f"{self.ernie_api_base}/rpc/2.0/ai_custom/v1/wenxinworkshop/embeddings"
)
resp = requests.post(
f"{base_url}/embedding-v1",
headers={
"Content-Type": "application/json",
},
params={"access_token": self.access_token},
json=json,
)
return resp.json()
def _refresh_access_token_with_lock(self) -> None:
with self._lock:
logger.debug("Refreshing access token")
base_url: str = f"{self.ernie_api_base}/oauth/2.0/token"
resp = requests.post(
base_url,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
},
params={
"grant_type": "client_credentials",
"client_id": self.ernie_client_id,
"client_secret": self.ernie_client_secret,
},
)
self.access_token = str(resp.json().get("access_token"))
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Embed search docs.
Args:
texts: The list of texts to embed
Returns:
List[List[float]]: List of embeddings, one for each text.
"""
if not self.access_token:
self._refresh_access_token_with_lock()
text_in_chunks = [
texts[i : i + self.chunk_size]
for i in range(0, len(texts), self.chunk_size)
]
lst = []
for chunk in text_in_chunks:
resp = self._embedding({"input": [text for text in chunk]})
if resp.get("error_code"):
if resp.get("error_code") == 111:
self._refresh_access_token_with_lock()
resp = self._embedding({"input": [text for text in chunk]})
else:
raise ValueError(f"Error from Ernie: {resp}")
lst.extend([i["embedding"] for i in resp["data"]])
return lst
def embed_query(self, text: str) -> List[float]:
"""Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text.
"""
if not self.access_token:
self._refresh_access_token_with_lock()
resp = self._embedding({"input": [text]})
if resp.get("error_code"):
if resp.get("error_code") == 111:
self._refresh_access_token_with_lock()
resp = self._embedding({"input": [text]})
else:
raise ValueError(f"Error from Ernie: {resp}")
return resp["data"][0]["embedding"]
async def aembed_query(self, text: str) -> List[float]:
"""Asynchronous Embed query text.
Args:
text: The text to embed.
Returns:
List[float]: Embeddings for the text.
"""
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.embed_query, text)
)
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Asynchronous Embed search docs.
Args:
texts: The list of texts to embed
Returns:
List[List[float]]: List of embeddings, one for each text.
"""
result = await asyncio.gather(*[self.aembed_query(text) for text in texts])
return list(result)
| [] |
2024-01-10 | axgpt/langchain | libs~langchain~langchain~vectorstores~sklearn.py | """ Wrapper around scikit-learn NearestNeighbors implementation.
The vector store can be persisted in json, bson or parquet format.
"""
import json
import math
import os
from abc import ABC, abstractmethod
from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple, Type
from uuid import uuid4
from langchain_core.schema.embeddings import Embeddings
from langchain_core.schema.vectorstore import VectorStore
from langchain_core.utils import guard_import
from langchain.docstore.document import Document
from langchain.vectorstores.utils import maximal_marginal_relevance
DEFAULT_K = 4 # Number of Documents to return.
DEFAULT_FETCH_K = 20 # Number of Documents to initially fetch during MMR search.
class BaseSerializer(ABC):
"""Base class for serializing data."""
def __init__(self, persist_path: str) -> None:
self.persist_path = persist_path
@classmethod
@abstractmethod
def extension(cls) -> str:
"""The file extension suggested by this serializer (without dot)."""
@abstractmethod
def save(self, data: Any) -> None:
"""Saves the data to the persist_path"""
@abstractmethod
def load(self) -> Any:
"""Loads the data from the persist_path"""
class JsonSerializer(BaseSerializer):
"""Serializes data in json using the json package from python standard library."""
@classmethod
def extension(cls) -> str:
return "json"
def save(self, data: Any) -> None:
with open(self.persist_path, "w") as fp:
json.dump(data, fp)
def load(self) -> Any:
with open(self.persist_path, "r") as fp:
return json.load(fp)
class BsonSerializer(BaseSerializer):
"""Serializes data in binary json using the `bson` python package."""
def __init__(self, persist_path: str) -> None:
super().__init__(persist_path)
self.bson = guard_import("bson")
@classmethod
def extension(cls) -> str:
return "bson"
def save(self, data: Any) -> None:
with open(self.persist_path, "wb") as fp:
fp.write(self.bson.dumps(data))
def load(self) -> Any:
with open(self.persist_path, "rb") as fp:
return self.bson.loads(fp.read())
class ParquetSerializer(BaseSerializer):
"""Serializes data in `Apache Parquet` format using the `pyarrow` package."""
def __init__(self, persist_path: str) -> None:
super().__init__(persist_path)
self.pd = guard_import("pandas")
self.pa = guard_import("pyarrow")
self.pq = guard_import("pyarrow.parquet")
@classmethod
def extension(cls) -> str:
return "parquet"
def save(self, data: Any) -> None:
df = self.pd.DataFrame(data)
table = self.pa.Table.from_pandas(df)
if os.path.exists(self.persist_path):
backup_path = str(self.persist_path) + "-backup"
os.rename(self.persist_path, backup_path)
try:
self.pq.write_table(table, self.persist_path)
except Exception as exc:
os.rename(backup_path, self.persist_path)
raise exc
else:
os.remove(backup_path)
else:
self.pq.write_table(table, self.persist_path)
def load(self) -> Any:
table = self.pq.read_table(self.persist_path)
df = table.to_pandas()
return {col: series.tolist() for col, series in df.items()}
SERIALIZER_MAP: Dict[str, Type[BaseSerializer]] = {
"json": JsonSerializer,
"bson": BsonSerializer,
"parquet": ParquetSerializer,
}
class SKLearnVectorStoreException(RuntimeError):
"""Exception raised by SKLearnVectorStore."""
pass
class SKLearnVectorStore(VectorStore):
"""Simple in-memory vector store based on the `scikit-learn` library
`NearestNeighbors` implementation."""
def __init__(
self,
embedding: Embeddings,
*,
persist_path: Optional[str] = None,
serializer: Literal["json", "bson", "parquet"] = "json",
metric: str = "cosine",
**kwargs: Any,
) -> None:
np = guard_import("numpy")
sklearn_neighbors = guard_import("sklearn.neighbors", pip_name="scikit-learn")
# non-persistent properties
self._np = np
self._neighbors = sklearn_neighbors.NearestNeighbors(metric=metric, **kwargs)
self._neighbors_fitted = False
self._embedding_function = embedding
self._persist_path = persist_path
self._serializer: Optional[BaseSerializer] = None
if self._persist_path is not None:
serializer_cls = SERIALIZER_MAP[serializer]
self._serializer = serializer_cls(persist_path=self._persist_path)
# data properties
self._embeddings: List[List[float]] = []
self._texts: List[str] = []
self._metadatas: List[dict] = []
self._ids: List[str] = []
# cache properties
self._embeddings_np: Any = np.asarray([])
if self._persist_path is not None and os.path.isfile(self._persist_path):
self._load()
@property
def embeddings(self) -> Embeddings:
return self._embedding_function
def persist(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
"You must specify a persist_path on creation to persist the "
"collection."
)
data = {
"ids": self._ids,
"texts": self._texts,
"metadatas": self._metadatas,
"embeddings": self._embeddings,
}
self._serializer.save(data)
def _load(self) -> None:
if self._serializer is None:
raise SKLearnVectorStoreException(
"You must specify a persist_path on creation to load the " "collection."
)
data = self._serializer.load()
self._embeddings = data["embeddings"]
self._texts = data["texts"]
self._metadatas = data["metadatas"]
self._ids = data["ids"]
self._update_neighbors()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
_texts = list(texts)
_ids = ids or [str(uuid4()) for _ in _texts]
self._texts.extend(_texts)
self._embeddings.extend(self._embedding_function.embed_documents(_texts))
self._metadatas.extend(metadatas or ([{}] * len(_texts)))
self._ids.extend(_ids)
self._update_neighbors()
return _ids
def _update_neighbors(self) -> None:
if len(self._embeddings) == 0:
raise SKLearnVectorStoreException(
"No data was added to SKLearnVectorStore."
)
self._embeddings_np = self._np.asarray(self._embeddings)
self._neighbors.fit(self._embeddings_np)
self._neighbors_fitted = True
def _similarity_index_search_with_score(
self, query_embedding: List[float], *, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[int, float]]:
"""Search k embeddings similar to the query embedding. Returns a list of
(index, distance) tuples."""
if not self._neighbors_fitted:
raise SKLearnVectorStoreException(
"No data was added to SKLearnVectorStore."
)
neigh_dists, neigh_idxs = self._neighbors.kneighbors(
[query_embedding], n_neighbors=k
)
return list(zip(neigh_idxs[0], neigh_dists[0]))
def similarity_search_with_score(
self, query: str, *, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[Document, float]]:
query_embedding = self._embedding_function.embed_query(query)
indices_dists = self._similarity_index_search_with_score(
query_embedding, k=k, **kwargs
)
return [
(
Document(
page_content=self._texts[idx],
metadata={"id": self._ids[idx], **self._metadatas[idx]},
),
dist,
)
for idx, dist in indices_dists
]
def similarity_search(
self, query: str, k: int = DEFAULT_K, **kwargs: Any
) -> List[Document]:
docs_scores = self.similarity_search_with_score(query, k=k, **kwargs)
return [doc for doc, _ in docs_scores]
def _similarity_search_with_relevance_scores(
self, query: str, k: int = DEFAULT_K, **kwargs: Any
) -> List[Tuple[Document, float]]:
docs_dists = self.similarity_search_with_score(query, k=k, **kwargs)
docs, dists = zip(*docs_dists)
scores = [1 / math.exp(dist) for dist in dists]
return list(zip(list(docs), scores))
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = DEFAULT_FETCH_K,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
indices_dists = self._similarity_index_search_with_score(
embedding, k=fetch_k, **kwargs
)
indices, _ = zip(*indices_dists)
result_embeddings = self._embeddings_np[indices,]
mmr_selected = maximal_marginal_relevance(
self._np.array(embedding, dtype=self._np.float32),
result_embeddings,
k=k,
lambda_mult=lambda_mult,
)
mmr_indices = [indices[i] for i in mmr_selected]
return [
Document(
page_content=self._texts[idx],
metadata={"id": self._ids[idx], **self._metadatas[idx]},
)
for idx in mmr_indices
]
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = DEFAULT_FETCH_K,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
)
return docs
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
persist_path: Optional[str] = None,
**kwargs: Any,
) -> "SKLearnVectorStore":
vs = SKLearnVectorStore(embedding, persist_path=persist_path, **kwargs)
vs.add_texts(texts, metadatas=metadatas, ids=ids)
return vs
| [] |