date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | i3oi3ka/mini_twitter | mini_twitter~consult_gpt~consult.py | import os
from django.http import JsonResponse
from django.shortcuts import render
from openai import OpenAI
from dotenv import load_dotenv
load_dotenv()  # actually load the .env file; importing load_dotenv alone does not make OPENAI_API_KEY available
def chat_with_gpt(request):
user_input = request.GET.get('user_input')
if not user_input:
return JsonResponse({'response': 'The request cannot be empty, please ask something'})
client = OpenAI(
api_key=os.getenv("OPENAI_API_KEY"),
)
chat_completion = client.chat.completions.create(
messages=[
{
"role": "user",
"content": user_input,
}
],
model="gpt-3.5-turbo",
temperature=0.5,
max_tokens=1000
)
chat_response = chat_completion.choices[0].message.content
return JsonResponse({'response': chat_response})
def consult(request):
return render(request, 'consult.html')
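# Illustrative wiring sketch: one way these views might be exposed in urls.py.
# The module path and route names below are assumptions, not taken from this repo.
# from django.urls import path
# from consult_gpt import consult
#
# urlpatterns = [
#     path("consult/", consult.consult, name="consult"),
#     path("consult/chat/", consult.chat_with_gpt, name="chat_with_gpt"),
# ]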
| [] |
2024-01-10 | plan-ai/planai-api | app~task~anonymize.py | from flask import make_response, jsonify
from app.task.model import Task
from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage
from app.user.auth import validate_user
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
model = ChatOpenAI(
model_name=config["OpenAI"]["MODEL"],
temperature=float(config["OpenAI"]["TEMPERATURE"]),
openai_api_key=config["OpenAI"]["TOKEN"],
)
PROMPT_FORMAT = """Take the following task and find a way to remove company specific information from it and create a freelance task that is anoymized:
{}
Send all output in form:
projectTitle:
project:
"""
def anonymize_task(task_title: str, task_desc: str):
prompt = PROMPT_FORMAT.format(f"{task_title}:{task_desc}")
messages = [
HumanMessage(content=prompt)
]
response = model.invoke(messages).content  # use the HumanMessage list built above rather than the raw string
return (
response.split("projectTitle:")[1].split("project")[0],
response.split("project")[1],
)
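# Illustrative example (assumed model output, not taken from the repo): for a response like
#   "projectTitle: Build a REST backend\nproject: Implement CRUD endpoints for tasks"
# anonymize_task() would return ("Build a REST backend", "Implement CRUD endpoints for tasks").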
def get_anonymized_task(auth: str, task_id: str):
user = validate_user(auth)
if user is None:
return make_response({"message": "User validator failed"}, 401)
try:
task = Task.objects(id=task_id).first()
anonymized_task_title, anonymized_task_desc = anonymize_task(
task.task_title, task.task_desc
)
message = {
"anonymized_task_title": anonymized_task_title,
"anonymized_task_desc": anonymized_task_desc,
}
status_code = 200
except:
message = {"message": "Task anonymization failed"}
status_code = 500
return make_response(jsonify(message), status_code)
| [
"PLACEHOLDER:PLACEHOLDER",
"Take the following task and find a way to remove company specific information from it and create a freelance task that is anoymized:\n\n{}\n\nSend all output in form:\n\nprojectTitle: \n\nproject:\n\n"
] |
2024-01-10 | plan-ai/planai-api | app~user~auth.py | # NOTE: USING LOCAL DATETIME RIGHT NOW IN AUTH TO ALLOW
# FOR FAST DEV AND MVP. THIS WILL FAIL IN CASE OF DISTRIBUTED SYSTEMS
# PLEASE REFACTOR THIS CODE BEFORE PUSHING TO PROD
from datetime import datetime, timedelta
from app.user.model import User, Auth, Org
from app.billing.model import Plan
from app.openai_config.model import OpenAI
from flask import make_response, jsonify
import hashlib
import jwt
import configparser
import requests
# read the config file
config = configparser.ConfigParser()
config.read("config.ini")
# set app defaults
secret_token = config["AUTH"]["SECRET"]
session_time = int(config["AUTH"]["EXPIRY"].strip())
def sha256(input: str):
# Create a fresh SHA-256 hash object on every call; a shared module-level object
# would accumulate input across calls and yield different digests for the same string
sha256_hash = hashlib.sha256()
sha256_hash.update(input.encode())
# Get the hexadecimal representation of the hash
return sha256_hash.hexdigest()
def create_user(
user_name: str, user_email: str, user_auth_provider: Auth, user_org: Org
):
user = User(
user_name=user_name,
user_email=user_email,
user_auth_provider=user_auth_provider,
user_org=user_org,
user_created=datetime.now(),
)
try:
user.save()
return user
except:
return None
def create_org(org_name: str, user_email: str):
org_domain = user_email.split("@")[1]
free_plan = Plan.objects(plan_type="freeTier").first()
if free_plan is None:
return None
openai = OpenAI(custom_token=False, token=None, spending_limit=None)
org = Org(
org_name=org_name,
org_domain=org_domain,
org_created=datetime.now(),
org_plan=free_plan,
org_open_ai=openai,
)
try:
org.save()
return org
except Exception as err:
return None
def create_auth(user_auth_provider: str, user_hashed_token: str, user_uid: str):
try:
auth = Auth(
user_auth_provider=user_auth_provider,
user_hashed_token=user_hashed_token,
user_uid=user_uid,
)
return auth
except:
return None
def encode_jwt(user_auth: Auth, secret_token: str):
jwt_payload = {
"hashedToken": user_auth.user_hashed_token,
"authProvider": user_auth.user_auth_provider,
"userUID": user_auth.user_uid,
"dateTime": datetime.now().isoformat(),
}
jwt_token = jwt.encode(jwt_payload, secret_token, "HS256")
return jwt_token
def validate_user(jwt_token: str):
jwt_payload = jwt.decode(jwt_token, secret_token, ["HS256"])
time_diff = datetime.now() - datetime.fromisoformat(jwt_payload["dateTime"])
user_auth = Auth(
user_hashed_token=jwt_payload["hashedToken"],
user_auth_provider=jwt_payload["authProvider"],
user_uid=jwt_payload["userUID"],
)
user = User.objects(user_auth_provider=user_auth).first()
if time_diff.total_seconds() < session_time and user is not None:  # total_seconds() so gaps longer than a day are not wrapped
return user
return None
def parse_emails(email_response):
emails = []
primary_email = None
for email in email_response:
emails.append(email["email"])
if email["primary"]:
primary_email = email["email"]
return emails, primary_email
def verify_gh_access_token(github_uid, gh_access_token):
"""
Used as an internal helper function to validate that the
github_uid and gh_access_token sent into the function
belong to the same GitHub account, and return the name and
avatar url (github profile pic) if true
"""
url = "https://api.github.com/user"
headers = {
"Accept": "application/vnd.github+json",
"Authorization": f"Bearer {gh_access_token}",
"X-GitHub-Api-Version": "2022-11-28",
}
response = requests.get(url, headers=headers).json()
if int(github_uid) != response["id"]:
return False, None, None, None, None
email_response = requests.get(
"https://api.github.com/user/emails", headers=headers
).json()
try:
emails, primary_email = parse_emails(email_response)
except:
emails, primary_email = None, None
return True, response["name"], response["avatar_url"], emails, primary_email
def create_user_github(org_name: str, user_uid: str, user_token: str):
try:
(
is_gh_valid,
user_gh_name,
user_gh_profile_pic,
emails,
primary_email,
) = verify_gh_access_token(user_uid, user_token)
if not is_gh_valid:
message = {"message": "Githb auth failed"}
status_code = 422
return make_response(jsonify(message), status_code)
hashed_token = sha256(user_token)
auth = create_auth("github", hashed_token, user_uid)
if auth is None:
message = {"message": "Githb auth object creation failed"}
status_code = 400
return make_response(jsonify(message), status_code)
org = create_org(org_name, primary_email)
if org is None:
message = {"message": "Org object creation failed"}
status_code = 400
return make_response(jsonify(message), status_code)
user = create_user(user_gh_name, primary_email, auth, org)
if user is None:
message = {"message": "User object creation failed"}
status_code = 400
return make_response(jsonify(message), status_code)
jwt_token = encode_jwt(auth, secret_token)  # avoid shadowing the imported jwt module
message = {
"message": "User created sucessfully",
"user": str(user.id),
"jwt": jwt,
}
status_code = 200
except Exception as err:
message = {"messgae": "User creation failed", "reason": repr(err)}
status_code = 500
return make_response(jsonify(message), status_code)
def login_user_github(github_uid: str, gh_access_token: str):
try:
hashed_token = sha256(gh_access_token)
auth = Auth(
user_auth_provider="github",
user_hashed_token=hashed_token,
user_uid=github_uid,
)
user = User.objects(user_auth_provider=auth).first()
if user is None:
return make_response({"message": "User validator failed"}, 401)
message = {
"jwt_token": encode_jwt(auth, secret_token),
"user_name": user.user_name,
"user_email": user.user_email,
"user_profile_pic": user.user_profile_pic,
"id": str(user.id),
}
status_code = 200
except Exception as err:
message = {
"message": "User validation failed unexpectedly",
"reason": repr(err),
}
status_code = 500
return make_response(jsonify(message), status_code)
def update_token(current_jwt: str):
try:
user = validate_user(current_jwt)
if user is None:
return make_response({"message": "User validator failed"}, 401)
message = {"updated_jwt": encode_jwt(user.user_auth_provider, secret_token)}
status_code = 200
except Exception as err:
message = {"message": "JWT auth failed unexpectedly", "reason": repr(err)}
status_code = 500
return make_response(jsonify(message), status_code)
def verify_jira_token(jira_token: str, jira_id: str):
headers = {
"Authorization": f"Bearer {jira_token}",
"Accept": "application/json",
"Content-Type": "application/json",
}
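# NOTE: base_url is not defined anywhere in this module; it would need to be
# supplied (e.g. read from config.ini) before the request below can succeed.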
# Make a request to get the current user's details
response = requests.get(f"{base_url}/rest/api/2/myself", headers=headers)
if response.status_code == 200:
json_response = response.json()
# Check if the account ID matches with the provided one
return json_response.get("accountId") == jira_id
return False
def add_jira_integration(auth: str, jira_token: str, jira_id: str):
user = validate_user(auth)
if user is None:
return make_response({"message": "User validator failed"}, 401)
try:
is_jira_token_valid = verify_jira_token(jira_token, jira_id)
if not is_jira_token_valid:
return make_response({"message": "Invalid jira token for jira id"}, 400)
hashed_token = sha256(jira_token)
auth = create_auth("jira", hashed_token, jira_id)
user.update(set__user_additional_auth_provider=auth)
message = {
"message": "User jira token updated successfully",
"user": str(user.id),
"jiraToken": jira_token,
"jiraId": jira_id,
}
status_code = 200
except:
message = {"message": "User jira token updation failed"}
status_code = 400
return make_response(jsonify(message), status_code)  # honor the computed status instead of always returning 500
| [] |
2024-01-10 | seanwojcik/voice-chatgpt | speech_to_text.py | import openai
import pyaudio
import wave
import keyboard
import time
def record_audio(filename, duration=10):
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
p = pyaudio.PyAudio()
stream = p.open(
format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK
)
print("\n* Recording your response... [Press Enter to stop, ESC to quit]")
frames = []
start_time = time.time()
while True:
data = stream.read(CHUNK)
frames.append(data)
elapsed_time = time.time() - start_time
if keyboard.is_pressed("esc"):
exit(1)
elif (
(duration and elapsed_time >= duration)
or keyboard.is_pressed("space")
or keyboard.is_pressed("enter")
):
break
print("* Finished recording\n")
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(filename, "wb")
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b"".join(frames))
wf.close()
def transcribe_audio(filename, api_key=None):
if api_key is not None:
openai.api_key = api_key
with open(filename, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
return transcript["text"]
| [] |
2024-01-10 | stephenzhao/llama_index | llama_index~query_engine~sql_vector_query_engine.py | """SQL Vector query engine."""
from langchain.input import print_text
from typing import Optional, cast, Dict, Any, Callable
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.indices.struct_store.sql_query import GPTNLStructStoreQueryEngine
from llama_index.indices.vector_store.retrievers.auto_retriever import (
VectorIndexAutoRetriever,
)
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.tools.query_engine import QueryEngineTool
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.indices.service_context import ServiceContext
from llama_index.selectors.llm_selectors import LLMSingleSelector
from llama_index.prompts.base import Prompt
from llama_index.indices.query.query_transform.base import BaseQueryTransform
import logging
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor
from llama_index.callbacks.base import CallbackManager
logger = logging.getLogger(__name__)
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL = """
The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
Given the SQL response, the question has also been translated into a vector store query.
The vector store query and response is given below.
Given SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
Transformed vector store query: {vector_store_query_str}
Vector store response: {vector_store_response_str}
Response:
""" # noqa
DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT = Prompt(DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT_TMPL)
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL = """
"The original question is given below.
This question has been translated into a SQL query. Both the SQL query and the response are given below.
The SQL response either answers the question, or should provide additional context that can be used to make the question more specific.
Your job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.
Examples:
Original question: Please give more details about the demographics of the city with the highest population.
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: Can you tell me more about the demographics of New York City?
Original question: Please compare the sports environment of cities in North America.
SQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3
SQL response: The cities in North America are New York, San Francisco, and Toronto.
New question: What sports are played in New York, San Francisco, and Toronto?
Original question: What is the city with the highest population?
SQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1
SQL response: The city with the highest population is New York City.
New question: None
Original question: What countries are the top 3 ATP players from?
SQL query: SELECT country FROM players WHERE rank <= 3
SQL response: The top 3 ATP players are from Serbia, Russia, and Spain.
New question: None
Original question: {query_str}
SQL query: {sql_query_str}
SQL response: {sql_response_str}
New question: "
""" # noqa
DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT = Prompt(DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT_TMPL)
def _default_check_stop(query_bundle: QueryBundle) -> bool:
"""Default check stop function."""
return query_bundle.query_str.lower() == "none"
def _format_sql_query(sql_query: str) -> str:
"""Format SQL query."""
return sql_query.replace("\n", " ").replace("\t", " ")
class SQLAugmentQueryTransform(BaseQueryTransform):
"""SQL Augment Query Transform.
This query transform will transform the query into a more specific query
after augmenting with SQL results.
Args:
llm_predictor (LLMPredictor): LLM predictor to use for query transformation.
sql_augment_transform_prompt (Prompt): Prompt to use for query transformation.
check_stop_parser (Optional[Callable[[str], bool]]): Check stop function.
"""
def __init__(
self,
llm_predictor: Optional[BaseLLMPredictor] = None,
sql_augment_transform_prompt: Optional[Prompt] = None,
check_stop_parser: Optional[Callable[[QueryBundle], bool]] = None,
) -> None:
"""Initialize params."""
self._llm_predictor = llm_predictor or LLMPredictor()
self._sql_augment_transform_prompt = (
sql_augment_transform_prompt or DEFAULT_SQL_AUGMENT_TRANSFORM_PROMPT
)
self._check_stop_parser = check_stop_parser or _default_check_stop
def _run(self, query_bundle: QueryBundle, extra_info: Dict) -> QueryBundle:
"""Run query transform."""
query_str = query_bundle.query_str
sql_query = extra_info["sql_query"]
sql_query_response = extra_info["sql_query_response"]
new_query_str, formatted_prompt = self._llm_predictor.predict(
self._sql_augment_transform_prompt,
query_str=query_str,
sql_query_str=sql_query,
sql_response_str=sql_query_response,
)
return QueryBundle(
new_query_str, custom_embedding_strs=query_bundle.custom_embedding_strs
)
def check_stop(self, query_bundle: QueryBundle) -> bool:
"""Check if query indicates stop."""
return self._check_stop_parser(query_bundle)
class SQLAutoVectorQueryEngine(BaseQueryEngine):
"""SQL + Vector Index Auto Retriever Query Engine.
This query engine can query both a SQL database
as well as a vector database. It will first decide
whether it needs to query the SQL database or vector store.
If it decides to query the SQL database, it will also decide
whether to augment information with retrieved results from the vector store.
We use the VectorIndexAutoRetriever to retrieve results.
Args:
sql_query_tool (QueryEngineTool): Query engine tool for SQL database.
vector_query_tool (QueryEngineTool): Query engine tool for vector database.
selector (Optional[LLMSingleSelector]): Selector to use.
service_context (Optional[ServiceContext]): Service context to use.
sql_vector_synthesis_prompt (Optional[Prompt]): Prompt to use for SQL vector
synthesis.
sql_augment_query_transform (Optional[SQLAugmentQueryTransform]): Query
transform to use for SQL augmentation.
use_sql_vector_synthesis (bool): Whether to use SQL vector synthesis.
callback_manager (Optional[CallbackManager]): Callback manager to use.
verbose (bool): Whether to print intermediate results.
"""
def __init__(
self,
sql_query_tool: QueryEngineTool,
vector_query_tool: QueryEngineTool,
selector: Optional[LLMSingleSelector] = None,
service_context: Optional[ServiceContext] = None,
sql_vector_synthesis_prompt: Optional[Prompt] = None,
sql_augment_query_transform: Optional[SQLAugmentQueryTransform] = None,
use_sql_vector_synthesis: bool = True,
callback_manager: Optional[CallbackManager] = None,
verbose: bool = True,
) -> None:
"""Initialize params."""
super().__init__(callback_manager=callback_manager)
# validate that the query engines are of the right type
if not isinstance(sql_query_tool.query_engine, GPTNLStructStoreQueryEngine):
raise ValueError(
"sql_query_tool.query_engine must be an instance of "
"GPTNLStructStoreQueryEngine"
)
if not isinstance(vector_query_tool.query_engine, RetrieverQueryEngine):
raise ValueError(
"vector_query_tool.query_engine must be an instance of "
"RetrieverQueryEngine"
)
if not isinstance(
vector_query_tool.query_engine.retriever, VectorIndexAutoRetriever
):
raise ValueError(
"vector_query_tool.query_engine.retriever must be an instance "
"of VectorIndexAutoRetriever"
)
self._sql_query_tool = sql_query_tool
self._vector_query_tool = vector_query_tool
sql_query_engine = cast(
GPTNLStructStoreQueryEngine, sql_query_tool.query_engine
)
self._service_context = service_context or sql_query_engine.service_context
self._selector = selector or LLMSingleSelector.from_defaults()
self._sql_vector_synthesis_prompt = (
sql_vector_synthesis_prompt or DEFAULT_SQL_VECTOR_SYNTHESIS_PROMPT
)
self._sql_augment_query_transform = (
sql_augment_query_transform
or SQLAugmentQueryTransform(
llm_predictor=self._service_context.llm_predictor
)
)
self._use_sql_vector_synthesis = use_sql_vector_synthesis
self._verbose = verbose
@classmethod
def from_sql_and_vector_query_engines(
cls,
sql_query_engine: GPTNLStructStoreQueryEngine,
sql_tool_name: str,
sql_tool_description: str,
vector_auto_retriever: RetrieverQueryEngine,
vector_tool_name: str,
vector_tool_description: str,
selector: Optional[LLMSingleSelector] = None,
**kwargs: Any,
) -> "SQLAutoVectorQueryEngine":
"""From SQL and vector query engines.
Args:
sql_query_engine (GPTNLStructStoreQueryEngine): SQL query engine.
vector_query_engine (VectorIndexAutoRetriever): Vector retriever.
selector (Optional[LLMSingleSelector]): Selector to use.
"""
sql_query_tool = QueryEngineTool.from_defaults(
sql_query_engine, name=sql_tool_name, description=sql_tool_description
)
vector_query_tool = QueryEngineTool.from_defaults(
vector_auto_retriever,
name=vector_tool_name,
description=vector_tool_description,
)
return cls(sql_query_tool, vector_query_tool, selector, **kwargs)
def _query_sql_vector(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query SQL database + vector db in sequence."""
# first query SQL database
sql_response = self._sql_query_tool.query_engine.query(query_bundle)
if not self._use_sql_vector_synthesis:
return sql_response
sql_query = (
sql_response.extra_info["sql_query"] if sql_response.extra_info else None
)
if self._verbose:
print_text(f"SQL query: {sql_query}\n", color="yellow")
print_text(f"SQL response: {sql_response}\n", color="yellow")
# given SQL db, transform query into new query
new_query = self._sql_augment_query_transform(
query_bundle.query_str,
extra_info={
"sql_query": _format_sql_query(sql_query),
"sql_query_response": str(sql_response),
},
)
if self._verbose:
print_text(
f"Transformed query given SQL response: {new_query.query_str}\n",
color="blue",
)
logger.info(f"> Transformed query given SQL response: {new_query.query_str}")
if self._sql_augment_query_transform.check_stop(new_query):
return sql_response
vector_response = self._vector_query_tool.query_engine.query(new_query)
if self._verbose:
print_text(f"Vector DB response: {vector_response}\n", color="pink")
logger.info(f"> Vector DB response: {vector_response}")
response_str, _ = self._service_context.llm_predictor.predict(
self._sql_vector_synthesis_prompt,
query_str=query_bundle.query_str,
sql_query_str=sql_query,
sql_response_str=str(sql_response),
vector_store_query_str=new_query.query_str,
vector_store_response_str=str(vector_response),
)
if self._verbose:
print_text(f"Final response: {response_str}\n", color="green")
response_extra_info = {
**(sql_response.extra_info or {}),
**(vector_response.extra_info or {}),
}
source_nodes = vector_response.source_nodes
return Response(
response_str,
extra_info=response_extra_info,
source_nodes=source_nodes,
)
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
"""Query and get response."""
# TODO: see if this can be consolidated with logic in RouterQueryEngine
metadatas = [self._sql_query_tool.metadata, self._vector_query_tool.metadata]
result = self._selector.select(metadatas, query_bundle)
# pick sql query
if result.ind == 0:
if self._verbose:
print_text(f"Querying SQL database: {result.reason}\n", color="blue")
logger.info(f"> Querying SQL database: {result.reason}")
return self._query_sql_vector(query_bundle)
elif result.ind == 1:
if self._verbose:
print_text(f"Querying vector database: {result.reason}\n", color="blue")
logger.info(f"> Querying vector database: {result.reason}")
response = self._vector_query_tool.query_engine.query(query_bundle)
if self._verbose:
print_text(f"Vector DB response: {response}\n", color="pink")
return response
else:
raise ValueError(f"Invalid result.ind: {result.ind}")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
# TODO: make async
return self._query(query_bundle)
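# Illustrative usage sketch; the engine objects and tool names/descriptions below
# are placeholders, not taken from this repository.
# query_engine = SQLAutoVectorQueryEngine.from_sql_and_vector_query_engines(
#     sql_query_engine, "sql_tool", "Translates questions into SQL over the city database",
#     vector_query_engine, "vector_tool", "Retrieves unstructured context about cities",
# )
# response = query_engine.query("Tell me about the arts and culture of the city with the highest population")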
| [
"\nThe original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nGiven the SQL response, the question has also been translated into a vector store query.\nThe vector store query and response is given below.\nGiven SQL query, SQL response, transformed vector store query, and vector store response, please synthesize a response to the original question.\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nTransformed vector store query: {vector_store_query_str}\nVector store response: {vector_store_response_str}\nResponse: \n",
"\n\"The original question is given below.\nThis question has been translated into a SQL query. Both the SQL query and the response are given below.\nThe SQL response either answers the question, or should provide additional context that can be used to make the question more specific.\nYour job is to come up with a more specific question that needs to be answered to fully answer the original question, or 'None' if the original question has already been fully answered from the SQL response. Do not create a new question that is irrelevant to the original question; in that case return None instead.\n\nExamples:\n\nOriginal question: Please give more details about the demographics of the city with the highest population.\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: Can you tell me more about the demographics of New York City?\n\nOriginal question: Please compare the sports environment of cities in North America.\nSQL query: SELECT city_name FROM cities WHERE continent = 'North America' LIMIT 3\nSQL response: The cities in North America are New York, San Francisco, and Toronto.\nNew question: What sports are played in New York, San Francisco, and Toronto?\n\nOriginal question: What is the city with the highest population?\nSQL query: SELECT city, population FROM cities ORDER BY population DESC LIMIT 1\nSQL response: The city with the highest population is New York City.\nNew question: None\n\nOriginal question: What countries are the top 3 ATP players from?\nSQL query: SELECT country FROM players WHERE rank <= 3\nSQL response: The top 3 ATP players are from Serbia, Russia, and Spain.\nNew question: None\n\nOriginal question: {query_str}\nSQL query: {sql_query_str}\nSQL response: {sql_response_str}\nNew question: \"\n",
"North America",
"None"
] |
2024-01-10 | shahidul034/LLM-and-Langchain-Tutorial-and-resources | testing.py | # from huggingface_hub import notebook_login
# notebook_login()
import torch
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
path=r'/home/drmohammad/Documents/LLM/Llamav2hf/Llama-2-7b-chat-hf'
tokenizer = AutoTokenizer.from_pretrained(path,
use_auth_token=True,)
model = AutoModelForCausalLM.from_pretrained(path,
device_map='auto',
torch_dtype=torch.float16,
use_auth_token=True,
# load_in_8bit=True,
# load_in_4bit=True
)
# Use a pipeline for later
from transformers import pipeline
pipe = pipeline("text-generation",
model=model,
tokenizer= tokenizer,
torch_dtype=torch.bfloat16,
device_map="auto",
max_new_tokens = 512,
do_sample=True,
top_k=30,
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id
)
import json
import textwrap
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""
def get_prompt(instruction, new_system_prompt=DEFAULT_SYSTEM_PROMPT ):
SYSTEM_PROMPT = B_SYS + new_system_prompt + E_SYS
prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
return prompt_template
def cut_off_text(text, prompt):
cutoff_phrase = prompt
index = text.find(cutoff_phrase)
if index != -1:
return text[:index]
else:
return text
def remove_substring(string, substring):
return string.replace(substring, "")
def generate(text):
prompt = get_prompt(text)
with torch.autocast('cuda', dtype=torch.bfloat16):
inputs = tokenizer(prompt, return_tensors="pt").to('cuda')
outputs = model.generate(**inputs,
max_new_tokens=512,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.eos_token_id,
)
final_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
final_outputs = cut_off_text(final_outputs, '</s>')
final_outputs = remove_substring(final_outputs, prompt)
return final_outputs
def parse_text(text):
wrapped_text = textwrap.fill(text, width=100)
print(wrapped_text +'\n\n')
# return assistant_text
# instruction = "What is the temperature in Melbourne?"
# get_prompt(instruction)
# instruction = "Summarize the following text for me {text}"
# system_prompt = "You are an expert and summarization and reducing the number of words used"
# print(get_prompt(instruction, system_prompt))
from langchain import HuggingFacePipeline
from langchain import PromptTemplate, LLMChain
from langchain.memory import ConversationBufferMemory
llm = HuggingFacePipeline(pipeline = pipe, model_kwargs = {'temperature':0})
# instruction = "Summarize the following article for me {text}"
# system_prompt = "You are an expert and summarization and expressing key ideas succintly"
# template = get_prompt(instruction, system_prompt)
# print(template)
# text=open("text.txt","r").read()
# prompt = PromptTemplate(template=template, input_variables=["text"])
# llm_chain = LLMChain(prompt=prompt, llm=llm)
# output = llm_chain.run(text)
# print(output)
instruction = "Chat History:\n\n{chat_history} \n\nUser: {user_input}"
system_prompt = "You are a helpful assistant, you always only answer for the assistant then you stop. read the chat history to get context"
template = get_prompt(instruction, system_prompt)
prompt = PromptTemplate(
input_variables=["chat_history", "user_input"], template=template
)
memory = ConversationBufferMemory(memory_key="chat_history")
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=memory,
)
ans=llm_chain.predict(user_input="Hi, my name is Sam")
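# Because ConversationBufferMemory feeds the prior exchange back in through
# {chat_history}, a hypothetical follow-up turn could recall earlier facts, e.g.:
# ans = llm_chain.predict(user_input="What is my name?")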
print(ans) | [
"You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.",
"chat_history",
"PLACEHOLDERPLACEHOLDERPLACEHOLDER",
"You are a helpful assistant, you always only answer for the assistant then you stop. read the chat history to get context",
"PLACEHOLDERPLACEHOLDERPLACEHOLDERPLACEHOLDERChat History:\n\n{chat_history} \n\nUser: {user_input}PLACEHOLDER",
"user_input"
] |
2024-01-10 | vasconceloscezar/ai-agent-langchain-pinecone | memory~retrieve_memory.py | import gzip
import json
from dotenv import load_dotenv
load_dotenv()
import os
from langchain import PromptTemplate
from langchain.llms import OpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.vectorstores import Pinecone
from langchain.document_loaders import TextLoader
import pinecone
from testing_pinecone import init_pinecone
def compress_metadata(metadata):
print("Compressing metadata")
metadata_str = json.dumps(metadata)
compressed_metadata = gzip.compress(metadata_str.encode("utf-8"))
return compressed_metadata
def decompress_metadata(compressed_metadata):
print("Decompressing metadata")
decompressed_metadata_str = gzip.decompress(compressed_metadata).decode("utf-8")
return json.loads(decompressed_metadata_str)
init_pinecone()
openai = OpenAI(
model_name="text-davinci-003",
openai_api_key=os.environ["OPENAI_API_KEY"],
)
loader = TextLoader("./memory/output.txt")
documents = loader.load()
print(f"Loaded {len(documents)} documents")
text_splitter = RecursiveCharacterTextSplitter(chunk_size=256, chunk_overlap=20)
print("Splitting documents")
docs = text_splitter.split_documents(documents)
print(f"Split {len(docs)} documents into {text_splitter} ")
embeddings = OpenAIEmbeddings()
print("Creating Pinecone index")
index_name = "gptest"
docsearch = Pinecone.from_documents(
docs, embeddings, index_name=index_name, metadata_transform=compress_metadata
)
query = "Quais alterações no cálculo de ICMS foram feitas?"
retrieved_docs = docsearch.similarity_search(
query, metadata_transform=decompress_metadata
)
template = """Answer the question based on the context below. If the
question cannot be answered using the information provided answer
with "I don't know".
Context: {context}
Question: {query}
Answer: """
prompt_template = PromptTemplate(
input_variables=["context", "query"], template=template
)
responses = []
for doc in retrieved_docs:
context = doc.page_content
prompt = prompt_template.format(context=context, query=query)
response = openai(prompt)
responses.append(response)
print(responses)
| [
"I don't know",
"Answer the question based on the context below. If the\nquestion cannot be answered using the information provided answer\nwith \"I don't know\".\nContext: {context}\nQuestion: {query}\nAnswer: ",
"context"
] |
2024-01-10 | vasconceloscezar/ai-agent-langchain-pinecone | chaty.py | import os
import openai
import socketio
from dotenv import load_dotenv
from datetime import datetime
load_dotenv()
# Set up OpenAI API
openai.api_key = os.environ["OPENAI_API_KEY"]
# Set up Socket.IO client
sio = socketio.Client()
message_history = []
initial_prompt = "You are Andrew, a Customer Support agent. Your task is to guide and help the user through the system."
def generate_response(message):
new_message = f"\nUser Message: {message} \nCustomer Support(Andrew):"
prompt = initial_prompt + "".join(message_history) + new_message
current_time_and_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
completions = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.5,
max_tokens=500,
top_p=1.0,
frequency_penalty=0.5,
presence_penalty=0.0,
stop=["(Andrew):"],
)
response = (
completions.choices[0].text
if completions.choices[0].text
else "Thanks for reaching out to us, we will get back to you soon."
)
message_history.append(new_message)  # store only the new user turn; appending the full prompt would re-duplicate the history on every call
message_history.append(response)
print(f"({current_time_and_date}) PROMPT: {prompt}\nRESPONSE: {response}")
return response
except Exception as error:
print(f"({current_time_and_date}) PROMPT: {prompt} ERROR: {error}")
return "Sorry, I am having trouble generating a response right now. Please try again later."
@sio.event
def connect_error(error):
print(f"Connection error: {error}")
@sio.event
def message(data):
if data["user"] == "Bot":
return
print(f'({data["user"]}) said:', data["message"])
response = generate_response(data["message"])
sio.emit("message", {"user": "Bot", "message": response})
def send_message(message):
if message.strip():
sio.emit("message", {"user": "Bot", "message": message})
if __name__ == "__main__":
sio.connect("http://localhost:8080")
try:
while True:
message = input("Enter message: ")
send_message(message)
except KeyboardInterrupt:
sio.disconnect()
| [
"initial_prompt + \"\".join(message_history) + new_message",
"You are Andrew, a Customer Support agent. Your task is to guide and help the user through the system."
] |
2024-01-10 | vasconceloscezar/ai-agent-langchain-pinecone | memory~ingest_data_pinecone.py | from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
from testing_pinecone import init_pinecone
init_pinecone()
# Step 3: Create Pinecone index
index = pinecone.Index("gptest")
# Step 4: Initialize embeddings
embeddings = OpenAIEmbeddings()
# Step 5: Create Pinecone instance
vectorstore = Pinecone(index, embeddings.embed_query, "text")
# Step 6: Read local data from files
texts = []
with open("output.txt", "r") as f:
texts.append(f.read())
# with open("file2.txt", "r") as f:
# texts.append(f.read())
# ... add more files as needed
# Step 7: Embed local data and add it to the Pinecone index
vectorstore.add_texts(texts)
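# Illustrative follow-up (assumed query text): once texts are ingested, the same
# vectorstore object can be searched, e.g.:
# results = vectorstore.similarity_search("example query", k=4)
# print(results[0].page_content)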
| [] |
2024-01-10 | ekiefl/smartgpt | smartgpt~message.py | from __future__ import annotations
import time
from typing import Dict, List
import attrs
import cattrs
import openai
from openai.error import APIError, InvalidRequestError, RateLimitError
from smartgpt import strenum
from smartgpt.logger import default_logger
from smartgpt.settings.credentials import Credentials
class Role(strenum.StrEnum):
"""Enum representing the different roles in a conversation"""
USER = strenum.auto()
ASSISTANT = strenum.auto()
SYSTEM = strenum.auto()
@attrs.define(frozen=True)
class Message:
"""Structure for encapsulating a message in a conversation.
Attributes:
role:
The role of the sender of the message.
content:
The content of the message.
"""
role: Role
content: str
@attrs.define
class Response:
"""Structure for encapsulating a response from the GPT model.
Attributes:
message:
The message from the model.
total_tokens:
The total number of tokens used.
finish_reason:
The reason for finishing the response.
"""
message: Message
total_tokens: int
finish_reason: str
@classmethod
def from_openai_response(cls, response) -> Response:
"""Factory method to create a Response from an OpenAI API response.
Args:
response:
The response from the OpenAI API.
Returns:
Response:
A Response instance with values from the API response.
"""
return cls(
message=cattrs.structure(response["choices"][0]["message"], Message),
total_tokens=response["usage"]["total_tokens"],
finish_reason=response["choices"][0]["finish_reason"],
)
@attrs.define
class GPTBot:
"""Represents an interface to the GPT model.
It encapsulates the process of sending messages to the model and receiving
responses. The class also handles message history.
Attributes:
messages:
A list of messages sent to and received from the model.
credentials:
Credentials for accessing the model.
model:
The model to use (default is 'gpt-4').
temp:
The temperature parameter to use when generating responses.
"""
messages: List[Dict[str, str]] = attrs.field(factory=list)
credentials: Credentials = attrs.field(default=Credentials.default())
model: str = attrs.field(default="gpt-4")
temp: float = attrs.field(default=0.5)
def append_message(self, message: Message) -> None:
"""Appends a message to the current message history.
Args:
message: The message to append.
"""
self.messages.append(attrs.asdict(message))
def request(self) -> Response:
"""Sends the current message history to the GPT model via an API request
The message history includes all previous interactions, which allows the model
to generate a response based on the entire conversation context.
Returns:
Response:
A Response object that encapsulates the model's response, which includes
the generated message, remaining tokens, and other metadata.
"""
try:
return Response.from_openai_response(
openai.ChatCompletion.create(
model=self.model,
messages=self.messages,
api_key=self.credentials.key,
)
)
except RateLimitError:
default_logger.info("Hit rate limit. Sleeping for 20 seconds...")
time.sleep(20)
return self.request()
except APIError:
default_logger.info("Potentially bad gateway. Sleeping for 20 seconds...")
time.sleep(20)
return self.request()
except InvalidRequestError:
raise NotImplementedError()
def response(self, prompt: str) -> Message:
"""Appends prompt to message history and sends request to the GPT model.
The model's response is then appended to the message history.
Args:
prompt:
The prompt to send to the model.
Returns:
Message:
The model's response encapsulated in a Message object.
"""
self.append_message(Message(Role.USER, prompt))
response = self.request()
self.append_message(response.message)
return response.message
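# Illustrative usage sketch; the prompt text is an assumption, not taken from this repo.
# bot = GPTBot(model="gpt-4", temp=0.5)
# reply = bot.response("Summarize the plot of Hamlet in one sentence.")
# print(reply.content)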
| [] |
2024-01-10 | flyyufelix/google_research_football_kaggle | rl_agent~stable-baselines3~stable_baselines3~common~policies.py | """Policies: abstract base class and concrete implementations."""
import collections
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, preprocess_obs
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, MlpExtractor, NatureCNN, create_mlp
from stable_baselines3.common.utils import get_device, is_vectorized_observation
from stable_baselines3.common.vec_env import VecTransposeImage
from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper
class BaseModel(nn.Module, ABC):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(BaseModel, self).__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
@abstractmethod
def forward(self, *args, **kwargs):
del args, kwargs
def _update_features_extractor(
self, net_kwargs: Dict[str, Any], features_extractor: Optional[BaseFeaturesExtractor] = None
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
""" Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_data(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model.
This corresponds to the arguments of the constructor.
:return:
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_data()}, path)
@classmethod
def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel":
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
class BasePolicy(BaseModel):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super(BasePolicy, self).__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
""" (float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: np.ndarray,
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Get the policy action and state from an observation (and optional state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if mask is None:
# mask = [False for _ in range(self.n_envs)]
if isinstance(observation, dict):
observation = ObsDictWrapper.convert_dict(observation)
else:
observation = np.array(observation)
# Handle the different cases for images
# as PyTorch use channel first format
if is_image_space(self.observation_space):
if not (
observation.shape == self.observation_space.shape or observation.shape[1:] == self.observation_space.shape
):
# Try to re-order the channels
transpose_obs = VecTransposeImage.transpose_image(observation)
if (
transpose_obs.shape == self.observation_space.shape
or transpose_obs.shape[1:] == self.observation_space.shape
):
observation = transpose_obs
vectorized_env = is_vectorized_observation(observation, self.observation_space)
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = th.as_tensor(observation).to(self.device)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
if not vectorized_env:
if state is not None:
raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
actions = actions[0]
return actions, state
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable[[float], float],
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super(ActorCriticPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": sde_net_arch is not None,
}
self.sde_features_extractor = None
self.sde_net_arch = sde_net_arch
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
sde_net_arch=default_none_kwargs["sde_net_arch"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim, net_arch=self.net_arch, activation_fn=self.activation_fn, device=self.device
)
def _build(self, lr_schedule: Callable[[float], float]) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
# Separate features extractor for gSDE
if self.sde_net_arch is not None:
self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor(
self.features_dim, self.sde_net_arch, self.activation_fn
)
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, CategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, BernoulliDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: Observation
:return: Latent codes
for the actor, the value function and for gSDE function
"""
# Preprocess the observation if needed
        features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde
def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:param latent_sde: Latent code for the gSDE exploration function
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
latent_pi, _, latent_sde = self._get_latent(observation)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
return distribution.get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
    def get_distribution(self, obs: th.Tensor) -> Distribution:
        """
        Get the action distribution given the observations.
        """
        latent_pi, _, latent_sde = self._get_latent(obs)
        distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
        return distribution
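# Illustrative usage sketch for ActorCriticPolicy: a minimal sketch of how an
# on-policy algorithm (e.g. PPO/A2C) typically drives the methods above.
# `policy`, `obs_tensor`, `rollout_obs` and `rollout_actions` are placeholder
# names, not symbols defined in this module.
#
#   policy = ActorCriticPolicy(observation_space, action_space, lr_schedule)
#   # Rollout collection: sample actions, values and log-probabilities in one pass
#   actions, values, log_probs = policy.forward(obs_tensor)
#   # Policy update: re-evaluate the stored actions under the current parameters
#   values, log_probs, entropy = policy.evaluate_actions(rollout_obs, rollout_actions)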
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
    :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
        a positive standard deviation (cf paper). It keeps the variance
        above zero and prevents it from growing too fast. In practice, ``exp()`` is usually enough.
    :param squash_output: Whether to squash the output using a tanh function,
        which ensures bounded outputs when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Callable,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = False,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(ActorCriticCnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
        This reduces computation when not all of the estimates are needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
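# Illustrative usage sketch for ContinuousCritic: assuming a `critic` built with
# n_critics=2 and placeholder tensors `obs` and `actions`, the clipped double-Q
# estimate used in TD3/SAC-style updates looks like:
#
#   q1, q2 = critic(obs, actions)               # one Q-estimate per critic network
#   target_q = th.min(q1, q2)                   # clipped Q-learning: keep the smaller estimate
#   q1_only = critic.q1_forward(obs, actions)   # cheaper path when only qf0 is needed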
def create_sde_features_extractor(
features_dim: int, sde_net_arch: List[int], activation_fn: Type[nn.Module]
) -> Tuple[nn.Sequential, int]:
"""
Create the neural network that will be used to extract features
for the gSDE exploration function.
:param features_dim:
:param sde_net_arch:
:param activation_fn:
:return:
"""
# Special case: when using states as features (i.e. sde_net_arch is an empty list)
# don't use any activation function
sde_activation = activation_fn if len(sde_net_arch) > 0 else None
latent_sde_net = create_mlp(features_dim, -1, sde_net_arch, activation_fn=sde_activation, squash_output=False)
latent_sde_dim = sde_net_arch[-1] if len(sde_net_arch) > 0 else features_dim
sde_features_extractor = nn.Sequential(*latent_sde_net)
return sde_features_extractor, latent_sde_dim
_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]
def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:
"""
Returns the registered policy from the base type and name.
See `register_policy` for registering policies and explanation.
:param base_policy_type: the base policy class
:param name: the policy name
:return: the policy
"""
if base_policy_type not in _policy_registry:
raise KeyError(f"Error: the policy type {base_policy_type} is not registered!")
if name not in _policy_registry[base_policy_type]:
raise KeyError(
f"Error: unknown policy type {name},"
f"the only registed policy type are: {list(_policy_registry[base_policy_type].keys())}!"
)
return _policy_registry[base_policy_type][name]
def register_policy(name: str, policy: Type[BasePolicy]) -> None:
"""
Register a policy, so it can be called using its name.
e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).
The goal here is to standardize policy naming, e.g.
all algorithms can call upon "MlpPolicy" or "CnnPolicy",
and they receive respective policies that work for them.
Consider following:
OnlinePolicy
-- OnlineMlpPolicy ("MlpPolicy")
-- OnlineCnnPolicy ("CnnPolicy")
OfflinePolicy
-- OfflineMlpPolicy ("MlpPolicy")
-- OfflineCnnPolicy ("CnnPolicy")
    Two policies have the name "MlpPolicy" and two have "CnnPolicy".
In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)
is given and used to select and return the correct policy.
:param name: the policy name
:param policy: the policy class
"""
sub_class = None
for cls in BasePolicy.__subclasses__():
if issubclass(policy, cls):
sub_class = cls
break
if sub_class is None:
raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!")
if sub_class not in _policy_registry:
_policy_registry[sub_class] = {}
if name in _policy_registry[sub_class]:
# Check if the registered policy is same
# we try to register. If not so,
# do not override and complain.
if _policy_registry[sub_class][name] != policy:
raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.")
_policy_registry[sub_class][name] = policy
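# Illustrative usage sketch for the registry helpers above; the string names and
# classes are examples only:
#
#   register_policy("MlpPolicy", ActorCriticPolicy)
#   register_policy("CnnPolicy", ActorCriticCnnPolicy)
#   policy_class = get_policy_from_name(ActorCriticPolicy, "MlpPolicy")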
| [] |
2024-01-10 | Kkhokho/Demo_Langchain | Agents_Demo.py | import os
import requests
from langchain.llms import OpenAI
from langchain.agents import tool
from langchain.agents import initialize_agent, AgentType
os.environ["OPENAI_API_KEY"] = "API_KEY"
llm = OpenAI(temperature=0.3)
# Define tool for agent
@tool("search", return_direct=True)
def search(query: str) -> str:
"""
    Call this tool when the user needs to search for information about the input string (the `query` argument); it returns the search results to the user.
"""
return f"Search API Results for {query}"
tools = [search]
agent = initialize_agent(
tools,
llm,
agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
command = "Give me the list of posts related to me"
result = agent.run(command)
print(result) | [] |
2024-01-10 | Kkhokho/Demo_Langchain | model_demo.py | from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.chat_models import ChatOpenAI
import os
os.environ["OPENAI_API_KEY"] = "API_KEY"
# temperature controls the randomness of the answer
chat = ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0.3)
messages = [
SystemMessage(content="You are an expert data scientist"),
HumanMessage(content="Write a Python script that trains a neural network on simulated data "),
AIMessage(content="""
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Generate simulated data
np.random.seed(42)
X = np.random.rand(1000, 10) # 1000 samples, 10 features
y = (X[:, 0] + X[:, 1] > 1).astype(int) # Binary classification task
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Build a simple neural network model
model = tf.keras.Sequential([
tf.keras.layers.Dense(64, activation='relu', input_shape=(10,)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.2)
# Evaluate the model on the test set
y_pred = (model.predict(X_test) > 0.5).astype(int)
accuracy = accuracy_score(y_test, y_pred)
print("Test Accuracy:", accuracy)
"""),
]
response=chat(messages)
print(response.content,end='\n') | [
"You are an expert data scientist",
"\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# Generate simulated data\nnp.random.seed(42)\nX = np.random.rand(1000, 10) # 1000 samples, 10 features\ny = (X[:, 0] + X[:, 1] > 1).astype(int) # Binary classification task\n\n# Split the data into training and testing sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n# Build a simple neural network model\nmodel = tf.keras.Sequential([\n tf.keras.layers.Dense(64, activation='relu', input_shape=(10,)),\n tf.keras.layers.Dense(1, activation='sigmoid')\n])\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# Train the model\nmodel.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.2)\n\n# Evaluate the model on the test set\ny_pred = (model.predict(X_test) > 0.5).astype(int)\naccuracy = accuracy_score(y_test, y_pred)\n\nprint(\"Test Accuracy:\", accuracy)\n ",
"Write a Python script that trains a neural network on simulated data "
] |
2024-01-10 | Kkhokho/Demo_Langchain | Embeddings_VectorStores.py | from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import os
os.environ["OPENAI_API_KEY"] = "API_KEY"
file_path = "C:\\Users\\DELL\\OneDrive - Hanoi University of Science and Technology\\Tài liệu\\Demo\\data.txt"
with open(file_path, "r", encoding="utf-8") as file:
text_content = file.read()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 100,
chunk_overlap = 0,
)
texts = text_splitter.create_documents([text_content])
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(texts, embeddings)
# Do a simple vector similarity search
query = "autoencoder?"
result = db.similarity_search(query)
print("Câu trả lời: \n");
print(result[0])
| [] |
2024-01-10 | Kkhokho/Demo_Langchain | Chain_Demo.py | # Import schema for chat messages and ChatOpenAI in order to query chat models GPT-3.5-turbo or GPT-4
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.vectorstores import FAISS
import os
# Set your OpenAI API key here
os.environ["OPENAI_API_KEY"] = "API_KEY"
# Import prompt and define PromptTemplate
prompt_first = PromptTemplate(
input_variables=["name"],
template="""You are very skilled at searching for information about NFTs.
Let's gather information about {name} NFTs on rarible, niftygateway, binance, Opensea.""",
)
prompt_second = PromptTemplate(
input_variables=["name"],
template="Retrieve specific values, owner, and creator information of {name} NFTs.",
)
# Initialize the language model
llm = OpenAI()
# Import LLMChain and define chain with language model and prompt as arguments.
chain_first = LLMChain(llm=llm, prompt=prompt_first)
chain_second = LLMChain(llm=llm, prompt=prompt_second)
# Run the chain only specifying the input variable.
overall_chain = SimpleSequentialChain(chains=[chain_first, chain_second], verbose=True)
explanation = overall_chain.run("Nexian Gem")
# Print the chain's output
# print(explanation) | [
"name",
"You are very skilled at searching for information about NFTs. \nLet's gather information about {name} NFTs on rarible, niftygateway, binance, Opensea.",
"Retrieve specific values, owner, and creator information of {name} NFTs."
] |
2024-01-10 | Kkhokho/Demo_Langchain | Prompt_Template.py | import os
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from langchain.chains import LLMChain
os.environ["OPENAI_API_KEY"] = "API_KEY"
# Text model example
llm = OpenAI(temperature=0.3)
template = """
Search exactly price, current owner, creator information for {name} NFTs on {name_place} marketplace.
"""
prompt_template = PromptTemplate(
input_variables=["content", "style"],
template=template,
)
# Print the template after format
print(prompt_template.format(name="Nexian Gem", name_place="rarible"))
# Save prompt to json
prompt_template.save("Info.json")
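# A minimal sketch of reloading the template saved above; `load_prompt` comes
# from langchain.prompts and "Info.json" is the file written by the save() call:
#
#   from langchain.prompts import load_prompt
#   reloaded_template = load_prompt("Info.json")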
# Define a chain
chain = LLMChain(llm=llm, prompt=prompt_template)
print(chain.run(name="Nexian", name_place="rarible"))
# First, create the list of few shot examples.
examples = [
{
"name": "LENA",
"price": "$10.00",
"owner": "Khôi",
"creator": "albert herz",
"location": "niftygateway",
},
{
"name": "LENA",
"price": "$3,120",
"owner": "sikoslovake",
"creator": "Ies",
"location": "rarible",
}
]
example_formatter_template = """
Input name from user: {name}
The information extracted from above command::\n
----
Price: {price}\n
Current owner: {owner}\n
Creator: {creator}\n
MarketPlace: {location}\n
"""
example_prompt = PromptTemplate(
input_variables=["name", "price", "owner", "creator", "location"],
template=example_formatter_template,
)
few_shot_prompt = FewShotPromptTemplate(
examples = examples,
example_prompt = example_prompt,
suffix="Input command from user: {name}\nThe information extracted from above command:",
input_variables=["name"],
example_separator="\n\n",
)
chain = LLMChain(llm=llm, prompt=few_shot_prompt)
print(chain.run(name = "Nexian")) | [
"\n\n",
"location",
"Input command from user: {name}\nThe information extracted from above command:",
"name",
"owner",
"\n Search exactly price, current owner, creator information for {name} NFTs on {name_place} marketplace.\n",
"\n Input name from user: {name}\n The information extracted from above command::\n\n ----\n Price: {price}\n\n Current owner: {owner}\n\n Creator: {creator}\n\n MarketPlace: {location}\n\n",
"content"
] |
2024-01-10 | KalenShamy/peer-help | prompts~success_criteria.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def success_criteria(string):
""" To evaluate a product's success criteria
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the success criteria section of a product specification. Evaluate how well the success criteria has been described. Give specific feedback on what can be improved. Write several in-depth sentences.\n{string}",
temperature = 1,
max_tokens = 512,
top_p = 0.3,
frequency_penalty = 0,
presence_penalty = 0
)
return response["choices"][0]["text"]
except Exception as e:
return f"solution: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "If our product isn’t appealing to users through the feedback it provides, and they don’t find the product useful, then users won’t use it — simple as that. For that reason, the user experience is also an important aspect of our success criteria and one that we will use to make adjustments to the underlying model. In addition, another criterion for success is whether our user trusts that the model is competent at generating quality product specs. If the model were to take the feedback it gives to the user, and iterates on it’s own by editing the section it’s assessing, would the edited section be an improvement over the original section? Basically, does the model knows what it’s doing when it comes to assessing product specs?"
print(success_criteria(TEST_INPUT))"""
| [
"The following paragraph is the success criteria section of a product specification. Evaluate how well the success criteria has been described. Give specific feedback on what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~schedule.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def schedule_model(string):
""" To evaluate a team's planned schedule of building their product
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the schedule section of a product specification. Evaluate how well the schedule has been written and planned out while giving specific feedback what can be improved. Write several in-depth sentences.\n{string}",
temperature = 1,
max_tokens = 512,
top_p = 0.2,
frequency_penalty = 0,
presence_penalty = 0
)
return response["choices"][0]["text"]
except Exception as e:
return f"schedule: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "**Week 2:** Finalized product spec + each team members must have completed tutorials / reviewed resources and be able to speak to what is required as part of the best practices for their part in the project\
**Week 5:** Local MVP of the product\
**Week 8:** Live MVP of the product that someone else can use\
**Week 10:** Launched on [Product Hunt](https://producthunt.com/) (Note it takes 1 week after account creation to post)"
print(schedule_model(TEST_INPUT))"""
| [
"The following paragraph is the schedule section of a product specification. Evaluate how well the schedule has been written and planned out while giving specific feedback what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~happy_path.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def happy_path_model(string):
""" To evaluate a product's intended happy path
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the text
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the happy path section of a product specification. Evaluate how well the happy path has been written and give specific feedback on what can be improved. Write several in-depth sentences.\n{string}",
temperature = 0.5,
max_tokens = 512,
top_p = 0.5,
frequency_penalty = 0,
presence_penalty = 0
)
return response["choices"][0]["text"]
except Exception as e:
return f"happy_path: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "1. Prompt-engineering GPT-3 to numerically evaluate sections to determine the prompt that is given to the GPT-3 model for feedback generation.\
2. Prompt-engineering GPT-3 to generate feedback based on numerical scores given by the scoring method.\
3. Extracting and parsing text from Notion product specs into individual sections (problem statement, solution, etc.)\
4. Backend to feed sections to the scoring method to the GPT-3 model using a bot command and generating an embed."
print(happy_path_model(TEST_INPUT))"""
| [
"The following paragraph is the happy path section of a product specification. Evaluate how well the happy path has been written and give specific feedback on what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~problem.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def problem_model(string):
""" To evaluate a product's problem statement
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the problem statement section of a product specification. Evaluate how well the problem statement has been written and give specific feedback on what can be improved. Proposed solutions are not a part of the problem statement and should not be addressed. Write several in-depth sentences.\n{string}",
temperature = 0.5,
max_tokens = 512,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0
)
return response["choices"][0]["text"]
except Exception as e:
return f"problem: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "As a company, we rely on community feedback to ensure that our products bring value to the company by following industry best practices. However, there is not enough time in the day to give detailed, thoughtful feedback on the work of each person. This lack of feedback often leads to products that are not up to industry standards. Thoughtful feedback is especially important when drafting product specs. Without a solid product spec that clearly defines your problem statement with a high-level solution, your product is doomed from the start. Our company needs a way to give detailed feedback on product specs in a way that is efficient and effective despite the fact that not everyone has time to do so."
print(problem_model(TEST_INPUT))"""
| [
"The following paragraph is the problem statement section of a product specification. Evaluate how well the problem statement has been written and give specific feedback on what can be improved. Proposed solutions are not a part of the problem statement and should not be addressed. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~solution.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def solution_model(string):
""" To evaluate a product's solution statement
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the solution statement section of a product specification. Evaluate how well the solution statement has been written and give specific feedback on what can be improved. Write several in-depth sentences.\n{string}",
temperature = 1,
max_tokens = 512,
top_p = 0.3,
frequency_penalty = 0,
presence_penalty = 0
)
return response["choices"][0]["text"]
except Exception as e:
return f"solution: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "We introduce PEER — the Peer Editing and Efficiency Robot. PEER allows anyone to receive detailed, constructive feedback on their product spec to ensure that they are following best industry practices and that their product is off to a good start. Simply give PEER a link to your product spec as input, and PEER will give constructive feedback and suggestions on how to make sure you can make the value of your product clear, relieving you from spending the time needed to get feedback from a peer. Just go directly to the end goal with PEER. PEER will bring value to our company through its time-saving abilities by automating a crucial task generally performed by humans."
print(solution_model(TEST_INPUT))"""
| [
"The following paragraph is the solution statement section of a product specification. Evaluate how well the solution statement has been written and give specific feedback on what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~milestones.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def milestones_model(string):
""" To evaluate a product's milestones
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the milestones section of a product specification. Evaluate how well the milestones have been written and give specific feedback on what can be improved. Write several in-depth sentences.\n{string}",
temperature = 1,
max_tokens = 512,
top_p = 0.5,
frequency_penalty = 0,
presence_penalty = 0,
)
return response["choices"][0]["text"]
except Exception as e:
return f"milestones: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "- ML:\
- Automating prompt-engineering the GPT-3 API to generate valuable feedback.\
- Developing a scoring method to assign each section a score to determine the prompt that will be given to the GPT-3 model.\
- Testing and Evaluation of Generated Output.\
- Backend:\
- Engineering the Notion API to extract text from product specs.\
- Parsing the product spec into individual sections (e.g., problem statement, solution).\
- Pipeline That Passes Text to the Model\
- Testing and Deployment\
- Frontend:\
- Bot Development and Output Display\
- Testing and Deployment\
\
In addition, our milestones also include ensuring that our success criteria is being met by monitoring and prioritizing our success metrics - or at least the metrics we can monitor for now (linter score, amount of testing code)."
print(milestones_model(TEST_INPUT))"""
| [
"The following paragraph is the milestones section of a product specification. Evaluate how well the milestones have been written and give specific feedback on what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~target_users.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def target_users_model(string):
""" To evaluate how well the product spec explains its target userbase
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the target users section of a product specification. Evaluate how well it has been written and give specific feedback on what can be improved. Write several in-depth sentences.\n{string}",
temperature = 0.2,
max_tokens = 512,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 1
)
return response["choices"][0]["text"]
except Exception as e:
return f"target_users: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "Our solution saves time for internal team members and their peers or supervisors by reducing the number of exchanges needed to iterate and improve their product spec. It also saves time for members of the AI Camp Discord who are looking for feedback on their product spec in the same manner. On a larger scale, anyone writing product specs has this problem, which presents a greater opportunity for us. However, for the MVP, we’re focused on product spec drafting by the AI Camp Internal Team."
print(target_users_model(TEST_INPUT))"""
| [
"The following paragraph is the target users section of a product specification. Evaluate how well it has been written and give specific feedback on what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | KalenShamy/peer-help | prompts~tech_stack.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("API_KEY")
def tech_stack_model(string):
""" To evaluate how well the product spec lists their tech stack
Args:
string (str): section of text extracted from Notion
Returns:
str: GPT's evaluation of the input
"""
try:
response = openai.Completion.create(
model = "text-davinci-002",
prompt = f"The following paragraph is the technology stack section of a product specification. Evaluate how well the technology stack has been written and give specific feedback on what can be improved. Write several in-depth sentences.\n{string}",
temperature = 1,
max_tokens = 512,
top_p = 0.5,
frequency_penalty = 0,
presence_penalty = 1
)
return response["choices"][0]["text"]
except Exception as e:
return f"tech_stack: {e}" # placeholder for now
# for internal testing
"""TEST_INPUT = "- Python (Discord.py, Django)\
- OpenAI’s GPT-3 API\
- Amazon EC2"
print(tech_stack_model(TEST_INPUT))"""
| [
"The following paragraph is the technology stack section of a product specification. Evaluate how well the technology stack has been written and give specific feedback on what can be improved. Write several in-depth sentences.\nPLACEHOLDER"
] |
2024-01-10 | Heidelberg-NLP/VALSE | unimodal_valse_eval.py | import torch
import numpy as np
import sys
from tqdm import tqdm
from config import DATA
from read_foil_dataset import read_foils
def load_model(which):
device = 'cuda'
if which == 'gpt2':
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
model_id = 'gpt2-large'
model = GPT2LMHeadModel.from_pretrained(model_id).to(device)
tokenizer = GPT2TokenizerFast.from_pretrained(model_id)
max_length = model.config.n_positions
elif which == 'gpt1':
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt').to(device)
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
max_length = model.config.n_positions
else:
raise NotImplementedError("Implemented models are gpt2 and gpt1.")
return model, device, tokenizer, max_length
def compute_ppl(test_sentence):
''' Compute the perplexity of a sentence. '''
encodings = tokenizer(test_sentence, return_tensors='pt')
lls = []
for i in range(0, encodings.input_ids.size(1), stride):
begin_loc = max(i + stride - max_length, 0)
end_loc = i + stride
input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
target_ids = input_ids.clone()
target_ids[:, :-stride] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
log_likelihood = outputs[0] * stride
lls.append(log_likelihood)
ppl = torch.exp(torch.stack(lls).sum() / i)
return ppl
which = sys.argv[1]
print(f"Running experiments with {which} unimodal model.")
model, device, tokenizer, max_length = load_model(which)
for instrument, foils_path in DATA.items():
foils_data = read_foils(foils_path)
count, foil_detected = 0, 0
ppl_correct = []
ppl_foils = []
tested_foil_ids = {}
for foil_id, foil in tqdm(foils_data.items()):
caption_fits = foil['mturk']['caption']
if caption_fits >= 2: # MTURK filtering!
test_sentences = [foil["caption"], foil["foil"]]
stride = 3
max_length = model.config.n_positions
ppls = []
for i, test_sentence in enumerate(test_sentences):
ppl = compute_ppl(test_sentence).to('cpu')
ppls.append(ppl)
if i == 0:
ppl_correct.append(ppl)
else:
ppl_foils.append(ppl)
if ppls[0] < ppls[1]: # ppl smaller is better
foil_detected += 1
count += 1
print(f'{instrument}: {which} could detect {foil_detected / count * 100:.2f}% of foils (pairwise).')
print(f'The average perplexity for correct sentences is {np.mean(ppl_correct)} and for foils is {np.mean(ppl_foils)}.')
| [] |
2024-01-10 | afg1/uniprot-demos | residue-specific-information~residue_specific_data.py | from langchain.prompts.chat import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
from langchain.chains import LLMChain
import logging
from langchain.chat_models import ChatAnthropic, ChatOpenAI
from langchain.callbacks import get_openai_callback
import polars as pl
def get_model(source: str, kwargs):
assert "temperature" in kwargs, "temperature must be specified"
## Langchain wants temp explicitly stated, so here we go
temperature = kwargs["temperature"]
del kwargs["temperature"]
if source.lower() == "chatgpt":
logging.info("Initializing OpenAI chatGPT LLM")
llm = ChatOpenAI(
model_name="gpt-3.5-turbo-0613",
temperature=temperature,
model_kwargs=kwargs,
)
elif source.lower() == "claude":
logging.info("Initializing Anthropic Claude LLM")
llm = ChatAnthropic(
model="claude-instant-1.1", temperature=temperature, model_kwargs=kwargs
)
return llm
system_instruction = (
"You are an academic with experience in molecular biology. "
"You always answer in a factual unbiased way."
)
def get_extraction_prompt():
extraction_context = (
"Does the paper delimited with triple quotes below contain any per residue information? "
"Per-residue information might look like the following: "
"His96, meaning residue position 99 is a histidine. "
"For example in text it might look like this: "
"A hydrophilic pocket formed by the residues Thr199, Thr200, "
"and His96, as mentioned above, and a hydrophobic one defined by "
"Val121 which is known to represent the binding pocket for the enzyme substrate CO2.\n"
"Summarise the information in a table which should look like this:\n"
"Residue Position|Amino Acid Residue|Notes|\n"
"Thr199|Threonine|Hydrogen bond with sulphonamide oxygen\n"
"Thr200|Threonine|Hydrophilic pocket for the nitro group\n"
"His96|Histidine|Hydrophilic pocket for the nitro group\n"
"Val121|Valine|Lost hydrophobic interaction with the aromatic ring due to steric hindrance from the N-substitution\n\n"
"Here is another example sentence:\n"
"This 100-Å-long tunnel starts at the active site residue Lys219 of urease, "
"exits HpUreB near Asp336 of the switch II region, passes through HpUreD "
"between the two layers of β sheets, enters HpUreF near Ala233, and reaches "
"the dimerization interface of HpUreF (Fig. 4A).\n"
"Residue Position|Amino Acid Residue|Notes\n"
"219|Lysine|Active site residue\n"
"336|Aspartate|Residue in switch II region\n"
"233|Ala|N/A\n"
"'''\n"
"{paper}\n"
"'''\n"
"Residue information table:\n"
)
system_prompt = SystemMessagePromptTemplate.from_template(system_instruction)
human_prompt = HumanMessagePromptTemplate.from_template(extraction_context)
chat_prompt = ChatPromptTemplate.from_messages([system_prompt, human_prompt])
return chat_prompt
def get_extraction_chain(llm, verbose=False) -> LLMChain:
prompt = get_extraction_prompt()
chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return chain
def find_residue_information(chain, paper):
with get_openai_callback() as cb:
summary = chain.run(
paper=paper
)
print(cb)
print(summary)
return summary
extra_args={}
extraction_chain = get_extraction_chain(
get_model(
"chatgpt",
{"temperature": 0.1, "presence_penalty": 0, "frequency_penalty": 0}
| extra_args,
),
verbose=True,
)
paper = """The VapBC system, which belongs to the type II toxin–antitoxin (TA) system, is the most abundant and widely studied system in Mycobacterium tuberculosis. The VapB antitoxin suppresses the activity of the VapC toxin through a stable protein–protein complex. However, under environmental stress, the balance between toxin and antitoxin is disrupted, leading to the release of free toxin and bacteriostatic state. This study introduces the Rv0229c, a putative VapC51 toxin, and aims to provide a better understanding of its discovered function. The structure of the Rv0229c shows a typical PIN-domain protein, exhibiting an β1-α1-α2-β2-α3-α4-β3-α5-α6-β4-α7-β5 topology. The structure-based sequence alignment showed four electronegative residues in the active site of Rv0229c, which is composed of Asp8, Glu42, Asp95, and Asp113. By comparing the active site with existing VapC proteins, we have demonstrated the justification for naming it VapC51 at the molecular level. In an in vitro ribonuclease activity assay, Rv0229c showed ribonuclease activity dependent on the concentration of metal ions such as Mg2+ and Mn2+. In addition, magnesium was found to have a greater effect on VapC51 activity than manganese. Through these structural and experimental studies, we provide evidence for the functional role of Rv0229c as a VapC51 toxin. Overall, this study aims to enhance our understanding of the VapBC system in M. tuberculosis."""
examples = pl.read_csv("examples.csv")
print(examples)
examples = examples.with_columns(residue_table=pl.col("input_text").apply(lambda x: find_residue_information(extraction_chain, x)))
examples.write_csv("examples_with_residue_table.csv")
exit()
| [
"[PLACEHOLDER, PLACEHOLDER]"
] |
2024-01-10 | afg1/uniprot-demos | evidence-retrofit~retrofit.py | import os
import pickle
from tqdm import tqdm
import gradio as gr
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema import Document
docs = []
for d in tqdm(os.listdir("papers")):
abstract = open(os.path.join("papers", d)).read().split("\n\n")[0]
try:
        pmcid = os.path.splitext(d)[0]  # drop the ".txt" extension to recover the PMCID
docs.append(Document(page_content=abstract, metadata={"pmcid": pmcid, "section": "abstract"}) )
except:
print(f"error with {d}")
passage_encoder = OpenAIEmbeddings()
if os.path.exists("ADH"):
faiss = FAISS.load_local("ADH", passage_encoder)
else:
faiss = FAISS.from_documents(docs, passage_encoder)
faiss.save_local("ADH")
def make_query(query, faiss):
docs = faiss.max_marginal_relevance_search(query, k=10)
result = "".join(f"{doc.metadata['pmcid']}: {doc.page_content}\n" for doc in docs)
return result
visualisation = gr.Blocks()
with visualisation:
gr.Markdown(
"Search queries about linc00174"
)
with gr.Row():
query_input = gr.Textbox(label="Query")
search_button = gr.Button(value="Run...")
with gr.Row():
result = gr.Textbox(label="Result")
query_input.submit(lambda x: make_query(x, faiss),inputs=query_input,outputs=[result])
search_button.click(lambda x: make_query(x, faiss),inputs=query_input,outputs=[result])
visualisation.queue(concurrency_count=1)
visualisation.launch(server_name="0.0.0.0", enable_queue=True, server_port=7860)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~runnables~test_hub.py | from typing import Any
from unittest.mock import Mock, patch
from langchain.prompts import ChatPromptTemplate
from langchain.runnables.hub import HubRunnable
from langchain.schema.runnable.base import ConfigurableField
@patch("langchain.hub.pull")
def test_hub_runnable(mock_pull: Mock) -> None:
mock_pull.return_value = ChatPromptTemplate.from_messages(
[("system", "a"), ("user", "b")]
)
basic: HubRunnable = HubRunnable("efriis/my-prompt")
bound = basic.bound
assert isinstance(bound, ChatPromptTemplate)
assert len(bound.messages) == 2
repo_dict = {
"efriis/my-prompt-1": ChatPromptTemplate.from_messages(
[("system", "a"), ("user", "1")]
),
"efriis/my-prompt-2": ChatPromptTemplate.from_messages(
[("system", "a"), ("user", "2")]
),
}
def repo_lookup(owner_repo_commit: str, **kwargs: Any) -> ChatPromptTemplate:
return repo_dict[owner_repo_commit]
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_alternative(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable[Any, Any] = HubRunnable("efriis/my-prompt-1")
obj_a1 = original.configurable_alternatives(
ConfigurableField(id="owner_repo_commit", name="Hub ID"),
default_key="a1",
a2=HubRunnable("efriis/my-prompt-2"),
)
obj_a2 = obj_a1.with_config(configurable={"owner_repo_commit": "a2"})
templated = obj_a1.invoke({})
message_a1 = templated.messages[1]
assert message_a1.content == "1"
templated_2 = obj_a2.invoke({})
message_a2 = templated_2.messages[1]
assert message_a2.content == "2"
@patch("langchain.hub.pull")
def test_hub_runnable_configurable_fields(mock_pull: Mock) -> None:
mock_pull.side_effect = repo_lookup
original: HubRunnable[Any, Any] = HubRunnable("efriis/my-prompt-1")
obj_configurable = original.configurable_fields(
owner_repo_commit=ConfigurableField(id="owner_repo_commit", name="Hub ID"),
)
templated_1 = obj_configurable.invoke({})
assert templated_1.messages[1].content == "1"
templated_2 = obj_configurable.with_config(
configurable={"owner_repo_commit": "efriis/my-prompt-2"}
).invoke({})
assert templated_2.messages[1].content == "2"
| [
"[('system', 'a'), ('user', '2')]",
"owner_repo_commit",
"[('system', 'a'), ('user', 'b')]",
"efriis/my-prompt-2",
"[('system', 'a'), ('user', '1')]"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_vearch.py | # flake8: noqa
from langchain.docstore.document import Document
from langchain.vectorstores.vearch import Vearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
def test_vearch() -> None:
"""
Test end to end create vearch ,store vector into it and search
"""
texts = [
"Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量,可用于基于个人知识库的大模型应用",
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
metadatas = [
{
"source": (
"/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
)
},
{
"source": (
"/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
)
},
{
"source": (
"/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
)
},
]
vearch_db = Vearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
table_name="test_vearch",
metadata_path="./",
)
result = vearch_db.similarity_search(
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 1
)
assert result == [
Document(
page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
metadata={
"source": (
"/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
"three_body.txt"
)
},
)
]
def test_vearch_add_texts() -> None:
"""Test end to end adding of texts."""
texts = [
("Vearch 是一款存储大语言模型数据的向量数据库,用于存储和快速搜索模型embedding后的向量," "可用于基于个人知识库的大模型应用"),
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
"vearch 是基于C语言,go语言开发的,并提供python接口,可以直接通过pip安装",
]
metadatas = [
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
"three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
"three_body.txt"
},
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/"
"three_body.txt"
},
]
vearch_db = Vearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
metadatas=metadatas,
table_name="test_vearch",
metadata_path="./",
)
vearch_db.add_texts(
texts=["Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库"],
metadatas=[
{
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
},
],
)
result = vearch_db.similarity_search(
"Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库", 2
)
assert result == [
Document(
page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
metadata={
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
},
),
Document(
page_content="Vearch 支持OpenAI, Llama, ChatGLM等模型,以及LangChain库",
metadata={
"source": "/data/zhx/zhx/langchain-ChatGLM_new/knowledge_base/santi/three_body.txt"
},
),
]
| [] |
2024-01-10 | RohanDey02/langchain | libs~cli~langchain_cli~dev_scripts.py | """
Development Scripts for Hub Packages
"""
from fastapi import FastAPI
from langserve.packages import add_package_route
from langchain_cli.utils.packages import get_package_root
def create_demo_server():
"""
Creates a demo server for the current hub package.
"""
app = FastAPI()
package_root = get_package_root()
add_package_route(app, package_root, "")
return app
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~chains~test_transform.py | """Test transform chain."""
from typing import Dict
import pytest
from langchain.chains.transform import TransformChain
def dummy_transform(inputs: Dict[str, str]) -> Dict[str, str]:
"""Transform a dummy input for tests."""
outputs = inputs
outputs["greeting"] = f"{inputs['first_name']} {inputs['last_name']} says hello"
del outputs["first_name"]
del outputs["last_name"]
return outputs
def test_transform_chain() -> None:
"""Test basic transform chain."""
transform_chain = TransformChain(
input_variables=["first_name", "last_name"],
output_variables=["greeting"],
transform=dummy_transform,
)
input_dict = {"first_name": "Leroy", "last_name": "Jenkins"}
response = transform_chain(input_dict)
expected_response = {"greeting": "Leroy Jenkins says hello"}
assert response == expected_response
def test_transform_chain_bad_inputs() -> None:
"""Test basic transform chain."""
transform_chain = TransformChain(
input_variables=["first_name", "last_name"],
output_variables=["greeting"],
transform=dummy_transform,
)
input_dict = {"name": "Leroy", "last_name": "Jenkins"}
with pytest.raises(ValueError):
_ = transform_chain(input_dict)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~pinecone.py | from __future__ import annotations
import logging
import uuid
import warnings
from typing import TYPE_CHECKING, Any, Callable, Iterable, List, Optional, Tuple, Union
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils.iter import batch_iterate
from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance
if TYPE_CHECKING:
from pinecone import Index
logger = logging.getLogger(__name__)
class Pinecone(VectorStore):
"""`Pinecone` vector store.
To use, you should have the ``pinecone-client`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
index = pinecone.Index("langchain-demo")
embeddings = OpenAIEmbeddings()
vectorstore = Pinecone(index, embeddings.embed_query, "text")
"""
def __init__(
self,
index: Any,
embedding: Union[Embeddings, Callable],
text_key: str,
namespace: Optional[str] = None,
distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE,
):
"""Initialize with Pinecone client."""
try:
import pinecone
except ImportError:
raise ImportError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
if not isinstance(embedding, Embeddings):
warnings.warn(
"Passing in `embedding` as a Callable is deprecated. Please pass in an"
" Embeddings object instead."
)
if not isinstance(index, pinecone.index.Index):
raise ValueError(
f"client should be an instance of pinecone.index.Index, "
f"got {type(index)}"
)
self._index = index
self._embedding = embedding
self._text_key = text_key
self._namespace = namespace
self.distance_strategy = distance_strategy
@property
def embeddings(self) -> Optional[Embeddings]:
"""Access the query embedding object if available."""
if isinstance(self._embedding, Embeddings):
return self._embedding
return None
def _embed_documents(self, texts: Iterable[str]) -> List[List[float]]:
"""Embed search docs."""
if isinstance(self._embedding, Embeddings):
return self._embedding.embed_documents(list(texts))
return [self._embedding(t) for t in texts]
def _embed_query(self, text: str) -> List[float]:
"""Embed query text."""
if isinstance(self._embedding, Embeddings):
return self._embedding.embed_query(text)
return self._embedding(text)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
batch_size: int = 32,
embedding_chunk_size: int = 1000,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Upsert optimization is done by chunking the embeddings and upserting them.
This is done to avoid memory issues and optimize using HTTP based embeddings.
For OpenAI embeddings, use pool_threads>4 when constructing the pinecone.Index,
embedding_chunk_size>1000 and batch_size~64 for best performance.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids to associate with the texts.
namespace: Optional pinecone namespace to add the texts to.
batch_size: Batch size to use when adding the texts to the vectorstore.
embedding_chunk_size: Chunk size to use when embedding the texts.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if namespace is None:
namespace = self._namespace
texts = list(texts)
ids = ids or [str(uuid.uuid4()) for _ in texts]
metadatas = metadatas or [{} for _ in texts]
for metadata, text in zip(metadatas, texts):
metadata[self._text_key] = text
# For loops to avoid memory issues and optimize when using HTTP based embeddings
# The first loop runs the embeddings, it benefits when using OpenAI embeddings
        # The second loop runs the pinecone upserts asynchronously.
for i in range(0, len(texts), embedding_chunk_size):
chunk_texts = texts[i : i + embedding_chunk_size]
chunk_ids = ids[i : i + embedding_chunk_size]
chunk_metadatas = metadatas[i : i + embedding_chunk_size]
embeddings = self._embed_documents(chunk_texts)
async_res = [
self._index.upsert(
vectors=batch,
namespace=namespace,
async_req=True,
**kwargs,
)
for batch in batch_iterate(
batch_size, zip(chunk_ids, embeddings, chunk_metadatas)
)
]
[res.get() for res in async_res]
return ids
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return pinecone documents most similar to query, along with scores.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
return self.similarity_search_by_vector_with_score(
self._embed_query(query), k=k, filter=filter, namespace=namespace
)
def similarity_search_by_vector_with_score(
self,
embedding: List[float],
*,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
"""Return pinecone documents most similar to embedding, along with scores."""
if namespace is None:
namespace = self._namespace
docs = []
results = self._index.query(
[embedding],
top_k=k,
include_metadata=True,
namespace=namespace,
filter=filter,
)
for res in results["matches"]:
metadata = res["metadata"]
if self._text_key in metadata:
text = metadata.pop(self._text_key)
score = res["score"]
docs.append((Document(page_content=text, metadata=metadata), score))
else:
logger.warning(
f"Found document with no `{self._text_key}` key. Skipping."
)
return docs
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return pinecone documents most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: Dictionary of argument(s) to filter on metadata
namespace: Namespace to search in. Default will search in '' namespace.
Returns:
List of Documents most similar to the query and score for each
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, filter=filter, namespace=namespace, **kwargs
)
return [doc for doc, _ in docs_and_scores]
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, max_inner_product "
"(dot product), or euclidean"
)
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if namespace is None:
namespace = self._namespace
results = self._index.query(
[embedding],
top_k=fetch_k,
include_values=True,
include_metadata=True,
namespace=namespace,
filter=filter,
)
mmr_selected = maximal_marginal_relevance(
np.array([embedding], dtype=np.float32),
[item["values"] for item in results["matches"]],
k=k,
lambda_mult=lambda_mult,
)
selected = [results["matches"][i]["metadata"] for i in mmr_selected]
return [
Document(page_content=metadata.pop((self._text_key)), metadata=metadata)
for metadata in selected
]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
Returns:
List of Documents selected by maximal marginal relevance.
"""
embedding = self._embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult, filter, namespace
)
@classmethod
def get_pinecone_index(
cls,
index_name: Optional[str],
pool_threads: int = 4,
) -> Index:
"""Return a Pinecone Index instance.
Args:
index_name: Name of the index to use.
pool_threads: Number of threads to use for index upsert.
Returns:
Pinecone Index instance."""
try:
import pinecone
except ImportError:
raise ValueError(
"Could not import pinecone python package. "
"Please install it with `pip install pinecone-client`."
)
indexes = pinecone.list_indexes() # checks if provided index exists
if index_name in indexes:
index = pinecone.Index(index_name, pool_threads=pool_threads)
elif len(indexes) == 0:
raise ValueError(
"No active indexes found in your Pinecone project, "
"are you sure you're using the right Pinecone API key and Environment? "
"Please double check your Pinecone dashboard."
)
else:
raise ValueError(
f"Index '{index_name}' not found in your Pinecone project. "
f"Did you mean one of the following indexes: {', '.join(indexes)}"
)
return index
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
batch_size: int = 32,
text_key: str = "text",
namespace: Optional[str] = None,
index_name: Optional[str] = None,
upsert_kwargs: Optional[dict] = None,
pool_threads: int = 4,
embeddings_chunk_size: int = 1000,
**kwargs: Any,
) -> Pinecone:
"""Construct Pinecone wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Adds the documents to a provided Pinecone index
This is intended to be a quick way to get started.
The `pool_threads` affects the speed of the upsert operations.
Example:
.. code-block:: python
from langchain.vectorstores import Pinecone
from langchain.embeddings import OpenAIEmbeddings
import pinecone
# The environment should be the one specified next to the API key
# in your Pinecone console
pinecone.init(api_key="***", environment="...")
embeddings = OpenAIEmbeddings()
pinecone = Pinecone.from_texts(
texts,
embeddings,
index_name="langchain-demo"
)
"""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
pinecone = cls(pinecone_index, embedding, text_key, namespace, **kwargs)
pinecone.add_texts(
texts,
metadatas=metadatas,
ids=ids,
namespace=namespace,
batch_size=batch_size,
embedding_chunk_size=embeddings_chunk_size,
**(upsert_kwargs or {}),
)
return pinecone
@classmethod
def from_existing_index(
cls,
index_name: str,
embedding: Embeddings,
text_key: str = "text",
namespace: Optional[str] = None,
pool_threads: int = 4,
) -> Pinecone:
"""Load pinecone vectorstore from index name."""
pinecone_index = cls.get_pinecone_index(index_name, pool_threads)
return cls(pinecone_index, embedding, text_key, namespace)
def delete(
self,
ids: Optional[List[str]] = None,
delete_all: Optional[bool] = None,
namespace: Optional[str] = None,
filter: Optional[dict] = None,
**kwargs: Any,
) -> None:
"""Delete by vector IDs or filter.
Args:
ids: List of ids to delete.
filter: Dictionary of conditions to filter vectors to delete.
"""
if namespace is None:
namespace = self._namespace
if delete_all:
self._index.delete(delete_all=True, namespace=namespace, **kwargs)
elif ids is not None:
chunk_size = 1000
for i in range(0, len(ids), chunk_size):
chunk = ids[i : i + chunk_size]
self._index.delete(ids=chunk, namespace=namespace, **kwargs)
elif filter is not None:
self._index.delete(filter=filter, namespace=namespace, **kwargs)
else:
raise ValueError("Either ids, delete_all, or filter must be provided.")
return None
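# Illustrative usage sketch: loads an existing index and runs a similarity search.
# It assumes Pinecone credentials and an index named "langchain-demo" already exist;
# the api_key/environment values and the query string are placeholders.
if __name__ == "__main__":
    import pinecone
    from langchain.embeddings import OpenAIEmbeddings
    pinecone.init(api_key="***", environment="...")
    store = Pinecone.from_existing_index(
        index_name="langchain-demo", embedding=OpenAIEmbeddings()
    )
    for doc in store.similarity_search("What is LangChain?", k=2):
        print(doc.page_content)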
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~cache~test_redis_cache.py | """Test Redis cache functionality."""
import uuid
from typing import List, cast
import pytest
from langchain.cache import RedisCache, RedisSemanticCache
from langchain.globals import get_llm_cache, set_llm_cache
from langchain.load.dump import dumps
from langchain.schema import Generation, LLMResult
from langchain.schema.embeddings import Embeddings
from langchain.schema.messages import AIMessage, BaseMessage, HumanMessage
from langchain.schema.output import ChatGeneration
from tests.integration_tests.vectorstores.fake_embeddings import (
ConsistentFakeEmbeddings,
FakeEmbeddings,
)
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
REDIS_TEST_URL = "redis://localhost:6379"
def random_string() -> str:
return str(uuid.uuid4())
def test_redis_cache_ttl() -> None:
import redis
set_llm_cache(RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL), ttl=1))
llm_cache = cast(RedisCache, get_llm_cache())
llm_cache.update("foo", "bar", [Generation(text="fizz")])
key = llm_cache._key("foo", "bar")
assert llm_cache.redis.pttl(key) > 0
def test_redis_cache() -> None:
import redis
set_llm_cache(RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL)))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo"])
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
llm_cache = cast(RedisCache, get_llm_cache())
llm_cache.redis.flushall()
def test_redis_cache_chat() -> None:
import redis
set_llm_cache(RedisCache(redis_=redis.Redis.from_url(REDIS_TEST_URL)))
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content="foo")]
get_llm_cache().update(
dumps(prompt), llm_string, [ChatGeneration(message=AIMessage(content="fizz"))]
)
output = llm.generate([prompt])
expected_output = LLMResult(
generations=[[ChatGeneration(message=AIMessage(content="fizz"))]],
llm_output={},
)
assert output == expected_output
llm_cache = cast(RedisCache, get_llm_cache())
llm_cache.redis.flushall()
def test_redis_semantic_cache() -> None:
set_llm_cache(
RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
expected_output = LLMResult(
generations=[[Generation(text="fizz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
# expect different output now without cached result
assert output != expected_output
get_llm_cache().clear(llm_string=llm_string)
def test_redis_semantic_cache_multi() -> None:
set_llm_cache(
RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
)
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update(
"foo", llm_string, [Generation(text="fizz"), Generation(text="Buzz")]
)
output = llm.generate(
["bar"]
) # foo and bar will have the same embedding produced by FakeEmbeddings
expected_output = LLMResult(
generations=[[Generation(text="fizz"), Generation(text="Buzz")]],
llm_output={},
)
assert output == expected_output
# clear the cache
get_llm_cache().clear(llm_string=llm_string)
def test_redis_semantic_cache_chat() -> None:
set_llm_cache(
RedisSemanticCache(
embedding=FakeEmbeddings(), redis_url=REDIS_TEST_URL, score_threshold=0.1
)
)
llm = FakeChatModel()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
prompt: List[BaseMessage] = [HumanMessage(content="foo")]
get_llm_cache().update(
dumps(prompt), llm_string, [ChatGeneration(message=AIMessage(content="fizz"))]
)
output = llm.generate([prompt])
expected_output = LLMResult(
generations=[[ChatGeneration(message=AIMessage(content="fizz"))]],
llm_output={},
)
assert output == expected_output
get_llm_cache().clear(llm_string=llm_string)
@pytest.mark.parametrize("embedding", [ConsistentFakeEmbeddings()])
@pytest.mark.parametrize(
"prompts, generations",
[
# Single prompt, single generation
([random_string()], [[random_string()]]),
# Single prompt, multiple generations
([random_string()], [[random_string(), random_string()]]),
# Single prompt, multiple generations
([random_string()], [[random_string(), random_string(), random_string()]]),
# Multiple prompts, multiple generations
(
[random_string(), random_string()],
[[random_string()], [random_string(), random_string()]],
),
],
ids=[
"single_prompt_single_generation",
"single_prompt_multiple_generations",
"single_prompt_multiple_generations",
"multiple_prompts_multiple_generations",
],
)
def test_redis_semantic_cache_hit(
embedding: Embeddings, prompts: List[str], generations: List[List[str]]
) -> None:
set_llm_cache(RedisSemanticCache(embedding=embedding, redis_url=REDIS_TEST_URL))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
llm_generations = [
[
Generation(text=generation, generation_info=params)
for generation in prompt_i_generations
]
for prompt_i_generations in generations
]
for prompt_i, llm_generations_i in zip(prompts, llm_generations):
print(prompt_i)
print(llm_generations_i)
get_llm_cache().update(prompt_i, llm_string, llm_generations_i)
llm.generate(prompts)
assert llm.generate(prompts) == LLMResult(
generations=llm_generations, llm_output={}
)
| [
"foo",
"fizz"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~agents~format_scratchpad~test_log.py | from langchain.agents.format_scratchpad.log import format_log_to_str
from langchain.schema.agent import AgentAction
def test_single_agent_action_observation() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
expected_result = "Log1\nObservation: Observation1\nThought: "
assert format_log_to_str(intermediate_steps) == expected_result
def test_multiple_agent_actions_observations() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1"),
(AgentAction(tool="Tool2", tool_input="input2", log="Log2"), "Observation2"),
(AgentAction(tool="Tool3", tool_input="input3", log="Log3"), "Observation3"),
]
expected_result = """Log1\nObservation: Observation1\nThought: \
Log2\nObservation: Observation2\nThought: Log3\nObservation: \
Observation3\nThought: """
assert format_log_to_str(intermediate_steps) == expected_result
def test_custom_prefixes() -> None:
intermediate_steps = [
(AgentAction(tool="Tool1", tool_input="input1", log="Log1"), "Observation1")
]
observation_prefix = "Custom Observation: "
llm_prefix = "Custom Thought: "
expected_result = "Log1\nCustom Observation: Observation1\nCustom Thought: "
assert (
format_log_to_str(intermediate_steps, observation_prefix, llm_prefix)
== expected_result
)
def test_empty_intermediate_steps() -> None:
output = format_log_to_str([])
assert output == ""
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~tools~file_management~test_read.py | """Test the ReadFile tool."""
from pathlib import Path
from tempfile import TemporaryDirectory
from langchain.tools.file_management.read import ReadFileTool
def test_read_file_with_root_dir() -> None:
"""Test the ReadFile tool."""
with TemporaryDirectory() as temp_dir:
with (Path(temp_dir) / "file.txt").open("w") as f:
f.write("Hello, world!")
tool = ReadFileTool(root_dir=temp_dir)
result = tool.run("file.txt")
assert result == "Hello, world!"
# Check absolute files can still be passed if they lie within the root dir.
result = tool.run(str(Path(temp_dir) / "file.txt"))
assert result == "Hello, world!"
def test_read_file() -> None:
"""Test the ReadFile tool."""
with TemporaryDirectory() as temp_dir:
with (Path(temp_dir) / "file.txt").open("w") as f:
f.write("Hello, world!")
tool = ReadFileTool()
result = tool.run(str(Path(temp_dir) / "file.txt"))
assert result == "Hello, world!"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chains~test_react.py | """Integration test for self ask with search."""
from langchain.agents.react.base import ReActChain
from langchain.docstore.wikipedia import Wikipedia
from langchain.llms.openai import OpenAI
def test_react() -> None:
"""Test functionality on a prompt."""
llm = OpenAI(temperature=0, model_name="text-davinci-002")
react = ReActChain(llm=llm, docstore=Wikipedia())
question = (
"Author David Chanoff has collaborated with a U.S. Navy admiral "
"who served as the ambassador to the United Kingdom under "
"which President?"
)
output = react.run(question)
assert output == "Bill Clinton"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~chatglm.py | import logging
from typing import Any, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class ChatGLM(LLM):
"""ChatGLM LLM service.
Example:
.. code-block:: python
from langchain.llms import ChatGLM
endpoint_url = (
"http://127.0.0.1:8000"
)
ChatGLM_llm = ChatGLM(
endpoint_url=endpoint_url
)
"""
endpoint_url: str = "http://127.0.0.1:8000/"
"""Endpoint URL to use."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
max_token: int = 20000
"""Max token allowed to pass to the model."""
temperature: float = 0.1
"""LLM model temperature from 0 to 10."""
history: List[List] = []
"""History of the conversation"""
top_p: float = 0.7
"""Top P for nucleus sampling from 0 to 1"""
with_history: bool = False
"""Whether to use history or not"""
@property
def _llm_type(self) -> str:
return "chat_glm"
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to a ChatGLM LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = chatglm_llm("Who are you?")
"""
_model_kwargs = self.model_kwargs or {}
# HTTP headers for authorization
headers = {"Content-Type": "application/json"}
payload = {
"prompt": prompt,
"temperature": self.temperature,
"history": self.history,
"max_length": self.max_token,
"top_p": self.top_p,
}
payload.update(_model_kwargs)
payload.update(kwargs)
logger.debug(f"ChatGLM payload: {payload}")
# call api
try:
response = requests.post(self.endpoint_url, headers=headers, json=payload)
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
logger.debug(f"ChatGLM response: {response}")
if response.status_code != 200:
raise ValueError(f"Failed with response: {response}")
try:
parsed_response = response.json()
            # Check if the response content exists
if isinstance(parsed_response, dict):
content_keys = "response"
if content_keys in parsed_response:
text = parsed_response[content_keys]
else:
raise ValueError(f"No content in response : {parsed_response}")
else:
raise ValueError(f"Unexpected response type: {parsed_response}")
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised during decoding response from inference endpoint: {e}."
f"\nResponse: {response.text}"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
if self.with_history:
self.history = self.history + [[None, parsed_response["response"]]]
return text
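# Illustrative usage sketch: assumes a ChatGLM API server is running locally at the
# default endpoint (http://127.0.0.1:8000/); the prompt text is a placeholder.
if __name__ == "__main__":
    llm = ChatGLM(endpoint_url="http://127.0.0.1:8000/", temperature=0.1)
    print(llm("Who are you?"))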
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~llms~test_openai.py | import asyncio
import os
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from tenacity import wait_none
from langchain.llms import base
from langchain.llms.openai import OpenAI
from tests.unit_tests.callbacks.fake_callback_handler import (
FakeAsyncCallbackHandler,
FakeCallbackHandler,
)
os.environ["OPENAI_API_KEY"] = "foo"
@pytest.mark.requires("openai")
def test_openai_model_param() -> None:
llm = OpenAI(model="foo")
assert llm.model_name == "foo"
llm = OpenAI(model_name="foo")
assert llm.model_name == "foo"
@pytest.mark.requires("openai")
def test_openai_model_kwargs() -> None:
llm = OpenAI(model_kwargs={"foo": "bar"})
assert llm.model_kwargs == {"foo": "bar"}
@pytest.mark.requires("openai")
def test_openai_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
OpenAI(model_kwargs={"model_name": "foo"})
@pytest.mark.requires("openai")
def test_openai_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = OpenAI(foo="bar")
assert llm.model_kwargs == {"foo": "bar"}
@pytest.fixture
def mock_completion() -> dict:
return {
"id": "cmpl-3evkmQda5Hu7fcZavknQda3SQ",
"object": "text_completion",
"created": 1689989000,
"model": "text-davinci-003",
"choices": [
{"text": "Bar Baz", "index": 0, "logprobs": None, "finish_reason": "length"}
],
"usage": {"prompt_tokens": 1, "completion_tokens": 2, "total_tokens": 3},
}
def _patched_retry(*args: Any, **kwargs: Any) -> Any:
"""Patched retry for unit tests that does not wait."""
from tenacity import retry
assert "wait" in kwargs
kwargs["wait"] = wait_none()
r = retry(*args, **kwargs)
return r
@pytest.mark.requires("openai")
def test_openai_retries(mock_completion: dict) -> None:
llm = OpenAI()
mock_client = MagicMock()
completed = False
raised = False
import openai
def raise_once(*args: Any, **kwargs: Any) -> Any:
nonlocal completed, raised
if not raised:
raised = True
raise openai.error.APIError
completed = True
return mock_completion
mock_client.create = raise_once
callback_handler = FakeCallbackHandler()
# Patch the retry to avoid waiting during a unit test
with patch.object(base, "retry", _patched_retry):
with patch.object(
llm,
"client",
mock_client,
):
res = llm.predict("bar", callbacks=[callback_handler])
assert res == "Bar Baz"
assert completed
assert raised
assert callback_handler.retries == 1
@pytest.mark.requires("openai")
@pytest.mark.asyncio
async def test_openai_async_retries(mock_completion: dict) -> None:
llm = OpenAI()
mock_client = MagicMock()
completed = False
raised = False
import openai
async def araise_once(*args: Any, **kwargs: Any) -> Any:
nonlocal completed, raised
if not raised:
raised = True
raise openai.error.APIError
await asyncio.sleep(0)
completed = True
return mock_completion
mock_client.acreate = araise_once
callback_handler = FakeAsyncCallbackHandler()
# Patch the retry to avoid waiting during a unit test
with patch.object(base, "retry", _patched_retry):
with patch.object(
llm,
"client",
mock_client,
):
res = await llm.apredict("bar", callbacks=[callback_handler])
assert res == "Bar Baz"
assert completed
assert raised
assert callback_handler.retries == 1
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~storage~test_filesystem.py | import tempfile
from typing import Generator
import pytest
from langchain.storage.exceptions import InvalidKeyException
from langchain.storage.file_system import LocalFileStore
@pytest.fixture
def file_store() -> Generator[LocalFileStore, None, None]:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with the temporary directory as the root path
store = LocalFileStore(temp_dir)
yield store
def test_mset_and_mget(file_store: LocalFileStore) -> None:
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
file_store.mset(key_value_pairs)
# Get values for keys
values = file_store.mget(["key1", "key2"])
# Assert that the retrieved values match the original values
assert values == [b"value1", b"value2"]
def test_mdelete(file_store: LocalFileStore) -> None:
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("key2", b"value2")]
file_store.mset(key_value_pairs)
# Delete keys
file_store.mdelete(["key1"])
# Check if the deleted key is present
values = file_store.mget(["key1"])
# Assert that the value is None after deletion
assert values == [None]
def test_set_invalid_key(file_store: LocalFileStore) -> None:
"""Test that an exception is raised when an invalid key is set."""
# Set a key-value pair
key = "crying-cat/😿"
value = b"This is a test value"
with pytest.raises(InvalidKeyException):
file_store.mset([(key, value)])
def test_set_key_and_verify_content(file_store: LocalFileStore) -> None:
"""Test that the content of the file is the same as the value set."""
# Set a key-value pair
key = "test_key"
value = b"This is a test value"
file_store.mset([(key, value)])
# Verify the content of the actual file
full_path = file_store._get_full_path(key)
assert full_path.exists()
assert full_path.read_bytes() == b"This is a test value"
def test_yield_keys(file_store: LocalFileStore) -> None:
# Set values for keys
key_value_pairs = [("key1", b"value1"), ("subdir/key2", b"value2")]
file_store.mset(key_value_pairs)
# Iterate over keys
keys = list(file_store.yield_keys())
# Assert that the yielded keys match the expected keys
expected_keys = ["key1", "subdir/key2"]
assert keys == expected_keys
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_awa.py | """Test Awa Embedding"""
from langchain.embeddings.awa import AwaEmbeddings
def test_awa_embedding_documents() -> None:
"""Test Awa embeddings for documents."""
documents = ["foo bar", "test document"]
embedding = AwaEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 768
def test_awa_embedding_query() -> None:
"""Test Awa embeddings for query."""
document = "foo bar"
embedding = AwaEmbeddings()
output = embedding.embed_query(document)
assert len(output) == 768
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utils~loading.py | """Utilities for loading configurations from langchain-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
DEFAULT_REF = os.environ.get("LANGCHAIN_HUB_DEFAULT_REF", "master")
URL_BASE = os.environ.get(
"LANGCHAIN_HUB_URL_BASE",
"https://raw.githubusercontent.com/hwchase17/langchain-hub/{ref}/",
)
HUB_PATH_RE = re.compile(r"lc(?P<ref>@[^:]+)?://(?P<path>.*)")
T = TypeVar("T")
def try_load_from_hub(
path: Union[str, Path],
loader: Callable[[str], T],
valid_prefix: str,
valid_suffixes: Set[str],
**kwargs: Any,
) -> Optional[T]:
"""Load configuration from hub. Returns None if path is not a hub path."""
if not isinstance(path, str) or not (match := HUB_PATH_RE.match(path)):
return None
ref, remote_path_str = match.groups()
ref = ref[1:] if ref else DEFAULT_REF
remote_path = Path(remote_path_str)
if remote_path.parts[0] != valid_prefix:
return None
if remote_path.suffix[1:] not in valid_suffixes:
raise ValueError(f"Unsupported file type, must be one of {valid_suffixes}.")
# Using Path with URLs is not recommended, because on Windows
# the backslash is used as the path separator, which can cause issues
# when working with URLs that use forward slashes as the path separator.
# Instead, use PurePosixPath to ensure that forward slashes are used as the
# path separator, regardless of the operating system.
full_url = urljoin(URL_BASE.format(ref=ref), PurePosixPath(remote_path).__str__())
r = requests.get(full_url, timeout=5)
if r.status_code != 200:
raise ValueError(f"Could not find file at {full_url}")
with tempfile.TemporaryDirectory() as tmpdirname:
file = Path(tmpdirname) / remote_path.name
with open(file, "wb") as f:
f.write(r.content)
return loader(str(file), **kwargs)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_rss.py | from pathlib import Path
from langchain.document_loaders.rss import RSSFeedLoader
def test_rss_loader() -> None:
loader = RSSFeedLoader(urls=["https://www.engadget.com/rss.xml"])
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], "page_content")
assert hasattr(docs[0], "metadata")
metadata = docs[0].metadata
assert "feed" in metadata
assert "title" in metadata
assert "link" in metadata
assert "authors" in metadata
assert "language" in metadata
assert "description" in metadata
assert "publish_date" in metadata
def test_rss_loader_with_opml() -> None:
file_path = Path(__file__).parent.parent / "examples"
with open(file_path.joinpath("sample_rss_feeds.opml"), "r") as f:
loader = RSSFeedLoader(opml=f.read())
docs = loader.load()
assert docs[0] is not None
assert hasattr(docs[0], "page_content")
assert hasattr(docs[0], "metadata")
metadata = docs[0].metadata
assert "feed" in metadata
assert "title" in metadata
assert "link" in metadata
assert "authors" in metadata
assert "language" in metadata
assert "description" in metadata
assert "publish_date" in metadata
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~elastic_search_bm25.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from typing import Any, Iterable, List
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever
class ElasticSearchBM25Retriever(BaseRetriever):
"""`Elasticsearch` retriever that uses `BM25`.
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
"""
client: Any
"""Elasticsearch client."""
index_name: str
"""Name of the index to use in Elasticsearch."""
@classmethod
def create(
cls, elasticsearch_url: str, index_name: str, k1: float = 2.0, b: float = 0.75
) -> ElasticSearchBM25Retriever:
"""
        Create an ElasticSearchBM25Retriever and the Elasticsearch index it will use.
        Args:
            elasticsearch_url: URL of the Elasticsearch instance to connect to.
            index_name: Name of the index to use in Elasticsearch.
            k1: BM25 parameter k1.
            b: BM25 parameter b.
        Returns:
            An ElasticSearchBM25Retriever connected to the newly created index.
        """
from elasticsearch import Elasticsearch
# Create an Elasticsearch client instance
es = Elasticsearch(elasticsearch_url)
# Define the index settings and mappings
settings = {
"analysis": {"analyzer": {"default": {"type": "standard"}}},
"similarity": {
"custom_bm25": {
"type": "BM25",
"k1": k1,
"b": b,
}
},
}
mappings = {
"properties": {
"content": {
"type": "text",
"similarity": "custom_bm25", # Use the custom BM25 similarity
}
}
}
# Create the index with the specified settings and mappings
es.indices.create(index=index_name, mappings=mappings, settings=settings)
return cls(client=es, index_name=index_name)
def add_texts(
self,
texts: Iterable[str],
refresh_indices: bool = True,
) -> List[str]:
"""Run more texts through the embeddings and add to the retriever.
Args:
texts: Iterable of strings to add to the retriever.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the retriever.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
for i, text in enumerate(texts):
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"content": text,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
query_dict = {"query": {"match": {"content": query}}}
res = self.client.search(index=self.index_name, body=query_dict)
docs = []
for r in res["hits"]["hits"]:
docs.append(Document(page_content=r["_source"]["content"]))
return docs
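# Illustrative usage sketch: assumes an Elasticsearch instance is reachable at
# http://localhost:9200; the index name and sample texts are placeholders.
if __name__ == "__main__":
    retriever = ElasticSearchBM25Retriever.create(
        elasticsearch_url="http://localhost:9200", index_name="langchain-bm25-demo"
    )
    retriever.add_texts(["foo", "foo bar", "hello world"])
    for doc in retriever.get_relevant_documents("foo"):
        print(doc.page_content)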
| [
"{'type': 'text', 'similarity': 'custom_bm25'}"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~docarray~test_hnsw.py | from pathlib import Path
from typing import List
import numpy as np
import pytest
from langchain.schema import Document
from langchain.vectorstores.docarray import DocArrayHnswSearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
@pytest.fixture
def texts() -> List[str]:
return ["foo", "bar", "baz"]
def test_from_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
)
assert docsearch.doc_index.num_docs() == 3
def test_add_texts(texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
docsearch = DocArrayHnswSearch.from_params(
work_dir=str(tmp_path),
n_dim=10,
embedding=FakeEmbeddings(),
)
docsearch.add_texts(texts=texts)
assert docsearch.doc_index.num_docs() == 3
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search(metric: str, texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
index=True,
)
output = hnsw_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_all_configurations(
metric: str, texts: List[str], tmp_path: Path
) -> None:
"""Test end to end construction and simple similarity search."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
dist_metric=metric,
n_dim=10,
max_elements=8,
ef_construction=300,
ef=20,
M=8,
allow_replace_deleted=False,
num_threads=2,
)
output = hnsw_vec_store.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_by_vector(metric: str, texts: List[str], tmp_path: Path) -> None:
"""Test end to end construction and similarity search by vector."""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
)
embedding = [1.0] * 10
output = hnsw_vec_store.similarity_search_by_vector(embedding, k=1)
assert output == [Document(page_content="bar")]
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_sim_search_with_score(metric: str, tmp_path: Path) -> None:
"""Test end to end construction and similarity search with score."""
texts = ["foo", "bar", "baz"]
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric=metric,
)
output = hnsw_vec_store.similarity_search_with_score("foo", k=1)
assert len(output) == 1
out_doc, out_score = output[0]
assert out_doc == Document(page_content="foo")
assert np.isclose(out_score, 0.0, atol=1.0e-6)
def test_sim_search_with_score_for_ip_metric(texts: List[str], tmp_path: Path) -> None:
"""
Test end to end construction and similarity search with score for ip
(inner-product) metric.
"""
hnsw_vec_store = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
work_dir=str(tmp_path),
n_dim=10,
dist_metric="ip",
)
output = hnsw_vec_store.similarity_search_with_score("foo", k=3)
assert len(output) == 3
for result in output:
assert result[1] == -8.0
@pytest.mark.parametrize("metric", ["cosine", "l2"])
def test_max_marginal_relevance_search(
metric: str, texts: List[str], tmp_path: Path
) -> None:
"""Test MRR search."""
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = DocArrayHnswSearch.from_texts(
texts,
FakeEmbeddings(),
metadatas=metadatas,
dist_metric=metric,
work_dir=str(tmp_path),
n_dim=10,
)
output = docsearch.max_marginal_relevance_search("foo", k=2, fetch_k=3)
assert output == [
Document(page_content="foo", metadata={"page": 0}),
Document(page_content="bar", metadata={"page": 1}),
]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~retrievers~self_query~test_pinecone.py | from typing import Dict, Tuple
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
)
from langchain.retrievers.self_query.pinecone import PineconeTranslator
DEFAULT_TRANSLATOR = PineconeTranslator()
def test_visit_comparison() -> None:
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
expected = {"foo": {"$lt": ["1", "2"]}}
actual = DEFAULT_TRANSLATOR.visit_comparison(comp)
assert expected == actual
def test_visit_operation() -> None:
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
],
)
expected = {"$and": [{"foo": {"$lt": 2}}, {"bar": {"$eq": "baz"}}]}
actual = DEFAULT_TRANSLATOR.visit_operation(op)
assert expected == actual
def test_visit_structured_query() -> None:
query = "What is the capital of France?"
structured_query = StructuredQuery(
query=query,
filter=None,
)
expected: Tuple[str, Dict] = (query, {})
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
comp = Comparison(comparator=Comparator.LT, attribute="foo", value=["1", "2"])
structured_query = StructuredQuery(
query=query,
filter=comp,
)
expected = (
query,
{"filter": {"foo": {"$lt": ["1", "2"]}}},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
op = Operation(
operator=Operator.AND,
arguments=[
Comparison(comparator=Comparator.LT, attribute="foo", value=2),
Comparison(comparator=Comparator.EQ, attribute="bar", value="baz"),
],
)
structured_query = StructuredQuery(
query=query,
filter=op,
)
expected = (
query,
{"filter": {"$and": [{"foo": {"$lt": 2}}, {"bar": {"$eq": "baz"}}]}},
)
actual = DEFAULT_TRANSLATOR.visit_structured_query(structured_query)
assert expected == actual
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~azure_cognitive_search.py | from __future__ import annotations
import json
from typing import Dict, List, Optional
import aiohttp
import requests
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.pydantic_v1 import Extra, root_validator
from langchain.schema import BaseRetriever, Document
from langchain.utils import get_from_dict_or_env
class AzureCognitiveSearchRetriever(BaseRetriever):
"""`Azure Cognitive Search` service retriever."""
service_name: str = ""
"""Name of Azure Cognitive Search service"""
index_name: str = ""
"""Name of Index inside Azure Cognitive Search service"""
api_key: str = ""
"""API Key. Both Admin and Query keys work, but for reading data it's
recommended to use a Query key."""
api_version: str = "2020-06-30"
"""API version"""
aiosession: Optional[aiohttp.ClientSession] = None
"""ClientSession, in case we want to reuse connection for better performance."""
content_key: str = "content"
"""Key in a retrieved result to set as the Document page_content."""
top_k: Optional[int] = None
"""Number of results to retrieve. Set to None to retrieve all results."""
class Config:
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that service name, index name and api key exists in environment."""
values["service_name"] = get_from_dict_or_env(
values, "service_name", "AZURE_COGNITIVE_SEARCH_SERVICE_NAME"
)
values["index_name"] = get_from_dict_or_env(
values, "index_name", "AZURE_COGNITIVE_SEARCH_INDEX_NAME"
)
values["api_key"] = get_from_dict_or_env(
values, "api_key", "AZURE_COGNITIVE_SEARCH_API_KEY"
)
return values
def _build_search_url(self, query: str) -> str:
base_url = f"https://{self.service_name}.search.windows.net/"
endpoint_path = f"indexes/{self.index_name}/docs?api-version={self.api_version}"
top_param = f"&$top={self.top_k}" if self.top_k else ""
return base_url + endpoint_path + f"&search={query}" + top_param
@property
def _headers(self) -> Dict[str, str]:
return {
"Content-Type": "application/json",
"api-key": self.api_key,
}
def _search(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
response = requests.get(search_url, headers=self._headers)
if response.status_code != 200:
raise Exception(f"Error in search request: {response}")
return json.loads(response.text)["value"]
async def _asearch(self, query: str) -> List[dict]:
search_url = self._build_search_url(query)
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(search_url, headers=self._headers) as response:
response_json = await response.json()
else:
async with self.aiosession.get(
search_url, headers=self._headers
) as response:
response_json = await response.json()
return response_json["value"]
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
search_results = self._search(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
search_results = await self._asearch(query)
return [
Document(page_content=result.pop(self.content_key), metadata=result)
for result in search_results
]
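# Illustrative usage sketch: assumes AZURE_COGNITIVE_SEARCH_SERVICE_NAME,
# AZURE_COGNITIVE_SEARCH_INDEX_NAME and AZURE_COGNITIVE_SEARCH_API_KEY are set in the
# environment for a real Cognitive Search service; the query string is a placeholder.
if __name__ == "__main__":
    retriever = AzureCognitiveSearchRetriever(content_key="content", top_k=3)
    for doc in retriever.get_relevant_documents("what is langchain?"):
        print(doc.page_content)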
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~memory~test_singlestoredb.py | import json
from langchain.memory import ConversationBufferMemory, SingleStoreDBChatMessageHistory
from langchain.schema.messages import _message_to_dict
# Replace this with your SingleStoreDB connection string
TEST_SINGLESTOREDB_URL = "root:pass@localhost:3306/db"
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup SingleStoreDB as a message store
message_history = SingleStoreDBChatMessageHistory(
session_id="test-session",
host=TEST_SINGLESTOREDB_URL,
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from SingleStoreDB, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_email.py | from pathlib import Path
from langchain.document_loaders import OutlookMessageLoader, UnstructuredEmailLoader
def test_outlook_message_loader() -> None:
"""Test OutlookMessageLoader."""
file_path = Path(__file__).parent.parent / "examples/hello.msg"
loader = OutlookMessageLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
assert docs[0].metadata["subject"] == "Test for TIF files"
assert docs[0].metadata["sender"] == "Brian Zhou <[email protected]>"
assert docs[0].metadata["date"] == "Mon, 18 Nov 2013 16:26:24 +0800"
assert docs[0].page_content == (
"This is a test email to experiment with the MS Outlook MSG "
"Extractor\r\n\r\n\r\n-- \r\n\r\n\r\nKind regards"
"\r\n\r\n\r\n\r\n\r\nBrian Zhou\r\n\r\n"
)
def test_unstructured_email_loader_with_attachments() -> None:
file_path = Path(__file__).parent.parent / "examples/fake-email-attachment.eml"
loader = UnstructuredEmailLoader(
str(file_path), mode="elements", process_attachments=True
)
docs = loader.load()
assert docs[-1].page_content == "Hey this is a fake attachment!"
assert docs[-1].metadata["filename"] == "fake-attachment.txt"
assert docs[-1].metadata["source"].endswith("fake-email-attachment.eml")
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~embeddings~cohere.py | from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
class CohereEmbeddings(BaseModel, Embeddings):
"""Cohere embedding models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import CohereEmbeddings
cohere = CohereEmbeddings(
model="embed-english-light-v2.0", cohere_api_key="my-api-key"
)
"""
client: Any #: :meta private:
"""Cohere client."""
async_client: Any #: :meta private:
"""Cohere async client."""
model: str = "embed-english-v2.0"
"""Model name to use."""
truncate: Optional[str] = None
"""Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""
cohere_api_key: Optional[str] = None
max_retries: Optional[int] = None
"""Maximum number of retries to make when generating."""
request_timeout: Optional[float] = None
"""Timeout in seconds for the Cohere API request."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
max_retries = values.get("max_retries")
request_timeout = values.get("request_timeout")
try:
import cohere
values["client"] = cohere.Client(
cohere_api_key, max_retries=max_retries, timeout=request_timeout
)
values["async_client"] = cohere.AsyncClient(
cohere_api_key, max_retries=max_retries, timeout=request_timeout
)
except ImportError:
raise ValueError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = self.client.embed(
model=self.model, texts=texts, truncate=self.truncate
).embeddings
return [list(map(float, e)) for e in embeddings]
async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
"""Async call out to Cohere's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = await self.async_client.embed(
model=self.model, texts=texts, truncate=self.truncate
)
return [list(map(float, e)) for e in embeddings.embeddings]
def embed_query(self, text: str) -> List[float]:
"""Call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
async def aembed_query(self, text: str) -> List[float]:
"""Async call out to Cohere's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
embeddings = await self.aembed_documents([text])
return embeddings[0]
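# Illustrative usage sketch: assumes a valid COHERE_API_KEY is available in the
# environment; the input texts are placeholders.
if __name__ == "__main__":
    embedder = CohereEmbeddings(model="embed-english-v2.0")
    vectors = embedder.embed_documents(["hello world", "goodbye world"])
    print(len(vectors), len(vectors[0]))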
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~acreom.py | import re
from pathlib import Path
from typing import Iterator, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class AcreomLoader(BaseLoader):
"""Load `acreom` vault from a directory."""
FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)
"""Regex to match front matter metadata in markdown files."""
def __init__(
self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
):
"""Initialize the loader."""
self.file_path = path
"""Path to the directory containing the markdown files."""
self.encoding = encoding
"""Encoding to use when reading the files."""
self.collect_metadata = collect_metadata
"""Whether to collect metadata from the front matter."""
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
front_matter = {}
if match:
lines = match.group(1).split("\n")
for line in lines:
if ":" in line:
key, value = line.split(":", 1)
front_matter[key.strip()] = value.strip()
else:
# Skip lines without a colon
continue
return front_matter
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
def _process_acreom_content(self, content: str) -> str:
        # Remove acreom-specific elements from the content that
        # do not contribute to the context of the current document.
content = re.sub(r"\s*-\s\[\s\]\s.*|\s*\[\s\]\s.*", "", content) # rm tasks
content = re.sub(r"#", "", content) # rm hashtags
content = re.sub(r"\[\[.*?\]\]", "", content) # rm doclinks
return content
def lazy_load(self) -> Iterator[Document]:
ps = list(Path(self.file_path).glob("**/*.md"))
for p in ps:
with open(p, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
text = self._remove_front_matter(text)
text = self._process_acreom_content(text)
metadata = {
"source": str(p.name),
"path": str(p),
**front_matter,
}
yield Document(page_content=text, metadata=metadata)
def load(self) -> List[Document]:
return list(self.lazy_load())
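# Illustrative usage sketch: the vault path below is a placeholder and should point
# at a real acreom vault directory containing markdown files.
if __name__ == "__main__":
    loader = AcreomLoader("path/to/acreom/vault", collect_metadata=True)
    for doc in loader.lazy_load():
        print(doc.metadata["source"], len(doc.page_content))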
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_baseten.py | """Test Baseten API wrapper."""
import os
from langchain.llms.baseten import Baseten
def test_baseten_call() -> None:
"""Test valid call to Baseten."""
import baseten
baseten.login(os.environ["BASETEN_API_KEY"])
llm = Baseten(model=os.environ["BASETEN_MODEL_ID"])
output = llm("Say foo:")
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~integration_tests~chains~test_synthetic_data_openai.py | import pytest
from langchain.chat_models import ChatOpenAI
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.pydantic_v1 import BaseModel
from langchain_experimental.tabular_synthetic_data.base import SyntheticDataGenerator
from langchain_experimental.tabular_synthetic_data.openai import (
OPENAI_TEMPLATE,
create_openai_data_generator,
)
from langchain_experimental.tabular_synthetic_data.prompts import (
SYNTHETIC_FEW_SHOT_PREFIX,
SYNTHETIC_FEW_SHOT_SUFFIX,
)
# Define the desired output schema for individual medical billing record
class MedicalBilling(BaseModel):
patient_id: int
patient_name: str
diagnosis_code: str
procedure_code: str
total_charge: float
insurance_claim_amount: float
examples = [
{
"example": """Patient ID: 123456, Patient Name: John Doe, Diagnosis Code:
J20.9, Procedure Code: 99203, Total Charge: $500, Insurance Claim Amount:
$350"""
},
{
"example": """Patient ID: 789012, Patient Name: Johnson Smith, Diagnosis
Code: M54.5, Procedure Code: 99213, Total Charge: $150, Insurance Claim
Amount: $120"""
},
{
"example": """Patient ID: 345678, Patient Name: Emily Stone, Diagnosis Code:
E11.9, Procedure Code: 99214, Total Charge: $300, Insurance Claim Amount:
$250"""
},
{
"example": """Patient ID: 901234, Patient Name: Robert Miles, Diagnosis Code:
B07.9, Procedure Code: 99204, Total Charge: $200, Insurance Claim Amount:
$160"""
},
{
"example": """Patient ID: 567890, Patient Name: Clara Jensen, Diagnosis Code:
F41.9, Procedure Code: 99205, Total Charge: $450, Insurance Claim Amount:
$310"""
},
{
"example": """Patient ID: 234567, Patient Name: Alan Turing, Diagnosis Code:
G40.909, Procedure Code: 99215, Total Charge: $220, Insurance Claim Amount:
$180"""
},
]
prompt_template = FewShotPromptTemplate(
prefix=SYNTHETIC_FEW_SHOT_PREFIX,
examples=examples,
suffix=SYNTHETIC_FEW_SHOT_SUFFIX,
input_variables=["subject", "extra"],
example_prompt=OPENAI_TEMPLATE,
)
@pytest.fixture(scope="function")
def synthetic_data_generator() -> SyntheticDataGenerator:
return create_openai_data_generator(
output_schema=MedicalBilling,
llm=ChatOpenAI(temperature=1), # replace with your LLM instance
prompt=prompt_template,
)
@pytest.mark.requires("openai")
def test_generate_synthetic(synthetic_data_generator: SyntheticDataGenerator) -> None:
synthetic_results = synthetic_data_generator.generate(
subject="medical_billing",
extra="""the name must be chosen at random. Make it something you wouldn't
normally choose.""",
runs=10,
)
assert len(synthetic_results) == 10
for row in synthetic_results:
assert isinstance(row, MedicalBilling)
@pytest.mark.requires("openai")
@pytest.mark.asyncio
async def test_agenerate_synthetic(
synthetic_data_generator: SyntheticDataGenerator,
) -> None:
synthetic_results = await synthetic_data_generator.agenerate(
subject="medical_billing",
extra="""the name must be chosen at random. Make it something you wouldn't
normally choose.""",
runs=10,
)
assert len(synthetic_results) == 10
for row in synthetic_results:
assert isinstance(row, MedicalBilling)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~self_query~elasticsearch.py | from typing import Dict, Tuple, Union
from langchain.chains.query_constructor.ir import (
Comparator,
Comparison,
Operation,
Operator,
StructuredQuery,
Visitor,
)
class ElasticsearchTranslator(Visitor):
"""Translate `Elasticsearch` internal query language elements to valid filters."""
allowed_comparators = [
Comparator.EQ,
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
Comparator.CONTAIN,
Comparator.LIKE,
]
"""Subset of allowed logical comparators."""
allowed_operators = [Operator.AND, Operator.OR, Operator.NOT]
"""Subset of allowed logical operators."""
def _format_func(self, func: Union[Operator, Comparator]) -> str:
self._validate_func(func)
map_dict = {
Operator.OR: "should",
Operator.NOT: "must_not",
Operator.AND: "must",
Comparator.EQ: "term",
Comparator.GT: "gt",
Comparator.GTE: "gte",
Comparator.LT: "lt",
Comparator.LTE: "lte",
Comparator.CONTAIN: "match",
Comparator.LIKE: "match",
}
return map_dict[func]
def visit_operation(self, operation: Operation) -> Dict:
args = [arg.accept(self) for arg in operation.arguments]
return {"bool": {self._format_func(operation.operator): args}}
def visit_comparison(self, comparison: Comparison) -> Dict:
        # ElasticsearchStore filters need to target
        # the metadata object field
field = f"metadata.{comparison.attribute}"
is_range_comparator = comparison.comparator in [
Comparator.GT,
Comparator.GTE,
Comparator.LT,
Comparator.LTE,
]
if is_range_comparator:
return {
"range": {
field: {self._format_func(comparison.comparator): comparison.value}
}
}
if comparison.comparator == Comparator.CONTAIN:
return {
self._format_func(comparison.comparator): {
field: {"query": comparison.value}
}
}
if comparison.comparator == Comparator.LIKE:
return {
self._format_func(comparison.comparator): {
field: {"query": comparison.value, "fuzziness": "AUTO"}
}
}
# we assume that if the value is a string,
# we want to use the keyword field
field = f"{field}.keyword" if isinstance(comparison.value, str) else field
return {self._format_func(comparison.comparator): {field: comparison.value}}
def visit_structured_query(
self, structured_query: StructuredQuery
) -> Tuple[str, dict]:
if structured_query.filter is None:
kwargs = {}
else:
kwargs = {"filter": [structured_query.filter.accept(self)]}
return structured_query.query, kwargs
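# Minimal usage sketch (example values are assumptions): build a StructuredQuery by
# hand and run it through the translator defined above. This only exercises the pure
# translation logic, so no Elasticsearch cluster is required.
if __name__ == "__main__":
    example_query = StructuredQuery(
        query="dinosaur",
        filter=Comparison(
            comparator=Comparator.EQ, attribute="genre", value="science fiction"
        ),
    )
    # prints ("dinosaur", {"filter": [...]}) with a keyword term filter on metadata.genre
    print(ElasticsearchTranslator().visit_structured_query(example_query))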
| [] |
2024-01-10 | RohanDey02/langchain | templates~anthropic-iterative-search~anthropic_iterative_search~retriever_agent.py | from langchain.chat_models import ChatAnthropic
from langchain.prompts import ChatPromptTemplate
from langchain.schema.runnable import RunnablePassthrough, RunnableMap
from langchain.schema.output_parser import StrOutputParser
from langchain.agents import AgentExecutor
from .retriever import search, RETRIEVER_TOOL_NAME, retriever_description
from .prompts import retrieval_prompt
from .agent_scratchpad import format_agent_scratchpad
from .output_parser import parse_output
prompt = ChatPromptTemplate.from_messages([
("user", retrieval_prompt),
("ai", "{agent_scratchpad}"),
])
prompt = prompt.partial(retriever_description=retriever_description)
model = ChatAnthropic(model="claude-2", temperature=0, max_tokens_to_sample=1000)
chain = RunnablePassthrough.assign(
agent_scratchpad=lambda x: format_agent_scratchpad(x['intermediate_steps'])
) | prompt | model.bind(stop_sequences=['</search_query>']) | StrOutputParser()
agent_chain = RunnableMap({
"partial_completion": chain,
"intermediate_steps": lambda x: x['intermediate_steps']
}) | parse_output
executor = AgentExecutor(agent=agent_chain, tools=[search], verbose=True)
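# Rough invocation sketch (assumption: the retrieval prompt above exposes a `query`
# input variable, and ANTHROPIC_API_KEY is set). Not executed here because it needs
# live Anthropic and Wikipedia access.
#
#     result = executor.invoke({"query": "Which district is the Eiffel Tower in?"})
#     print(result["output"])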
| [
"{agent_scratchpad}",
"[('user', PLACEHOLDER), ('ai', '{agent_scratchpad}')]"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~document_loaders~test_obsidian.py | from pathlib import Path
from langchain.document_loaders.obsidian import ObsidianLoader
OBSIDIAN_EXAMPLE_PATH = Path(__file__).parent / "sample_documents" / "obsidian"
STANDARD_METADATA_FIELDS = {
"created",
"path",
"source",
"last_accessed",
"last_modified",
}
loader = ObsidianLoader(str(OBSIDIAN_EXAMPLE_PATH))
docs = loader.load()
def test_page_content_loaded() -> None:
"""Verify that all docs have page_content"""
assert len(docs) == 5
assert all(doc.page_content for doc in docs)
def test_disable_collect_metadata() -> None:
"""If collect_metadata is False, no additional metadata should be collected."""
loader_without_metadata = ObsidianLoader(
str(OBSIDIAN_EXAMPLE_PATH), collect_metadata=False
)
docs_wo = loader_without_metadata.load()
assert len(docs_wo) == 5
assert all(doc.page_content for doc in docs_wo)
assert all(set(doc.metadata) == STANDARD_METADATA_FIELDS for doc in docs_wo)
def test_metadata_without_frontmatter() -> None:
"""Verify docs without frontmatter, still have basic metadata."""
doc = next(doc for doc in docs if doc.metadata["source"] == "no_metadata.md")
assert set(doc.metadata) == STANDARD_METADATA_FIELDS
def test_metadata_with_frontmatter() -> None:
"""Verify a standard frontmatter field is loaded."""
doc = next(doc for doc in docs if doc.metadata["source"] == "frontmatter.md")
assert set(doc.metadata) == STANDARD_METADATA_FIELDS | {"tags"}
assert set(doc.metadata["tags"].split(",")) == {"journal/entry", "obsidian"}
def test_metadata_with_bad_frontmatter() -> None:
"""Verify a doc with non-yaml frontmatter."""
doc = next(doc for doc in docs if doc.metadata["source"] == "bad_frontmatter.md")
assert set(doc.metadata) == STANDARD_METADATA_FIELDS
def test_metadata_with_tags_and_frontmatter() -> None:
"""Verify a doc with frontmatter and tags/dataview tags are all added to
metadata."""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
FRONTMATTER_FIELDS = {
"aBool",
"aFloat",
"anInt",
"anArray",
"aString",
"aDict",
"tags",
}
DATAVIEW_FIELDS = {"dataview1", "dataview2", "dataview3"}
assert (
set(doc.metadata)
== STANDARD_METADATA_FIELDS | FRONTMATTER_FIELDS | DATAVIEW_FIELDS
)
def test_tags_in_page_content() -> None:
"""Verify a doc with tags are included in the metadata"""
doc = next(doc for doc in docs if doc.metadata["source"] == "no_frontmatter.md")
assert set(doc.metadata) == STANDARD_METADATA_FIELDS | {"tags"}
def test_boolean_metadata() -> None:
"""Verify boolean metadata is loaded correctly"""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
assert doc.metadata["aBool"]
def test_float_metadata() -> None:
"""Verify float metadata is loaded correctly"""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
assert doc.metadata["aFloat"] == 13.12345
def test_int_metadata() -> None:
"""Verify int metadata is loaded correctly"""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
assert doc.metadata["anInt"] == 15
def test_string_metadata() -> None:
"""Verify string metadata is loaded correctly"""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
assert doc.metadata["aString"] == "string value"
def test_array_metadata() -> None:
"""Verify array metadata is loaded as a string"""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
assert doc.metadata["anArray"] == "['one', 'two', 'three']"
def test_dict_metadata() -> None:
"""Verify dict metadata is stored as a string"""
doc = next(
doc for doc in docs if doc.metadata["source"] == "tags_and_frontmatter.md"
)
assert doc.metadata["aDict"] == "{'dictId1': '58417', 'dictId2': 1500}"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_together.py | """Test Together API wrapper.
In order to run this test, you need to have a Together API key.
You can get it by registering for free at https://api.together.xyz/.
A test key can be found at https://api.together.xyz/settings/api-keys
You'll then need to set TOGETHER_API_KEY environment variable to your api key.
"""
import pytest as pytest
from langchain.llms import Together
def test_together_call() -> None:
"""Test simple call to together."""
llm = Together(
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
output = llm("Say foo:")
assert llm._llm_type == "together"
assert isinstance(output, str)
@pytest.mark.asyncio
async def test_together_acall() -> None:
"""Test simple call to together."""
llm = Together(
model="togethercomputer/RedPajama-INCITE-7B-Base",
temperature=0.2,
max_tokens=250,
)
output = await llm.agenerate(["Say foo:"], stop=["bar"])
assert llm._llm_type == "together"
output_text = output.generations[0][0].text
assert isinstance(output_text, str)
assert output_text.count("bar") <= 1
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~cohere.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]:
import cohere
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(cohere.error.CohereError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
def acompletion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return await llm.async_client.generate(**kwargs)
return _completion_with_retry(**kwargs)
class BaseCohere(Serializable):
"""Base class for Cohere models."""
client: Any #: :meta private:
async_client: Any #: :meta private:
model: Optional[str] = Field(default=None)
"""Model name to use."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
cohere_api_key: Optional[str] = None
stop: Optional[List[str]] = None
streaming: bool = Field(default=False)
"""Whether to stream the results."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
else:
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
values["client"] = cohere.Client(cohere_api_key)
values["async_client"] = cohere.AsyncClient(cohere_api_key)
return values
class Cohere(LLM, BaseCohere):
"""Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
"""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token
length: Truncate from START, END or NONE"""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
@property
def lc_secrets(self) -> Dict[str, str]:
return {"cohere_api_key": "COHERE_API_KEY"}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cohere"
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
return {**params, **kwargs}
def _process_response(self, response: Any, stop: Optional[List[str]]) -> str:
text = response.generations[0].text
# If stop tokens are provided, Cohere's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = completion_with_retry(
self, model=self.model, prompt=prompt, **params
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = await cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = await acompletion_with_retry(
self, model=self.model, prompt=prompt, **params
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~tools~edenai~test_image_explicitcontent.py | """Test EdenAI's image moderation tool.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain.tools.edenai import EdenAiExplicitImageTool
def test_edenai_call() -> None:
"""Test simple call to edenai's image moderation endpoint."""
image_moderation = EdenAiExplicitImageTool(providers=["amazon"])
output = image_moderation("https://static.javatpoint.com/images/objects.jpg")
assert image_moderation.name == "edenai_image_explicit_content_detection"
assert image_moderation.feature == "image"
assert image_moderation.subfeature == "explicit_content"
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~bigquery.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.utilities.vertexai import get_client_info
if TYPE_CHECKING:
from google.auth.credentials import Credentials
class BigQueryLoader(BaseLoader):
"""Load from the Google Cloud Platform `BigQuery`.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
project: Optional[str] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
credentials: Optional[Credentials] = None,
):
"""Initialize BigQuery document loader.
Args:
query: The query to run in BigQuery.
project: Optional. The project to run the query in.
page_content_columns: Optional. The columns to write into the `page_content`
of the document.
metadata_columns: Optional. The columns to write into the `metadata` of the
document.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to override
default credentials, such as to use Compute Engine
(`google.auth.compute_engine.Credentials`) or Service Account
(`google.oauth2.service_account.Credentials`) credentials directly.
"""
self.query = query
self.project = project
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
self.credentials = credentials
def load(self) -> List[Document]:
try:
from google.cloud import bigquery
except ImportError as ex:
raise ImportError(
"Could not import google-cloud-bigquery python package. "
"Please install it with `pip install google-cloud-bigquery`."
) from ex
bq_client = bigquery.Client(
credentials=self.credentials,
project=self.project,
client_info=get_client_info(module="bigquery"),
)
if not bq_client.project:
error_desc = (
"GCP project for Big Query is not set! Either provide a "
"`project` argument during BigQueryLoader instantiation, "
"or set a default project with `gcloud config set project` "
"command."
)
raise ValueError(error_desc)
query_result = bq_client.query(self.query).result()
docs: List[Document] = []
page_content_columns = self.page_content_columns
metadata_columns = self.metadata_columns
if page_content_columns is None:
page_content_columns = [column.name for column in query_result.schema]
if metadata_columns is None:
metadata_columns = []
for row in query_result:
page_content = "\n".join(
f"{k}: {v}" for k, v in row.items() if k in page_content_columns
)
metadata = {k: v for k, v in row.items() if k in metadata_columns}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
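# Usage sketch (table and project names are placeholders; valid GCP credentials are
# assumed). Each returned Document gets its page_content from the selected columns.
#
#     loader = BigQueryLoader(
#         query="SELECT title, body FROM `my-project.my_dataset.articles` LIMIT 10",
#         project="my-project",
#         page_content_columns=["body"],
#         metadata_columns=["title"],
#     )
#     docs = loader.load()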
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~evaluation~comparison~test_eval_chain.py | """Test the comparison chains."""
import re
import pytest
from langchain.evaluation.comparison.eval_chain import (
LabeledPairwiseStringEvalChain,
PairwiseStringEvalChain,
PairwiseStringResultOutputParser,
resolve_pairwise_criteria,
)
from langchain.evaluation.criteria.eval_chain import Criteria
from tests.unit_tests.llms.fake_llm import FakeLLM
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
val = resolve_pairwise_criteria(criterion)
assert isinstance(val, dict)
assert next(iter(val)) == criterion.value
def test_resolve_criteria_list_enum() -> None:
val = resolve_pairwise_criteria(list(Criteria))
assert isinstance(val, dict)
assert set(val.keys()) == set(c.value for c in list(Criteria))
def test_PairwiseStringResultOutputParser_parse() -> None:
output_parser = PairwiseStringResultOutputParser()
text = """I like pie better than cake.
[[A]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": "A",
"score": 1,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
text = """I like cake better than pie.
[[B]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": "B",
"score": 0,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
text = """I like cake and pie.
[[C]]"""
got = output_parser.parse(text)
want = {
"reasoning": text,
"value": None,
"score": 0.5,
}
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
def test_pairwise_string_comparison_chain() -> None:
llm = FakeLLM(
queries={
"a": "The values are the same.\n[[C]]",
"b": "A is clearly better than b.\n[[A]]",
"c": "B is clearly better than a.\n[[B]]",
},
sequential_responses=True,
)
chain = PairwiseStringEvalChain.from_llm(llm=llm)
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I love pie.",
input="What is your favorite food?",
)
assert res["value"] is None
assert res["score"] == 0.5
assert res["reasoning"] == "The values are the same.\n[[C]]"
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I like pie.",
input="What is your favorite food?",
)
assert res["value"] == "A"
assert res["score"] == 1
with pytest.warns(UserWarning, match=re.escape(chain._skip_reference_warning)):
res = chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I hate pie.",
input="What is your favorite food?",
reference="I enjoy pie.",
)
assert res["value"] == "B"
assert res["score"] == 0
def test_labeled_pairwise_string_comparison_chain_missing_ref() -> None:
llm = FakeLLM(
queries={
"a": "The values are the same.\n[[C]]",
"b": "A is clearly better than b.\n[[A]]",
"c": "B is clearly better than a.\n[[B]]",
},
sequential_responses=True,
)
chain = LabeledPairwiseStringEvalChain.from_llm(llm=llm)
with pytest.raises(ValueError):
chain.evaluate_string_pairs(
prediction="I like pie.",
prediction_b="I love pie.",
input="What is your favorite food?",
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_llamacpp.py | # flake8: noqa
"""Test Llama.cpp wrapper."""
import os
from typing import Generator
from urllib.request import urlretrieve
import pytest
from langchain.llms import LlamaCpp
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
def get_model() -> str:
"""Download model. f
From https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/,
convert to new ggml format and return model path."""
model_url = "https://huggingface.co/Sosaka/Alpaca-native-4bit-ggml/resolve/main/ggml-alpaca-7b-q4.bin"
tokenizer_url = "https://huggingface.co/decapoda-research/llama-7b-hf/resolve/main/tokenizer.model"
conversion_script = "https://github.com/ggerganov/llama.cpp/raw/master/convert-unversioned-ggml-to-ggml.py"
local_filename = model_url.split("/")[-1]
if not os.path.exists("convert-unversioned-ggml-to-ggml.py"):
urlretrieve(conversion_script, "convert-unversioned-ggml-to-ggml.py")
if not os.path.exists("tokenizer.model"):
urlretrieve(tokenizer_url, "tokenizer.model")
if not os.path.exists(local_filename):
urlretrieve(model_url, local_filename)
os.system(f"python convert-unversioned-ggml-to-ggml.py . tokenizer.model")
return local_filename
def test_llamacpp_inference() -> None:
"""Test valid llama.cpp inference."""
model_path = get_model()
llm = LlamaCpp(model_path=model_path)
output = llm("Say foo:")
assert isinstance(output, str)
assert len(output) > 1
def test_llamacpp_streaming() -> None:
"""Test streaming tokens from LlamaCpp."""
model_path = get_model()
llm = LlamaCpp(model_path=model_path, max_tokens=10)
generator = llm.stream("Q: How do you say 'hello' in German? A:'", stop=["'"])
stream_results_string = ""
assert isinstance(generator, Generator)
for chunk in generator:
assert not isinstance(chunk, str)
# Note that this matches the OpenAI format:
assert isinstance(chunk["choices"][0]["text"], str)
stream_results_string += chunk["choices"][0]["text"]
assert len(stream_results_string.strip()) > 1
def test_llamacpp_streaming_callback() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
MAX_TOKENS = 5
OFF_BY_ONE = 1 # There may be an off by one error in the upstream code!
callback_handler = FakeCallbackHandler()
llm = LlamaCpp(
model_path=get_model(),
callbacks=[callback_handler],
verbose=True,
max_tokens=MAX_TOKENS,
)
llm("Q: Can you count to 10? A:'1, ")
assert callback_handler.llm_streams <= MAX_TOKENS + OFF_BY_ONE
def test_llamacpp_model_kwargs() -> None:
llm = LlamaCpp(model_path=get_model(), model_kwargs={"n_gqa": None})
assert llm.model_kwargs == {"n_gqa": None}
def test_llamacpp_invalid_model_kwargs() -> None:
with pytest.raises(ValueError):
LlamaCpp(model_path=get_model(), model_kwargs={"n_ctx": 1024})
def test_llamacpp_incorrect_field() -> None:
with pytest.warns(match="not default parameter"):
llm = LlamaCpp(model_path=get_model(), n_gqa=None)
    assert llm.model_kwargs == {"n_gqa": None}
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~agents~output_parsers~test_self_ask.py | from langchain.agents.output_parsers.self_ask import SelfAskOutputParser
from langchain.schema.agent import AgentAction, AgentFinish
def test_follow_up() -> None:
"""Test follow up parsing."""
parser = SelfAskOutputParser()
_input = "Follow up: what is two + 2"
output = parser.invoke(_input)
expected_output = AgentAction(
tool="Intermediate Answer", tool_input="what is two + 2", log=_input
)
assert output == expected_output
# Test that also handles one word by default
_input = "Followup: what is two + 2"
output = parser.invoke(_input)
expected_output = AgentAction(
tool="Intermediate Answer", tool_input="what is two + 2", log=_input
)
assert output == expected_output
def test_follow_up_custom() -> None:
"""Test follow up parsing for custom followups."""
parser = SelfAskOutputParser(followups=("Now:",))
_input = "Now: what is two + 2"
output = parser.invoke(_input)
expected_output = AgentAction(
tool="Intermediate Answer", tool_input="what is two + 2", log=_input
)
assert output == expected_output
def test_finish() -> None:
"""Test standard finish."""
parser = SelfAskOutputParser()
_input = "So the final answer is: 4"
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
def test_finish_custom() -> None:
"""Test custom finish."""
parser = SelfAskOutputParser(finish_string="Finally: ")
_input = "Finally: 4"
output = parser.invoke(_input)
expected_output = AgentFinish(return_values={"output": "4"}, log=_input)
assert output == expected_output
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chains~qa_with_sources~loading.py | """Load question answering with sources chains."""
from __future__ import annotations
from typing import Any, Mapping, Optional, Protocol
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.reduce import ReduceDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources import (
map_reduce_prompt,
refine_prompts,
stuff_prompt,
)
from langchain.chains.question_answering.map_rerank_prompt import (
PROMPT as MAP_RERANK_PROMPT,
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.schema.prompt_template import BasePromptTemplate
class LoadingCallable(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(
self, llm: BaseLanguageModel, **kwargs: Any
) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_map_rerank_chain(
llm: BaseLanguageModel,
prompt: BasePromptTemplate = MAP_RERANK_PROMPT,
verbose: bool = False,
document_variable_name: str = "context",
rank_key: str = "score",
answer_key: str = "answer",
**kwargs: Any,
) -> MapRerankDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return MapRerankDocumentsChain(
llm_chain=llm_chain,
rank_key=rank_key,
answer_key=answer_key,
document_variable_name=document_variable_name,
**kwargs,
)
def _load_stuff_chain(
llm: BaseLanguageModel,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_prompt: BasePromptTemplate = stuff_prompt.EXAMPLE_PROMPT,
document_variable_name: str = "summaries",
verbose: Optional[bool] = None,
**kwargs: Any,
) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
return StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
document_prompt=document_prompt,
verbose=verbose,
**kwargs,
)
def _load_map_reduce_chain(
llm: BaseLanguageModel,
question_prompt: BasePromptTemplate = map_reduce_prompt.QUESTION_PROMPT,
combine_prompt: BasePromptTemplate = map_reduce_prompt.COMBINE_PROMPT,
document_prompt: BasePromptTemplate = map_reduce_prompt.EXAMPLE_PROMPT,
combine_document_variable_name: str = "summaries",
map_reduce_document_variable_name: str = "context",
collapse_prompt: Optional[BasePromptTemplate] = None,
reduce_llm: Optional[BaseLanguageModel] = None,
collapse_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
token_max: int = 3000,
**kwargs: Any,
) -> MapReduceDocumentsChain:
map_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(llm=_reduce_llm, prompt=combine_prompt, verbose=verbose)
combine_documents_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt,
verbose=verbose,
)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
raise ValueError(
"collapse_llm provided, but collapse_prompt was not: please "
"provide one or stop providing collapse_llm."
)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(
llm_chain=LLMChain(
llm=_collapse_llm,
prompt=collapse_prompt,
verbose=verbose,
),
document_variable_name=combine_document_variable_name,
document_prompt=document_prompt,
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_chain,
token_max=token_max,
verbose=verbose,
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
reduce_documents_chain=reduce_documents_chain,
document_variable_name=map_reduce_document_variable_name,
verbose=verbose,
**kwargs,
)
def _load_refine_chain(
llm: BaseLanguageModel,
question_prompt: BasePromptTemplate = refine_prompts.DEFAULT_TEXT_QA_PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.DEFAULT_REFINE_PROMPT,
document_prompt: BasePromptTemplate = refine_prompts.EXAMPLE_PROMPT,
document_variable_name: str = "context_str",
initial_response_name: str = "existing_answer",
refine_llm: Optional[BaseLanguageModel] = None,
verbose: Optional[bool] = None,
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
document_prompt=document_prompt,
verbose=verbose,
**kwargs,
)
def load_qa_with_sources_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
verbose: Optional[bool] = None,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Load a question answering with sources chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", "refine" and "map_rerank".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
Returns:
A chain to use for question answering with sources.
"""
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
"map_rerank": _load_map_rerank_chain,
}
if chain_type not in loader_mapping:
raise ValueError(
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
_func: LoadingCallable = loader_mapping[chain_type]
return _func(llm, verbose=verbose, **kwargs)
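# Usage sketch (assumes an OpenAI key and an existing list of Documents `docs`):
#
#     from langchain.llms import OpenAI
#
#     chain = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="map_reduce")
#     result = chain(
#         {"input_documents": docs, "question": "What did the author say about X?"},
#         return_only_outputs=True,
#     )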
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~llms~gooseai.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Extra, Field, root_validator
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class GooseAI(LLM):
"""GooseAI large language models.
To use, you should have the ``openai`` python package installed, and the
environment variable ``GOOSEAI_API_KEY`` set with your API key.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain.llms import GooseAI
gooseai = GooseAI(model_name="gpt-neo-20b")
"""
client: Any
model_name: str = "gpt-neo-20b"
"""Model name to use"""
temperature: float = 0.7
"""What sampling temperature to use"""
max_tokens: int = 256
"""The maximum number of tokens to generate in the completion.
-1 returns as many tokens as possible given the prompt and
    the model's maximal context size."""
top_p: float = 1
"""Total probability mass of tokens to consider at each step."""
min_tokens: int = 1
"""The minimum number of tokens to generate in the completion."""
frequency_penalty: float = 0
"""Penalizes repeated tokens according to frequency."""
presence_penalty: float = 0
"""Penalizes repeated tokens."""
n: int = 1
"""How many completions to generate for each prompt."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
"""Adjust the probability of specific tokens being generated."""
gooseai_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.ignore
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
gooseai_api_key = get_from_dict_or_env(
values, "gooseai_api_key", "GOOSEAI_API_KEY"
)
try:
import openai
openai.api_key = gooseai_api_key
openai.api_base = "https://api.goose.ai/v1"
values["client"] = openai.Completion
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling GooseAI API."""
normal_params = {
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"top_p": self.top_p,
"min_tokens": self.min_tokens,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"n": self.n,
"logit_bias": self.logit_bias,
}
return {**normal_params, **self.model_kwargs}
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {**{"model_name": self.model_name}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "gooseai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call the GooseAI API."""
params = self._default_params
if stop is not None:
if "stop" in params:
raise ValueError("`stop` found in both the input and default params.")
params["stop"] = stop
params = {**params, **kwargs}
response = self.client.create(engine=self.model_name, prompt=prompt, **params)
text = response.choices[0].text
return text
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~memory~test_rockset.py | """Tests RocksetChatMessageHistory by creating a collection
for message history, adding to it, and clearing it.
To run these tests, make sure you have the ROCKSET_API_KEY
and ROCKSET_REGION environment variables set.
"""
import json
import os
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import RocksetChatMessageHistory
from langchain.schema.messages import _message_to_dict
collection_name = "langchain_demo"
session_id = "MySession"
class TestRockset:
memory: RocksetChatMessageHistory
@classmethod
def setup_class(cls) -> None:
from rockset import DevRegions, Regions, RocksetClient
assert os.environ.get("ROCKSET_API_KEY") is not None
assert os.environ.get("ROCKSET_REGION") is not None
api_key = os.environ.get("ROCKSET_API_KEY")
region = os.environ.get("ROCKSET_REGION")
if region == "use1a1":
host = Regions.use1a1
elif region == "usw2a1" or not region:
host = Regions.usw2a1
elif region == "euc1a1":
host = Regions.euc1a1
elif region == "dev":
host = DevRegions.usw2a1
else:
host = region
client = RocksetClient(host, api_key)
cls.memory = RocksetChatMessageHistory(
session_id, client, collection_name, sync=True
)
def test_memory_with_message_store(self) -> None:
memory = ConversationBufferMemory(
memory_key="messages", chat_memory=self.memory, return_messages=True
)
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
messages = memory.chat_memory.messages
messages_json = json.dumps([_message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~github~toolkit.py | """GitHub Toolkit."""
from typing import Dict, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.github.prompt import (
COMMENT_ON_ISSUE_PROMPT,
CREATE_FILE_PROMPT,
CREATE_PULL_REQUEST_PROMPT,
DELETE_FILE_PROMPT,
GET_ISSUE_PROMPT,
GET_ISSUES_PROMPT,
READ_FILE_PROMPT,
UPDATE_FILE_PROMPT,
)
from langchain.tools.github.tool import GitHubAction
from langchain.utilities.github import GitHubAPIWrapper
class GitHubToolkit(BaseToolkit):
"""GitHub Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, or updating,
reading underlying data.
For example, this toolkit can be used to create issues, pull requests,
and comments on GitHub.
See [Security](https://python.langchain.com/docs/security) for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_github_api_wrapper(
cls, github_api_wrapper: GitHubAPIWrapper
) -> "GitHubToolkit":
operations: List[Dict] = [
{
"mode": "get_issues",
"name": "Get Issues",
"description": GET_ISSUES_PROMPT,
},
{
"mode": "get_issue",
"name": "Get Issue",
"description": GET_ISSUE_PROMPT,
},
{
"mode": "comment_on_issue",
"name": "Comment on Issue",
"description": COMMENT_ON_ISSUE_PROMPT,
},
{
"mode": "create_pull_request",
"name": "Create Pull Request",
"description": CREATE_PULL_REQUEST_PROMPT,
},
{
"mode": "create_file",
"name": "Create File",
"description": CREATE_FILE_PROMPT,
},
{
"mode": "read_file",
"name": "Read File",
"description": READ_FILE_PROMPT,
},
{
"mode": "update_file",
"name": "Update File",
"description": UPDATE_FILE_PROMPT,
},
{
"mode": "delete_file",
"name": "Delete File",
"description": DELETE_FILE_PROMPT,
},
]
tools = [
GitHubAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=github_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
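# Usage sketch (assumes the GitHub app environment variables that GitHubAPIWrapper
# reads, such as the app id, private key and target repository, are configured):
#
#     github = GitHubAPIWrapper()
#     toolkit = GitHubToolkit.from_github_api_wrapper(github)
#     tools = toolkit.get_tools()  # one GitHubAction per operation listed above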
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chains~test_self_ask_with_search.py | """Integration test for self ask with search."""
from langchain.agents.self_ask_with_search.base import SelfAskWithSearchChain
from langchain.llms.openai import OpenAI
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
def test_self_ask_with_search() -> None:
"""Test functionality on a prompt."""
question = "What is the hometown of the reigning men's U.S. Open champion?"
chain = SelfAskWithSearchChain(
llm=OpenAI(temperature=0),
search_chain=GoogleSerperAPIWrapper(),
input_key="q",
output_key="a",
)
answer = chain.run(question)
final_answer = answer.split("\n")[-1]
assert final_answer == "El Palmar, Spain"
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
            ModerationPiiError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
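# Illustrative config sketch (values and variable names are assumptions, not
# defaults): the keys below are the ones read by validate(), _contains_pii() and
# _detect_pii() above.
#
#     pii_config = {
#         "redact": True,              # route to _detect_pii and mask matches
#         "threshold": 0.5,            # minimum Comprehend confidence score
#         "labels": ["SSN", "EMAIL"],  # PII entity types to act on (empty = all)
#         "mask_character": "X",       # used when redacting
#     }
#     clean_prompt = ComprehendPII(client=comprehend_client).validate(prompt, pii_config)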
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~embeddings~clarifai.py | import logging
from typing import Any, Dict, List, Optional
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class ClarifaiEmbeddings(BaseModel, Embeddings):
"""Clarifai embedding models.
To use, you should have the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your personal access token or pass it
as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain.embeddings import ClarifaiEmbeddings
            clarifai = ClarifaiEmbeddings(
                user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID
            )
"""
stub: Any #: :meta private:
"""Clarifai stub."""
userDataObject: Any
"""Clarifai user data object."""
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
"""Clarifai personal access token to use."""
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
if values["pat"] is None:
raise ValueError("Please provide a pat.")
if user_id is None:
raise ValueError("Please provide a user_id.")
if app_id is None:
raise ValueError("Please provide a app_id.")
if model_id is None:
raise ValueError("Please provide a model_id.")
try:
from clarifai.auth.helper import ClarifaiAuthHelper
from clarifai.client import create_stub
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
auth = ClarifaiAuthHelper(
user_id=user_id,
app_id=app_id,
pat=values["pat"],
base=values["api_base"],
)
values["userDataObject"] = auth.get_user_app_id_proto()
values["stub"] = create_stub(auth)
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to Clarifai's embedding models.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
batch_size = 32
embeddings = []
for i in range(0, len(texts), batch_size):
batch = texts[i : i + batch_size]
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=t))
)
for t in batch
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings.extend(
[
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to Clarifai's embedding models.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
try:
from clarifai_grpc.grpc.api import (
resources_pb2,
service_pb2,
)
from clarifai_grpc.grpc.api.status import status_code_pb2
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
post_model_outputs_request = service_pb2.PostModelOutputsRequest(
user_app_id=self.userDataObject,
model_id=self.model_id,
version_id=self.model_version_id,
inputs=[
resources_pb2.Input(
data=resources_pb2.Data(text=resources_pb2.Text(raw=text))
)
],
)
post_model_outputs_response = self.stub.PostModelOutputs(
post_model_outputs_request
)
if post_model_outputs_response.status.code != status_code_pb2.SUCCESS:
logger.error(post_model_outputs_response.status)
first_output_failure = (
post_model_outputs_response.outputs[0].status
                if len(post_model_outputs_response.outputs)
else None
)
raise Exception(
f"Post model outputs failed, status: "
f"{post_model_outputs_response.status}, first output failure: "
f"{first_output_failure}"
)
embeddings = [
list(o.data.embeddings[0].vector)
for o in post_model_outputs_response.outputs
]
return embeddings[0]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~chains~test_sequential.py | """Test pipeline functionality."""
from typing import Dict, List, Optional
import pytest
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.memory.simple import SimpleMemory
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeChain(Chain):
"""Fake Chain for testing purposes."""
input_variables: List[str]
output_variables: List[str]
@property
def input_keys(self) -> List[str]:
"""Input keys this chain returns."""
return self.input_variables
@property
def output_keys(self) -> List[str]:
"""Input keys this chain returns."""
return self.output_variables
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
outputs = {}
for var in self.output_variables:
variables = [inputs[k] for k in self.input_variables]
outputs[var] = f"{' '.join(variables)}foo"
return outputs
async def _acall(
self,
inputs: Dict[str, str],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
outputs = {}
for var in self.output_variables:
variables = [inputs[k] for k in self.input_variables]
outputs[var] = f"{' '.join(variables)}foo"
return outputs
def test_sequential_usage_single_inputs() -> None:
"""Test sequential on single input chains."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
expected_output = {"baz": "123foofoo", "foo": "123"}
assert output == expected_output
def test_sequential_usage_multiple_inputs() -> None:
"""Test sequential on multiple input chains."""
chain_1 = FakeChain(input_variables=["foo", "test"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
output = chain({"foo": "123", "test": "456"})
expected_output = {
"baz": "123 456foo 123foo",
"foo": "123",
"test": "456",
}
assert output == expected_output
def test_sequential_usage_memory() -> None:
"""Test sequential usage with memory."""
memory = SimpleMemory(memories={"zab": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(
memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
)
output = chain({"foo": "123"})
expected_output = {"baz": "123foofoo", "foo": "123", "zab": "rab"}
assert output == expected_output
memory = SimpleMemory(memories={"zab": "rab", "foo": "rab"})
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
SequentialChain(
memory=memory, chains=[chain_1, chain_2], input_variables=["foo"]
)
def test_sequential_internal_chain_use_memory() -> None:
"""Test sequential usage with memory for one of the internal chains."""
memory = ConversationBufferMemory(memory_key="bla")
memory.save_context({"input": "yo"}, {"output": "ya"})
chain_1 = FakeChain(
input_variables=["foo", "bla"], output_variables=["bar"], memory=memory
)
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
print("HEYYY OUTPUT", output)
expected_output = {"foo": "123", "baz": "123 Human: yo\nAI: yafoofoo"}
assert output == expected_output
def test_sequential_usage_multiple_outputs() -> None:
"""Test sequential usage on multiple output chains."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
chain = SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
output = chain({"foo": "123"})
expected_output = {
"baz": "123foo 123foo",
"foo": "123",
}
assert output == expected_output
def test_sequential_missing_inputs() -> None:
"""Test error is raised when input variables are missing."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "test"], output_variables=["baz"])
with pytest.raises(ValueError):
# Also needs "test" as an input
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo"])
def test_sequential_bad_outputs() -> None:
"""Test error is raised when bad outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
# "test" is not present as an output variable.
SequentialChain(
chains=[chain_1, chain_2],
input_variables=["foo"],
output_variables=["test"],
)
def test_sequential_valid_outputs() -> None:
"""Test chain runs when valid outputs are specified."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SequentialChain(
chains=[chain_1, chain_2],
input_variables=["foo"],
output_variables=["bar", "baz"],
)
output = chain({"foo": "123"}, return_only_outputs=True)
expected_output = {"baz": "123foofoo", "bar": "123foo"}
assert output == expected_output
def test_sequential_overlapping_inputs() -> None:
"""Test error is raised when input variables are overlapping."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
# "test" is specified as an input, but also is an output of one step
SequentialChain(chains=[chain_1, chain_2], input_variables=["foo", "test"])
def test_simple_sequential_functionality() -> None:
"""Test simple sequential functionality."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
chain = SimpleSequentialChain(chains=[chain_1, chain_2])
output = chain({"input": "123"})
expected_output = {"output": "123foofoo", "input": "123"}
assert output == expected_output
@pytest.mark.asyncio
@pytest.mark.parametrize("isAsync", [False, True])
async def test_simple_sequential_functionality_with_callbacks(isAsync: bool) -> None:
"""Test simple sequential functionality."""
handler_1 = FakeCallbackHandler()
handler_2 = FakeCallbackHandler()
handler_3 = FakeCallbackHandler()
chain_1 = FakeChain(
input_variables=["foo"], output_variables=["bar"], callbacks=[handler_1]
)
chain_2 = FakeChain(
input_variables=["bar"], output_variables=["baz"], callbacks=[handler_2]
)
chain_3 = FakeChain(
input_variables=["jack"], output_variables=["baf"], callbacks=[handler_3]
)
chain = SimpleSequentialChain(chains=[chain_1, chain_2, chain_3])
if isAsync:
output = await chain.ainvoke({"input": "123"})
else:
output = chain({"input": "123"})
expected_output = {"output": "123foofoofoo", "input": "123"}
assert output == expected_output
# Check that each of the callbacks were invoked once per the entire run
for handler in [handler_1, handler_2, handler_3]:
assert handler.starts == 1
assert handler.ends == 1
assert handler.errors == 0
def test_multi_input_errors() -> None:
"""Test simple sequential errors if multiple input variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"])
chain_2 = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
def test_multi_output_errors() -> None:
"""Test simple sequential errors if multiple output variables are expected."""
chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar", "grok"])
chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"])
with pytest.raises(ValueError):
SimpleSequentialChain(chains=[chain_1, chain_2])
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~evaluation~loading.py | """Loading datasets and evaluators."""
from typing import Any, Dict, List, Optional, Sequence, Type, Union
from langchain.chains.base import Chain
from langchain.chat_models.openai import ChatOpenAI
from langchain.evaluation.agents.trajectory_eval_chain import TrajectoryEvalChain
from langchain.evaluation.comparison import PairwiseStringEvalChain
from langchain.evaluation.comparison.eval_chain import LabeledPairwiseStringEvalChain
from langchain.evaluation.criteria.eval_chain import (
CriteriaEvalChain,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.embedding_distance.base import (
EmbeddingDistanceEvalChain,
PairwiseEmbeddingDistanceEvalChain,
)
from langchain.evaluation.exact_match.base import ExactMatchStringEvaluator
from langchain.evaluation.parsing.base import (
JsonEqualityEvaluator,
JsonValidityEvaluator,
)
from langchain.evaluation.qa import ContextQAEvalChain, CotQAEvalChain, QAEvalChain
from langchain.evaluation.regex_match.base import RegexMatchStringEvaluator
from langchain.evaluation.schema import EvaluatorType, LLMEvalChain, StringEvaluator
from langchain.evaluation.scoring.eval_chain import (
LabeledScoreStringEvalChain,
ScoreStringEvalChain,
)
from langchain.evaluation.string_distance.base import (
PairwiseStringDistanceEvalChain,
StringDistanceEvalChain,
)
from langchain.schema.language_model import BaseLanguageModel
def load_dataset(uri: str) -> List[Dict]:
"""Load a dataset from the `LangChainDatasets on HuggingFace <https://huggingface.co/LangChainDatasets>`_.
Args:
uri: The uri of the dataset to load.
Returns:
A list of dictionaries, each representing a row in the dataset.
**Prerequisites**
.. code-block:: shell
pip install datasets
Examples
--------
.. code-block:: python
from langchain.evaluation import load_dataset
ds = load_dataset("llm-math")
""" # noqa: E501
try:
from datasets import load_dataset
except ImportError:
raise ImportError(
"load_dataset requires the `datasets` package."
" Please install with `pip install datasets`"
)
dataset = load_dataset(f"LangChainDatasets/{uri}")
return [d for d in dataset["train"]]
_EVALUATOR_MAP: Dict[
EvaluatorType, Union[Type[LLMEvalChain], Type[Chain], Type[StringEvaluator]]
] = {
EvaluatorType.QA: QAEvalChain,
EvaluatorType.COT_QA: CotQAEvalChain,
EvaluatorType.CONTEXT_QA: ContextQAEvalChain,
EvaluatorType.PAIRWISE_STRING: PairwiseStringEvalChain,
EvaluatorType.SCORE_STRING: ScoreStringEvalChain,
EvaluatorType.LABELED_PAIRWISE_STRING: LabeledPairwiseStringEvalChain,
EvaluatorType.LABELED_SCORE_STRING: LabeledScoreStringEvalChain,
EvaluatorType.AGENT_TRAJECTORY: TrajectoryEvalChain,
EvaluatorType.CRITERIA: CriteriaEvalChain,
EvaluatorType.LABELED_CRITERIA: LabeledCriteriaEvalChain,
EvaluatorType.STRING_DISTANCE: StringDistanceEvalChain,
EvaluatorType.PAIRWISE_STRING_DISTANCE: PairwiseStringDistanceEvalChain,
EvaluatorType.EMBEDDING_DISTANCE: EmbeddingDistanceEvalChain,
EvaluatorType.PAIRWISE_EMBEDDING_DISTANCE: PairwiseEmbeddingDistanceEvalChain,
EvaluatorType.JSON_VALIDITY: JsonValidityEvaluator,
EvaluatorType.JSON_EQUALITY: JsonEqualityEvaluator,
EvaluatorType.REGEX_MATCH: RegexMatchStringEvaluator,
EvaluatorType.EXACT_MATCH: ExactMatchStringEvaluator,
}
def load_evaluator(
evaluator: EvaluatorType,
*,
llm: Optional[BaseLanguageModel] = None,
**kwargs: Any,
) -> Union[Chain, StringEvaluator]:
"""Load the requested evaluation chain specified by a string.
Parameters
----------
evaluator : EvaluatorType
The type of evaluator to load.
llm : BaseLanguageModel, optional
The language model to use for evaluation, by default None
**kwargs : Any
Additional keyword arguments to pass to the evaluator.
Returns
-------
Chain
The loaded evaluation chain.
Examples
--------
>>> from langchain.evaluation import load_evaluator, EvaluatorType
>>> evaluator = load_evaluator(EvaluatorType.QA)
"""
if evaluator not in _EVALUATOR_MAP:
raise ValueError(
f"Unknown evaluator type: {evaluator}"
f"\nValid types are: {list(_EVALUATOR_MAP.keys())}"
)
evaluator_cls = _EVALUATOR_MAP[evaluator]
if issubclass(evaluator_cls, LLMEvalChain):
try:
llm = llm or ChatOpenAI(model="gpt-4", temperature=0)
except Exception as e:
raise ValueError(
f"Evaluation with the {evaluator_cls} requires a "
"language model to function."
" Failed to create the default 'gpt-4' model."
" Please manually provide an evaluation LLM"
" or check your openai credentials."
) from e
return evaluator_cls.from_llm(llm=llm, **kwargs)
else:
return evaluator_cls(**kwargs)
def load_evaluators(
evaluators: Sequence[EvaluatorType],
*,
llm: Optional[BaseLanguageModel] = None,
config: Optional[dict] = None,
**kwargs: Any,
) -> List[Union[Chain, StringEvaluator]]:
"""Load evaluators specified by a list of evaluator types.
Parameters
----------
evaluators : Sequence[EvaluatorType]
The list of evaluator types to load.
llm : BaseLanguageModel, optional
The language model to use for evaluation, if none is provided, a default
ChatOpenAI gpt-4 model will be used.
config : dict, optional
A dictionary mapping evaluator types to additional keyword arguments,
by default None
**kwargs : Any
Additional keyword arguments to pass to all evaluators.
Returns
-------
List[Chain]
The loaded evaluators.
Examples
--------
>>> from langchain.evaluation import load_evaluators, EvaluatorType
>>> evaluators = [EvaluatorType.QA, EvaluatorType.CRITERIA]
>>> loaded_evaluators = load_evaluators(evaluators, criteria="helpfulness")
"""
loaded = []
for evaluator in evaluators:
_kwargs = config.get(evaluator, {}) if config else {}
loaded.append(load_evaluator(evaluator, llm=llm, **{**kwargs, **_kwargs}))
return loaded
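# Illustrative sketch (not part of the original module): evaluators can share one
# LLM while receiving per-evaluator keyword arguments through ``config``. The
# criteria value below is an arbitrary example.
#
#   from langchain.chat_models import ChatOpenAI
#   from langchain.evaluation import EvaluatorType
#
#   evaluators = load_evaluators(
#       [EvaluatorType.QA, EvaluatorType.CRITERIA],
#       llm=ChatOpenAI(model="gpt-4", temperature=0),
#       config={EvaluatorType.CRITERIA: {"criteria": "conciseness"}},
#   )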
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~storage~test_lc_store.py | import tempfile
from typing import Generator, cast
import pytest
from langchain.schema import Document
from langchain.storage._lc_store import create_kv_docstore, create_lc_store
from langchain.storage.file_system import LocalFileStore
@pytest.fixture
def file_store() -> Generator[LocalFileStore, None, None]:
# Create a temporary directory for testing
with tempfile.TemporaryDirectory() as temp_dir:
# Instantiate the LocalFileStore with the temporary directory as the root path
store = LocalFileStore(temp_dir)
yield store
def test_create_lc_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_lc_store(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = cast(Document, docstore.mget(["key1"])[0])
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
def test_create_kv_store(file_store: LocalFileStore) -> None:
"""Test that a docstore is created from a base store."""
docstore = create_kv_docstore(file_store)
docstore.mset([("key1", Document(page_content="hello", metadata={"key": "value"}))])
fetched_doc = docstore.mget(["key1"])[0]
assert isinstance(fetched_doc, Document)
assert fetched_doc.page_content == "hello"
assert fetched_doc.metadata == {"key": "value"}
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~langchain_experimental~comprehend_moderation~base_moderation.py | import uuid
from typing import Any, Callable, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValue
from langchain.schema import AIMessage, HumanMessage
from langchain_experimental.comprehend_moderation.pii import ComprehendPII
from langchain_experimental.comprehend_moderation.prompt_safety import (
ComprehendPromptSafety,
)
from langchain_experimental.comprehend_moderation.toxicity import ComprehendToxicity
class BaseModeration:
def __init__(
self,
client: Any,
config: Optional[Any] = None,
moderation_callback: Optional[Any] = None,
unique_id: Optional[str] = None,
run_manager: Optional[CallbackManagerForChainRun] = None,
):
self.client = client
self.config = config
self.moderation_callback = moderation_callback
self.unique_id = unique_id
self.chat_message_index = 0
self.run_manager = run_manager
self.chain_id = str(uuid.uuid4())
def _convert_prompt_to_text(self, prompt: Any) -> str:
input_text = str()
if isinstance(prompt, StringPromptValue):
input_text = prompt.text
elif isinstance(prompt, str):
input_text = prompt
elif isinstance(prompt, ChatPromptValue):
"""
We will just check the last message in the message Chain of a
ChatPromptTemplate. The typical chronology is
SystemMessage > HumanMessage > AIMessage and so on. However assuming
that with every chat the chain is invoked we will only check the last
message. This is assuming that all previous messages have been checked
already. Only HumanMessage and AIMessage will be checked. We can perhaps
loop through and take advantage of the additional_kwargs property in the
HumanMessage and AIMessage schema to mark messages that have been moderated.
However that means that this class could generate multiple text chunks
and moderate() logics would need to be updated. This also means some
complexity in re-constructing the prompt while keeping the messages in
sequence.
"""
message = prompt.messages[-1]
self.chat_message_index = len(prompt.messages) - 1
if isinstance(message, HumanMessage):
input_text = message.content
if isinstance(message, AIMessage):
input_text = message.content
else:
raise ValueError(
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
return input_text
def _convert_text_to_prompt(self, prompt: Any, text: str) -> Any:
if isinstance(prompt, StringPromptValue):
return StringPromptValue(text=text)
elif isinstance(prompt, str):
return text
elif isinstance(prompt, ChatPromptValue):
# Copy the messages because we may need to mutate them.
# We don't want to mutate data we don't own.
messages = list(prompt.messages)
message = messages[self.chat_message_index]
if isinstance(message, HumanMessage):
messages[self.chat_message_index] = HumanMessage(
content=text,
example=message.example,
additional_kwargs=message.additional_kwargs,
)
if isinstance(message, AIMessage):
messages[self.chat_message_index] = AIMessage(
content=text,
example=message.example,
additional_kwargs=message.additional_kwargs,
)
return ChatPromptValue(messages=messages)
else:
raise ValueError(
f"Invalid input type {type(input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
def _moderation_class(self, moderation_class: Any) -> Callable:
return moderation_class(
client=self.client,
callback=self.moderation_callback,
unique_id=self.unique_id,
chain_id=self.chain_id,
).validate
def _log_message_for_verbose(self, message: str) -> None:
if self.run_manager:
self.run_manager.on_text(message)
def moderate(self, prompt: Any) -> str:
from langchain_experimental.comprehend_moderation.base_moderation_config import ( # noqa: E501
ModerationPiiConfig,
ModerationPromptSafetyConfig,
ModerationToxicityConfig,
)
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import ( # noqa: E501
ModerationPiiError,
ModerationPromptSafetyError,
ModerationToxicityError,
)
try:
# convert prompt to text
input_text = self._convert_prompt_to_text(prompt=prompt)
output_text = str()
# perform moderation
filter_functions = {
"pii": ComprehendPII,
"toxicity": ComprehendToxicity,
"prompt_safety": ComprehendPromptSafety,
}
filters = self.config.filters # type: ignore
for _filter in filters:
filter_name = (
"pii"
if isinstance(_filter, ModerationPiiConfig)
else (
"toxicity"
if isinstance(_filter, ModerationToxicityConfig)
else (
"prompt_safety"
if isinstance(_filter, ModerationPromptSafetyConfig)
else None
)
)
)
if filter_name in filter_functions:
self._log_message_for_verbose(
f"Running {filter_name} Validation...\n"
)
validation_fn = self._moderation_class(
moderation_class=filter_functions[filter_name]
)
input_text = input_text if not output_text else output_text
output_text = validation_fn(
prompt_value=input_text,
config=_filter.dict(),
)
# convert text to prompt and return
return self._convert_text_to_prompt(prompt=prompt, text=output_text)
except ModerationPiiError as e:
self._log_message_for_verbose(f"Found PII content..stopping..\n{str(e)}\n")
raise e
except ModerationToxicityError as e:
self._log_message_for_verbose(
f"Found Toxic content..stopping..\n{str(e)}\n"
)
raise e
except ModerationPromptSafetyError as e:
self._log_message_for_verbose(
f"Found Harmful intention..stopping..\n{str(e)}\n"
)
raise e
except Exception as e:
raise e
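# Illustrative sketch (assumed names): ``comprehend_client`` is expected to be a
# boto3 "comprehend" client and ``config`` a moderation config object exposing the
# ``filters`` attribute used above; ``moderate`` then returns the prompt in its
# original type with flagged content handled according to those filters.
#
#   moderation = BaseModeration(client=comprehend_client, config=config)
#   checked_prompt = moderation.moderate("raw user prompt")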
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~evaluation~criteria~test_eval_chain.py | """Test the criteria eval chain."""
import pytest
from langchain.evaluation.criteria.eval_chain import (
_SUPPORTED_CRITERIA,
Criteria,
CriteriaEvalChain,
CriteriaResultOutputParser,
LabeledCriteriaEvalChain,
)
from langchain.evaluation.schema import StringEvaluator
from tests.unit_tests.llms.fake_llm import FakeLLM
def test_resolve_criteria_str() -> None:
# type: ignore
assert CriteriaEvalChain.resolve_criteria("helpfulness") == {
"helpfulness": _SUPPORTED_CRITERIA[Criteria.HELPFULNESS]
}
assert CriteriaEvalChain.resolve_criteria("correctness") == {
"correctness": _SUPPORTED_CRITERIA[Criteria.CORRECTNESS]
}
@pytest.mark.parametrize(
"text,want",
[
("Y", {"reasoning": "", "value": "Y", "score": 1}),
(
"""Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.
Y""",
{
"reasoning": """Here is my step-by-step reasoning for the given criteria:
The criterion is: "Do you like cake?" I like cake.""", # noqa: E501
"value": "Y",
"score": 1,
},
),
(
" NThe submission N is correct, accurate, and factual. It accurately"
" identifies the specific effects of knowledge and interest on"
" these factors. Therefore, the submission Y meets the criteria. Y",
{
"reasoning": "NThe submission N is correct, accurate, and factual. It"
" accurately identifies the specific effects of knowledge and interest"
" on these factors. Therefore, the submission Y meets the criteria.",
"value": "Y",
"score": 1,
},
),
],
)
def test_CriteriaResultOutputParser_parse(text: str, want: dict) -> None:
output_parser = CriteriaResultOutputParser()
got = output_parser.parse(text)
assert got.get("reasoning") == want["reasoning"]
assert got.get("value") == want["value"]
assert got.get("score") == want["score"]
@pytest.mark.parametrize("criterion", list(Criteria))
def test_resolve_criteria_enum(criterion: Criteria) -> None:
assert CriteriaEvalChain.resolve_criteria(criterion) == {
criterion.value: _SUPPORTED_CRITERIA[criterion]
}
def test_criteria_eval_chain() -> None:
chain = CriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"}, sequential_responses=True
),
criteria={"my criterion": "my criterion description"},
)
with pytest.warns(UserWarning, match=chain._skip_reference_warning):
result = chain.evaluate_strings(
prediction="my prediction", reference="my reference", input="my input"
)
assert result["reasoning"] == "The meaning of life"
def test_criteria_eval_chain_missing_reference() -> None:
chain = LabeledCriteriaEvalChain.from_llm(
llm=FakeLLM(
queries={"text": "The meaning of life\nY"},
sequential_responses=True,
),
criteria={"my criterion": "my criterion description"},
)
with pytest.raises(ValueError):
chain.evaluate_strings(prediction="my prediction", input="my input")
def test_implements_string_protocol() -> None:
assert issubclass(CriteriaEvalChain, StringEvaluator)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~document_loaders~test_rss.py | import pytest
from langchain.document_loaders import RSSFeedLoader
@pytest.mark.requires("feedparser", "newspaper")
def test_continue_on_failure_true() -> None:
"""Test exception is not raised when continue_on_failure=True."""
loader = RSSFeedLoader(["badurl.foobar"])
loader.load()
@pytest.mark.requires("feedparser", "newspaper")
def test_continue_on_failure_false() -> None:
"""Test exception is raised when continue_on_failure=False."""
loader = RSSFeedLoader(["badurl.foobar"], continue_on_failure=False)
with pytest.raises(Exception):
loader.load()
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~time_weighted_retriever.py | import datetime
from copy import deepcopy
from typing import Any, Dict, List, Optional, Tuple
from langchain.callbacks.manager import CallbackManagerForRetrieverRun
from langchain.pydantic_v1 import Field
from langchain.schema import BaseRetriever, Document
from langchain.schema.vectorstore import VectorStore
def _get_hours_passed(time: datetime.datetime, ref_time: datetime.datetime) -> float:
"""Get the hours passed between two datetimes."""
return (time - ref_time).total_seconds() / 3600
class TimeWeightedVectorStoreRetriever(BaseRetriever):
"""Retriever that combines embedding similarity with
recency in retrieving values."""
vectorstore: VectorStore
"""The vectorstore to store documents and determine salience."""
search_kwargs: dict = Field(default_factory=lambda: dict(k=100))
"""Keyword arguments to pass to the vectorstore similarity search."""
# TODO: abstract as a queue
memory_stream: List[Document] = Field(default_factory=list)
"""The memory_stream of documents to search through."""
decay_rate: float = Field(default=0.01)
"""The exponential decay factor used as (1.0-decay_rate)**(hrs_passed)."""
k: int = 4
"""The maximum number of documents to retrieve in a given call."""
other_score_keys: List[str] = []
"""Other keys in the metadata to factor into the score, e.g. 'importance'."""
default_salience: Optional[float] = None
"""The salience to assign memories not retrieved from the vector store.
None assigns no salience to documents not fetched from the vector store.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _document_get_date(self, field: str, document: Document) -> datetime.datetime:
"""Return the value of the date field of a document."""
if field in document.metadata:
if isinstance(document.metadata[field], float):
return datetime.datetime.fromtimestamp(document.metadata[field])
return document.metadata[field]
return datetime.datetime.now()
def _get_combined_score(
self,
document: Document,
vector_relevance: Optional[float],
current_time: datetime.datetime,
) -> float:
"""Return the combined score for a document."""
hours_passed = _get_hours_passed(
current_time,
self._document_get_date("last_accessed_at", document),
)
score = (1.0 - self.decay_rate) ** hours_passed
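        # Worked example with hypothetical numbers: decay_rate=0.01 and a document
        # last accessed 24 hours ago give a recency term of 0.99 ** 24 (about 0.786);
        # an "importance" metadata value of 0.5 plus a vector relevance of 0.9 would
        # then yield a combined score of roughly 2.19.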
for key in self.other_score_keys:
if key in document.metadata:
score += document.metadata[key]
if vector_relevance is not None:
score += vector_relevance
return score
def get_salient_docs(self, query: str) -> Dict[int, Tuple[Document, float]]:
"""Return documents that are salient to the query."""
docs_and_scores: List[Tuple[Document, float]]
docs_and_scores = self.vectorstore.similarity_search_with_relevance_scores(
query, **self.search_kwargs
)
results = {}
for fetched_doc, relevance in docs_and_scores:
if "buffer_idx" in fetched_doc.metadata:
buffer_idx = fetched_doc.metadata["buffer_idx"]
doc = self.memory_stream[buffer_idx]
results[buffer_idx] = (doc, relevance)
return results
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
"""Return documents that are relevant to the query."""
current_time = datetime.datetime.now()
docs_and_scores = {
doc.metadata["buffer_idx"]: (doc, self.default_salience)
for doc in self.memory_stream[-self.k :]
}
# If a doc is considered salient, update the salience score
docs_and_scores.update(self.get_salient_docs(query))
rescored_docs = [
(doc, self._get_combined_score(doc, relevance, current_time))
for doc, relevance in docs_and_scores.values()
]
rescored_docs.sort(key=lambda x: x[1], reverse=True)
result = []
# Ensure frequently accessed memories aren't forgotten
for doc, _ in rescored_docs[: self.k]:
# TODO: Update vector store doc once `update` method is exposed.
buffered_doc = self.memory_stream[doc.metadata["buffer_idx"]]
buffered_doc.metadata["last_accessed_at"] = current_time
result.append(buffered_doc)
return result
def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return self.vectorstore.add_documents(dup_docs, **kwargs)
async def aadd_documents(
self, documents: List[Document], **kwargs: Any
) -> List[str]:
"""Add documents to vectorstore."""
current_time = kwargs.get("current_time")
if current_time is None:
current_time = datetime.datetime.now()
# Avoid mutating input documents
dup_docs = [deepcopy(d) for d in documents]
for i, doc in enumerate(dup_docs):
if "last_accessed_at" not in doc.metadata:
doc.metadata["last_accessed_at"] = current_time
if "created_at" not in doc.metadata:
doc.metadata["created_at"] = current_time
doc.metadata["buffer_idx"] = len(self.memory_stream) + i
self.memory_stream.extend(dup_docs)
return await self.vectorstore.aadd_documents(dup_docs, **kwargs)
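# Illustrative usage sketch (``vectorstore`` is assumed to be an already
# constructed VectorStore backed by an embedding model):
#
#   retriever = TimeWeightedVectorStoreRetriever(
#       vectorstore=vectorstore, decay_rate=0.01, k=4
#   )
#   retriever.add_documents([Document(page_content="hello world")])
#   docs = retriever.get_relevant_documents("hello")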
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~utilities~bibtex.py | """Util that calls bibtexparser."""
import logging
from typing import Any, Dict, List, Mapping
from langchain.pydantic_v1 import BaseModel, Extra, root_validator
logger = logging.getLogger(__name__)
OPTIONAL_FIELDS = [
"annotate",
"booktitle",
"editor",
"howpublished",
"journal",
"keywords",
"note",
"organization",
"publisher",
"school",
"series",
"type",
"doi",
"issn",
"isbn",
]
class BibtexparserWrapper(BaseModel):
"""Wrapper around bibtexparser.
To use, you should have the ``bibtexparser`` python package installed.
https://bibtexparser.readthedocs.io/en/master/
This wrapper will use bibtexparser to load a collection of references from
a bibtex file and fetch document summaries.
"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import bibtexparser # noqa
except ImportError:
raise ImportError(
"Could not import bibtexparser python package. "
"Please install it with `pip install bibtexparser`."
)
return values
def load_bibtex_entries(self, path: str) -> List[Dict[str, Any]]:
"""Load bibtex entries from the bibtex file at the given path."""
import bibtexparser
with open(path) as file:
entries = bibtexparser.load(file).entries
return entries
def get_metadata(
self, entry: Mapping[str, Any], load_extra: bool = False
) -> Dict[str, Any]:
"""Get metadata for the given entry."""
publication = entry.get("journal") or entry.get("booktitle")
if "url" in entry:
url = entry["url"]
elif "doi" in entry:
url = f'https://doi.org/{entry["doi"]}'
else:
url = None
meta = {
"id": entry.get("ID"),
"published_year": entry.get("year"),
"title": entry.get("title"),
"publication": publication,
"authors": entry.get("author"),
"abstract": entry.get("abstract"),
"url": url,
}
if load_extra:
for field in OPTIONAL_FIELDS:
meta[field] = entry.get(field)
return {k: v for k, v in meta.items() if v is not None}
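# Illustrative usage sketch (``refs.bib`` is a hypothetical file path):
#
#   wrapper = BibtexparserWrapper()
#   entries = wrapper.load_bibtex_entries("refs.bib")
#   metadata = [wrapper.get_metadata(entry, load_extra=True) for entry in entries]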
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~unit_tests~agents~test_types.py | import unittest
from langchain.agents.agent_types import AgentType
from langchain.agents.types import AGENT_TO_CLASS
class TestTypes(unittest.TestCase):
def test_confirm_full_coverage(self) -> None:
self.assertEqual(list(AgentType), list(AGENT_TO_CLASS.keys()))
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chat_models~azure_openai.py | """Azure OpenAI chat wrapper."""
from __future__ import annotations
import logging
from typing import Any, Dict, Mapping
from langchain.chat_models.openai import ChatOpenAI
from langchain.pydantic_v1 import root_validator
from langchain.schema import ChatResult
from langchain.utils import get_from_dict_or_env
logger = logging.getLogger(__name__)
class AzureChatOpenAI(ChatOpenAI):
"""`Azure OpenAI` Chat Completion API.
To use this class you
must have a deployed model on Azure OpenAI. Use `deployment_name` in the
constructor to refer to the "Model deployment name" in the Azure portal.
In addition, you should have the ``openai`` python package installed, and the
following environment variables set or passed in constructor in lower case:
- ``OPENAI_API_TYPE`` (default: ``azure``)
- ``OPENAI_API_KEY``
- ``OPENAI_API_BASE``
- ``OPENAI_API_VERSION``
- ``OPENAI_PROXY``
For example, if you have `gpt-35-turbo` deployed, with the deployment name
`35-turbo-dev`, the constructor should look like:
.. code-block:: python
AzureChatOpenAI(
deployment_name="35-turbo-dev",
openai_api_version="2023-05-15",
)
Be aware the API version may change.
You can also specify the version of the model using ``model_version`` constructor
parameter, as Azure OpenAI doesn't return model version with the response.
Default is empty. When you specify the version, it will be appended to the
model name in the response. Setting correct version will help you to calculate the
cost properly. Model version is not validated, so make sure you set it correctly
to get the correct cost.
Any parameters that are valid to be passed to the openai.create call can be passed
in, even if not explicitly saved on this class.
"""
deployment_name: str = ""
model_version: str = ""
openai_api_type: str = ""
openai_api_base: str = ""
openai_api_version: str = ""
openai_api_key: str = ""
openai_organization: str = ""
openai_proxy: str = ""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values,
"openai_api_key",
"OPENAI_API_KEY",
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
)
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
)
values["openai_api_type"] = get_from_dict_or_env(
values, "openai_api_type", "OPENAI_API_TYPE", default="azure"
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
try:
import openai
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
try:
values["client"] = openai.ChatCompletion
except AttributeError:
raise ValueError(
"`openai` has no `ChatCompletion` attribute, this is likely "
"due to an old version of the openai package. Try upgrading it "
"with `pip install --upgrade openai`."
)
if values["n"] < 1:
raise ValueError("n must be at least 1.")
if values["n"] > 1 and values["streaming"]:
raise ValueError("n must be 1 when streaming.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
**super()._default_params,
"engine": self.deployment_name,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**self._default_params}
@property
def _client_params(self) -> Dict[str, Any]:
"""Get the config params used for the openai client."""
return {
**super()._client_params,
"api_type": self.openai_api_type,
"api_version": self.openai_api_version,
}
@property
def _llm_type(self) -> str:
return "azure-openai-chat"
@property
def lc_attributes(self) -> Dict[str, Any]:
return {
"openai_api_type": self.openai_api_type,
"openai_api_version": self.openai_api_version,
}
def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
for res in response["choices"]:
if res.get("finish_reason", None) == "content_filter":
raise ValueError(
"Azure has not provided the response due to a content filter "
"being triggered"
)
chat_result = super()._create_chat_result(response)
if "model" in response:
model = response["model"]
if self.model_version:
model = f"{model}-{self.model_version}"
if chat_result.llm_output is not None and isinstance(
chat_result.llm_output, dict
):
chat_result.llm_output["model_name"] = model
return chat_result
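# Illustrative sketch of pinning ``model_version`` (deployment name and versions
# below are placeholders): the version string is appended to the model name in
# ``llm_output`` so downstream cost tracking sees e.g. "gpt-35-turbo-0613".
#
#   llm = AzureChatOpenAI(
#       deployment_name="35-turbo-dev",
#       openai_api_version="2023-05-15",
#       model_version="0613",
#   )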
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_elasticsearch.py | """Test elasticsearch_embeddings embeddings."""
import pytest
from langchain.embeddings.elasticsearch import ElasticsearchEmbeddings
@pytest.fixture
def model_id() -> str:
# Replace with your actual model_id
return "your_model_id"
def test_elasticsearch_embedding_documents(model_id: str) -> None:
"""Test Elasticsearch embedding documents."""
documents = ["foo bar", "bar foo", "foo"]
embedding = ElasticsearchEmbeddings.from_credentials(model_id)
output = embedding.embed_documents(documents)
assert len(output) == 3
assert len(output[0]) == 768 # Change 768 to the expected embedding size
assert len(output[1]) == 768 # Change 768 to the expected embedding size
assert len(output[2]) == 768 # Change 768 to the expected embedding size
def test_elasticsearch_embedding_query(model_id: str) -> None:
"""Test Elasticsearch embedding query."""
document = "foo bar"
embedding = ElasticsearchEmbeddings.from_credentials(model_id)
output = embedding.embed_query(document)
assert len(output) == 768 # Change 768 to the expected embedding size
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~docstore~wikipedia.py | """Wrapper around wikipedia API."""
from typing import Union
from langchain.docstore.base import Docstore
from langchain.docstore.document import Document
class Wikipedia(Docstore):
"""Wrapper around wikipedia API."""
def __init__(self) -> None:
"""Check that wikipedia package is installed."""
try:
import wikipedia # noqa: F401
except ImportError:
raise ImportError(
"Could not import wikipedia python package. "
"Please install it with `pip install wikipedia`."
)
def search(self, search: str) -> Union[str, Document]:
"""Try to search for wiki page.
If page exists, return the page summary, and a PageWithLookups object.
If page does not exist, return similar entries.
Args:
search: search string.
Returns: a Document object or error message.
"""
import wikipedia
try:
page_content = wikipedia.page(search).content
url = wikipedia.page(search).url
result: Union[str, Document] = Document(
page_content=page_content, metadata={"page": url}
)
except wikipedia.PageError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
except wikipedia.DisambiguationError:
result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
return result
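# Illustrative usage sketch: a successful lookup returns a Document whose metadata
# holds the page URL, while a failed lookup returns a "Could not find ..." string
# listing similar titles.
#
#   docstore = Wikipedia()
#   result = docstore.search("Python (programming language)")
#   if isinstance(result, Document):
#       print(result.metadata["page"])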
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_deepinfra.py | """Test DeepInfra API wrapper."""
from langchain.embeddings import DeepInfraEmbeddings
def test_deepinfra_call() -> None:
"""Test valid call to DeepInfra."""
deepinfra_emb = DeepInfraEmbeddings(model_id="sentence-transformers/clip-ViT-B-32")
r1 = deepinfra_emb.embed_documents(
[
"Alpha is the first letter of Greek alphabet",
"Beta is the second letter of Greek alphabet",
]
)
assert len(r1) == 2
assert len(r1[0]) == 512
assert len(r1[1]) == 512
r2 = deepinfra_emb.embed_query("What is the third letter of Greek alphabet")
assert len(r2) == 512
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_edenai.py | """Test EdenAi API wrapper.
In order to run this test, you need to have an EdenAI api key.
You can get it by registering for free at https://app.edenai.run/user/register.
A test key can be found at https://app.edenai.run/admin/account/settings by
clicking on the 'sandbox' toggle.
(calls will be free, and will return dummy results)
You'll then need to set EDENAI_API_KEY environment variable to your api key.
"""
from langchain.llms import EdenAI
def test_edenai_call() -> None:
"""Test simple call to edenai."""
llm = EdenAI(provider="openai", temperature=0.2, max_tokens=250)
output = llm("Say foo:")
assert llm._llm_type == "edenai"
assert llm.feature == "text"
assert llm.subfeature == "generation"
assert isinstance(output, str)
async def test_edenai_acall() -> None:
    """Test simple async call to edenai."""
    llm = EdenAI(provider="openai", temperature=0.2, max_tokens=250)
    result = await llm.agenerate(["Say foo:"])
    assert llm._llm_type == "edenai"
    assert llm.feature == "text"
    assert llm.subfeature == "generation"
    # agenerate returns an LLMResult, so assert on the generated text itself
    assert isinstance(result.generations[0][0].text, str)
def test_edenai_call_with_old_params() -> None:
"""
Test simple call to edenai with using `params`
to pass optional parameters to api
"""
llm = EdenAI(provider="openai", params={"temperature": 0.2, "max_tokens": 250})
output = llm("Say foo:")
assert llm._llm_type == "edenai"
assert llm.feature == "text"
assert llm.subfeature == "generation"
assert isinstance(output, str)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_polars_dataframe.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain.document_loaders import PolarsDataFrameLoader
from langchain.schema import Document
if TYPE_CHECKING:
import polars as pl
@pytest.fixture
def sample_data_frame() -> pl.DataFrame:
import polars as pl
data = {
"text": ["Hello", "World"],
"author": ["Alice", "Bob"],
"date": ["2022-01-01", "2022-01-02"],
}
return pl.DataFrame(data)
def test_load_returns_list_of_documents(sample_data_frame: pl.DataFrame) -> None:
loader = PolarsDataFrameLoader(sample_data_frame)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
def test_load_converts_dataframe_columns_to_document_metadata(
sample_data_frame: pl.DataFrame,
) -> None:
loader = PolarsDataFrameLoader(sample_data_frame)
docs = loader.load()
for i, doc in enumerate(docs):
df: pl.DataFrame = sample_data_frame[i]
assert df is not None
assert doc.metadata["author"] == df.select("author").item()
assert doc.metadata["date"] == df.select("date").item()
def test_load_uses_page_content_column_to_create_document_text(
sample_data_frame: pl.DataFrame,
) -> None:
sample_data_frame = sample_data_frame.rename(mapping={"text": "dummy_test_column"})
loader = PolarsDataFrameLoader(
sample_data_frame, page_content_column="dummy_test_column"
)
docs = loader.load()
assert docs[0].page_content == "Hello"
assert docs[1].page_content == "World"
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chains~constitutional_ai~models.py | """Models for the Constitutional AI chain."""
from langchain.pydantic_v1 import BaseModel
class ConstitutionalPrinciple(BaseModel):
"""Class for a constitutional principle."""
critique_request: str
revision_request: str
name: str = "Constitutional Principle"
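# Illustrative sketch: a custom principle (wording is arbitrary) that can be passed
# to chains which consume ConstitutionalPrinciple objects.
#
#   ethical_principle = ConstitutionalPrinciple(
#       name="Ethical Principle",
#       critique_request="The model should only talk about ethical and legal things.",
#       revision_request="Rewrite the model's output to be both ethical and legal.",
#   )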
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~agents~agent_toolkits~gitlab~toolkit.py | """GitLab Toolkit."""
from typing import Dict, List
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.tools import BaseTool
from langchain.tools.gitlab.prompt import (
COMMENT_ON_ISSUE_PROMPT,
CREATE_FILE_PROMPT,
CREATE_PULL_REQUEST_PROMPT,
DELETE_FILE_PROMPT,
GET_ISSUE_PROMPT,
GET_ISSUES_PROMPT,
READ_FILE_PROMPT,
UPDATE_FILE_PROMPT,
)
from langchain.tools.gitlab.tool import GitLabAction
from langchain.utilities.gitlab import GitLabAPIWrapper
class GitLabToolkit(BaseToolkit):
"""GitLab Toolkit.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, or updating,
reading underlying data.
For example, this toolkit can be used to create issues, pull requests,
and comments on GitLab.
See https://python.langchain.com/docs/security for more information.
"""
tools: List[BaseTool] = []
@classmethod
def from_gitlab_api_wrapper(
cls, gitlab_api_wrapper: GitLabAPIWrapper
) -> "GitLabToolkit":
operations: List[Dict] = [
{
"mode": "get_issues",
"name": "Get Issues",
"description": GET_ISSUES_PROMPT,
},
{
"mode": "get_issue",
"name": "Get Issue",
"description": GET_ISSUE_PROMPT,
},
{
"mode": "comment_on_issue",
"name": "Comment on Issue",
"description": COMMENT_ON_ISSUE_PROMPT,
},
{
"mode": "create_pull_request",
"name": "Create Pull Request",
"description": CREATE_PULL_REQUEST_PROMPT,
},
{
"mode": "create_file",
"name": "Create File",
"description": CREATE_FILE_PROMPT,
},
{
"mode": "read_file",
"name": "Read File",
"description": READ_FILE_PROMPT,
},
{
"mode": "update_file",
"name": "Update File",
"description": UPDATE_FILE_PROMPT,
},
{
"mode": "delete_file",
"name": "Delete File",
"description": DELETE_FILE_PROMPT,
},
]
tools = [
GitLabAction(
name=action["name"],
description=action["description"],
mode=action["mode"],
api_wrapper=gitlab_api_wrapper,
)
for action in operations
]
return cls(tools=tools)
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return self.tools
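# Illustrative usage sketch: GitLabAPIWrapper reads its credentials and target
# repository from environment variables (e.g. GITLAB_PERSONAL_ACCESS_TOKEN and
# GITLAB_REPOSITORY), so none are shown here.
#
#   gitlab = GitLabAPIWrapper()
#   toolkit = GitLabToolkit.from_gitlab_api_wrapper(gitlab)
#   tools = toolkit.get_tools()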
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chat_models~anthropic.py | from typing import Any, AsyncIterator, Dict, Iterator, List, Optional
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import (
BaseChatModel,
_agenerate_from_stream,
_generate_from_stream,
)
from langchain.llms.anthropic import _AnthropicCommon
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGeneration, ChatGenerationChunk, ChatResult
from langchain.schema.prompt import PromptValue
def _convert_one_message_to_text(
message: BaseMessage,
human_prompt: str,
ai_prompt: str,
) -> str:
if isinstance(message, ChatMessage):
message_text = f"\n\n{message.role.capitalize()}: {message.content}"
elif isinstance(message, HumanMessage):
message_text = f"{human_prompt} {message.content}"
elif isinstance(message, AIMessage):
message_text = f"{ai_prompt} {message.content}"
elif isinstance(message, SystemMessage):
message_text = message.content
else:
raise ValueError(f"Got unknown type {message}")
return message_text
def convert_messages_to_prompt_anthropic(
messages: List[BaseMessage],
*,
human_prompt: str = "\n\nHuman:",
ai_prompt: str = "\n\nAssistant:",
) -> str:
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
human_prompt (str, optional): Human prompt tag. Defaults to "\n\nHuman:".
ai_prompt (str, optional): AI prompt tag. Defaults to "\n\nAssistant:".
Returns:
str: Combined string with necessary human_prompt and ai_prompt tags.
"""
messages = messages.copy() # don't mutate the original list
if not isinstance(messages[-1], AIMessage):
messages.append(AIMessage(content=""))
text = "".join(
_convert_one_message_to_text(message, human_prompt, ai_prompt)
for message in messages
)
# trim off the trailing ' ' that might come from the "Assistant: "
return text.rstrip()
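# Worked example of the formatting above with the default prompt tags:
# [HumanMessage(content="Hi")] gains an empty trailing AIMessage and renders as
# "\n\nHuman: Hi\n\nAssistant:" once the trailing space is stripped.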
class ChatAnthropic(BaseChatModel, _AnthropicCommon):
"""`Anthropic` chat large language models.
To use, you should have the ``anthropic`` python package installed, and the
environment variable ``ANTHROPIC_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
import anthropic
from langchain.chat_models import ChatAnthropic
model = ChatAnthropic(model="<model_name>", anthropic_api_key="my-api-key")
"""
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
arbitrary_types_allowed = True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"anthropic_api_key": "ANTHROPIC_API_KEY"}
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "anthropic-chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
def _convert_messages_to_prompt(self, messages: List[BaseMessage]) -> str:
"""Format a list of messages into a full prompt for the Anthropic model
Args:
messages (List[BaseMessage]): List of BaseMessage to combine.
Returns:
str: Combined string with necessary HUMAN_PROMPT and AI_PROMPT tags.
"""
prompt_params = {}
if self.HUMAN_PROMPT:
prompt_params["human_prompt"] = self.HUMAN_PROMPT
if self.AI_PROMPT:
prompt_params["ai_prompt"] = self.AI_PROMPT
return convert_messages_to_prompt_anthropic(messages=messages, **prompt_params)
def convert_prompt(self, prompt: PromptValue) -> str:
return self._convert_messages_to_prompt(prompt.to_messages())
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
if stop:
params["stop_sequences"] = stop
stream_resp = self.client.completions.create(**params, stream=True)
for data in stream_resp:
delta = data.completion
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
run_manager.on_llm_new_token(delta)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
prompt = self._convert_messages_to_prompt(messages)
params: Dict[str, Any] = {"prompt": prompt, **self._default_params, **kwargs}
if stop:
params["stop_sequences"] = stop
stream_resp = await self.async_client.completions.create(**params, stream=True)
async for data in stream_resp:
delta = data.completion
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
if run_manager:
await run_manager.on_llm_new_token(delta)
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
prompt = self._convert_messages_to_prompt(
messages,
)
params: Dict[str, Any] = {
"prompt": prompt,
**self._default_params,
**kwargs,
}
if stop:
params["stop_sequences"] = stop
response = self.client.completions.create(**params)
completion = response.completion
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
stream_iter = self._astream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return await _agenerate_from_stream(stream_iter)
prompt = self._convert_messages_to_prompt(
messages,
)
params: Dict[str, Any] = {
"prompt": prompt,
**self._default_params,
**kwargs,
}
if stop:
params["stop_sequences"] = stop
response = await self.async_client.completions.create(**params)
completion = response.completion
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
def get_num_tokens(self, text: str) -> int:
"""Calculate number of tokens."""
if not self.count_tokens:
raise NameError("Please ensure the anthropic package is loaded")
return self.count_tokens(text)
| [
"{}"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~callbacks~promptlayer_callback.py | """Callback handler for promptlayer."""
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple
from uuid import UUID
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import (
ChatGeneration,
LLMResult,
)
from langchain.schema.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
if TYPE_CHECKING:
import promptlayer
def _lazy_import_promptlayer() -> promptlayer:
"""Lazy import promptlayer to avoid circular imports."""
try:
import promptlayer
except ImportError:
raise ImportError(
"The PromptLayerCallbackHandler requires the promptlayer package. "
" Please install it with `pip install promptlayer`."
)
return promptlayer
class PromptLayerCallbackHandler(BaseCallbackHandler):
"""Callback handler for promptlayer."""
def __init__(
self,
pl_id_callback: Optional[Callable[..., Any]] = None,
pl_tags: Optional[List[str]] = None,
) -> None:
"""Initialize the PromptLayerCallbackHandler."""
_lazy_import_promptlayer()
self.pl_id_callback = pl_id_callback
self.pl_tags = pl_tags or []
self.runs: Dict[UUID, Dict[str, Any]] = {}
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
self.runs[run_id] = {
"messages": [self._create_message_dicts(m)[0] for m in messages],
"invocation_params": kwargs.get("invocation_params", {}),
"name": ".".join(serialized["id"]),
"request_start_time": datetime.datetime.now().timestamp(),
"tags": tags,
}
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> Any:
self.runs[run_id] = {
"prompts": prompts,
"invocation_params": kwargs.get("invocation_params", {}),
"name": ".".join(serialized["id"]),
"request_start_time": datetime.datetime.now().timestamp(),
"tags": tags,
}
def on_llm_end(
self,
response: LLMResult,
*,
run_id: UUID,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> None:
from promptlayer.utils import get_api_key, promptlayer_api_request
run_info = self.runs.get(run_id, {})
if not run_info:
return
run_info["request_end_time"] = datetime.datetime.now().timestamp()
for i in range(len(response.generations)):
generation = response.generations[i][0]
resp = {
"text": generation.text,
"llm_output": response.llm_output,
}
model_params = run_info.get("invocation_params", {})
is_chat_model = run_info.get("messages", None) is not None
model_input = (
run_info.get("messages", [])[i]
if is_chat_model
else [run_info.get("prompts", [])[i]]
)
model_response = (
[self._convert_message_to_dict(generation.message)]
if is_chat_model and isinstance(generation, ChatGeneration)
else resp
)
pl_request_id = promptlayer_api_request(
run_info.get("name"),
"langchain",
model_input,
model_params,
self.pl_tags,
model_response,
run_info.get("request_start_time"),
run_info.get("request_end_time"),
get_api_key(),
return_pl_id=bool(self.pl_id_callback is not None),
metadata={
"_langchain_run_id": str(run_id),
"_langchain_parent_run_id": str(parent_run_id),
"_langchain_tags": str(run_info.get("tags", [])),
},
)
if self.pl_id_callback:
self.pl_id_callback(pl_request_id)
def _convert_message_to_dict(self, message: BaseMessage) -> Dict[str, Any]:
if isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
else:
raise ValueError(f"Got unknown type {message}")
if "name" in message.additional_kwargs:
message_dict["name"] = message.additional_kwargs["name"]
return message_dict
def _create_message_dicts(
self, messages: List[BaseMessage]
) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
params: Dict[str, Any] = {}
message_dicts = [self._convert_message_to_dict(m) for m in messages]
return message_dicts, params
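# Illustrative usage sketch (tag names are arbitrary): attach the handler to any
# chat model or LLM so each generation is logged to PromptLayer.
#
#   from langchain.chat_models import ChatOpenAI
#
#   llm = ChatOpenAI(callbacks=[PromptLayerCallbackHandler(pl_tags=["langchain"])])
#   llm.predict("Say hello")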
| [] |
2024-01-10 | RohanDey02/langchain | libs~experimental~tests~integration_tests~llms~test_anthropic_functions.py | """Test AnthropicFunctions"""
import unittest
from langchain.chat_models.anthropic import ChatAnthropic
from langchain.chat_models.bedrock import BedrockChat
from langchain_experimental.llms.anthropic_functions import AnthropicFunctions
class TestAnthropicFunctions(unittest.TestCase):
"""
Test AnthropicFunctions with default llm (ChatAnthropic) as well as a passed-in llm
"""
def test_default_chat_anthropic(self) -> None:
base_model = AnthropicFunctions(model="claude-2")
self.assertIsInstance(base_model.model, ChatAnthropic)
# bind functions
model = base_model.bind(
functions=[
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, "
"e.g. San Francisco, CA",
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
},
},
"required": ["location"],
},
}
],
function_call={"name": "get_current_weather"},
)
res = model.invoke("What's the weather in San Francisco?")
function_call = res.additional_kwargs.get("function_call")
assert function_call
self.assertEqual(function_call.get("name"), "get_current_weather")
self.assertEqual(
function_call.get("arguments"),
'{"location": "San Francisco, CA", "unit": "fahrenheit"}',
)
def test_bedrock_chat_anthropic(self) -> None:
"""
const chatBedrock = new ChatBedrock({
region: process.env.BEDROCK_AWS_REGION ?? "us-east-1",
model: "anthropic.claude-v2",
temperature: 0.1,
credentials: {
secretAccessKey: process.env.BEDROCK_AWS_SECRET_ACCESS_KEY!,
accessKeyId: process.env.BEDROCK_AWS_ACCESS_KEY_ID!,
},
});"""
llm = BedrockChat(
model_id="anthropic.claude-v2",
model_kwargs={"temperature": 0.1},
region_name="us-east-1",
)
base_model = AnthropicFunctions(llm=llm)
assert isinstance(base_model.model, BedrockChat)
# bind functions
model = base_model.bind(
functions=[
{
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, "
"e.g. San Francisco, CA",
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
},
},
"required": ["location"],
},
}
],
function_call={"name": "get_current_weather"},
)
res = model.invoke("What's the weather in San Francisco?")
function_call = res.additional_kwargs.get("function_call")
assert function_call
self.assertEqual(function_call.get("name"), "get_current_weather")
self.assertEqual(
function_call.get("arguments"),
'{"location": "San Francisco, CA", "unit": "fahrenheit"}',
)
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~retrievers~document_compressors~cohere_rerank.py | from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Optional, Sequence
from langchain.callbacks.manager import Callbacks
from langchain.pydantic_v1 import Extra, root_validator
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from langchain.schema import Document
from langchain.utils import get_from_dict_or_env
if TYPE_CHECKING:
from cohere import Client
else:
    # We do this to avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from cohere import Client
except ImportError:
pass
class CohereRerank(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
client: Client
"""Cohere client to use for compressing documents."""
top_n: int = 3
"""Number of documents to return."""
model: str = "rerank-english-v2.0"
"""Model to use for reranking."""
cohere_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
try:
import cohere
values["client"] = cohere.Client(cohere_api_key)
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
return values
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.client.rerank(
model=self.model, query=query, documents=_docs, top_n=self.top_n
)
final_results = []
for r in results:
doc = doc_list[r.index]
doc.metadata["relevance_score"] = r.relevance_score
final_results.append(doc)
return final_results
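# Illustrative usage sketch: the Cohere API key is read from COHERE_API_KEY and the
# documents below are placeholders.
#
#   reranker = CohereRerank(top_n=2)
#   docs = [Document(page_content="foo"), Document(page_content="bar")]
#   top_docs = reranker.compress_documents(docs, query="Which document mentions foo?")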
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~llms~test_opaqueprompts.py | import langchain.utilities.opaqueprompts as op
from langchain.chains.llm import LLMChain
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel
prompt_template = """
As an AI assistant, you will answer questions according to given context.
Sensitive personal information in the question is masked for privacy.
For instance, if the original text says "Giana is good," it will be changed
to "PERSON_998 is good."
Here's how to handle these changes:
* Consider these masked phrases just as placeholders, but still refer to
them in a relevant way when answering.
* It's possible that different masked terms might mean the same thing.
Stick with the given term and don't modify it.
* All masked terms follow the "TYPE_ID" pattern.
* Please don't invent new masked terms. For instance, if you see "PERSON_998,"
don't come up with "PERSON_997" or "PERSON_999" unless they're already in the question.
Conversation History: ```{history}```
Context : ```During our recent meeting on February 23, 2023, at 10:30 AM,
John Doe provided me with his personal details. His email is [email protected]
and his contact number is 650-456-7890. He lives in New York City, USA, and
belongs to the American nationality with Christian beliefs and a leaning towards
the Democratic party. He mentioned that he recently made a transaction using his
credit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address
1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he
noted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided
his website as https://johndoeportfolio.com. John also discussed
some of his US-specific details. He said his bank account number is
1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,
and he recently renewed his passport,
the number for which is 123456789. He emphasized not to share his SSN, which is
669-45-6789. Furthermore, he mentioned that he accesses his work files remotely
through the IP 192.168.1.1 and has a medical license number MED-123456. ```
Question: ```{question}```
"""
def test_opaqueprompts() -> None:
chain = LLMChain(
prompt=PromptTemplate.from_template(prompt_template),
llm=OpaquePrompts(llm=OpenAI()),
memory=ConversationBufferWindowMemory(k=2),
)
output = chain.run(
{
"question": "Write a text message to remind John to do password reset \
for his website through his email to stay secure."
}
)
assert isinstance(output, str)
def test_opaqueprompts_functions() -> None:
prompt = PromptTemplate.from_template(prompt_template)
llm = OpenAI()
pg_chain = (
op.sanitize
| RunnableParallel(
secure_context=lambda x: x["secure_context"], # type: ignore
response=(lambda x: x["sanitized_input"]) # type: ignore
| prompt
| llm
| StrOutputParser(),
)
| (lambda x: op.desanitize(x["response"], x["secure_context"]))
)
pg_chain.invoke(
{
"question": "Write a text message to remind John to do password reset\
for his website through his email to stay secure.",
"history": "",
}
)
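# Note (added): op.sanitize returns a dict with the two keys consumed above:
#   "sanitized_input"  - the prompt text with PII replaced by TYPE_ID placeholders
#   "secure_context"   - an opaque token that op.desanitize needs in order to
#                        restore the original values in the model's response.
# This mirrors how the keys are wired through the RunnableParallel pipeline above.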
| [
"PERSON_998 is good.",
"PERSON_998,",
"\nAs an AI assistant, you will answer questions according to given context.\n\nSensitive personal information in the question is masked for privacy.\nFor instance, if the original text says \"Giana is good,\" it will be changed\nto \"PERSON_998 is good.\"\n\nHere's how to handle these changes:\n* Consider these masked phrases just as placeholders, but still refer to\nthem in a relevant way when answering.\n* It's possible that different masked terms might mean the same thing.\nStick with the given term and don't modify it.\n* All masked terms follow the \"TYPE_ID\" pattern.\n* Please don't invent new masked terms. For instance, if you see \"PERSON_998,\"\ndon't come up with \"PERSON_997\" or \"PERSON_999\" unless they're already in the question.\n\nConversation History: ```{history}```\nContext : ```During our recent meeting on February 23, 2023, at 10:30 AM,\nJohn Doe provided me with his personal details. His email is [email protected]\nand his contact number is 650-456-7890. He lives in New York City, USA, and\nbelongs to the American nationality with Christian beliefs and a leaning towards\nthe Democratic party. He mentioned that he recently made a transaction using his\ncredit card 4111 1111 1111 1111 and transferred bitcoins to the wallet address\n1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa. While discussing his European travels, he\nnoted down his IBAN as GB29 NWBK 6016 1331 9268 19. Additionally, he provided\nhis website as https://johndoeportfolio.com. John also discussed\nsome of his US-specific details. He said his bank account number is\n1234567890123456 and his drivers license is Y12345678. His ITIN is 987-65-4321,\nand he recently renewed his passport,\nthe number for which is 123456789. He emphasized not to share his SSN, which is\n669-45-6789. Furthermore, he mentioned that he accesses his work files remotely\nthrough the IP 192.168.1.1 and has a medical license number MED-123456. ```\nQuestion: ```{question}```\n",
"Giana is good,",
"t come up with \"PERSON_997\" or \"PERSON_999\" unless they"
] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~vectorstores~test_meilisearch.py | """Test Meilisearch functionality."""
from typing import TYPE_CHECKING, Generator
import pytest
import requests
from langchain.docstore.document import Document
from langchain.vectorstores import Meilisearch
from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings
if TYPE_CHECKING:
import meilisearch
INDEX_NAME = "test-langchain-demo"
TEST_MEILI_HTTP_ADDR = "http://localhost:7700"
TEST_MEILI_MASTER_KEY = "masterKey"
class TestMeilisearchVectorSearch:
@pytest.fixture(scope="class", autouse=True)
def enable_vector_search(self) -> Generator[str, None, None]:
requests.patch(
f"{TEST_MEILI_HTTP_ADDR}/experimental-features",
headers={"Authorization": f"Bearer {TEST_MEILI_MASTER_KEY}"},
json={"vectorStore": True},
timeout=10,
)
yield "done"
requests.patch(
f"{TEST_MEILI_HTTP_ADDR}/experimental-features",
headers={"Authorization": f"Bearer {TEST_MEILI_MASTER_KEY}"},
json={"vectorStore": False},
timeout=10,
)
@pytest.fixture(autouse=True)
def setup(self) -> None:
self.delete_all_indexes()
@pytest.fixture(scope="class", autouse=True)
def teardown_test(self) -> Generator[str, None, None]:
# Yields back to the test function.
yield "done"
self.delete_all_indexes()
def delete_all_indexes(self) -> None:
client = self.client()
# Deletes all the indexes in the Meilisearch instance.
indexes = client.get_indexes()
for index in indexes["results"]:
task = client.index(index.uid).delete()
client.wait_for_task(task.task_uid)
def client(self) -> "meilisearch.Client":
import meilisearch
return meilisearch.Client(TEST_MEILI_HTTP_ADDR, TEST_MEILI_MASTER_KEY)
def _wait_last_task(self) -> None:
client = self.client()
# Get the last task
tasks = client.get_tasks()
# Wait for the last task to be completed
client.wait_for_task(tasks.results[0].uid)
def test_meilisearch(self) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
vectorstore = Meilisearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
url=TEST_MEILI_HTTP_ADDR,
api_key=TEST_MEILI_MASTER_KEY,
index_name=INDEX_NAME,
)
self._wait_last_task()
output = vectorstore.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_meilisearch_with_client(self) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
vectorstore = Meilisearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
client=self.client(),
index_name=INDEX_NAME,
)
self._wait_last_task()
output = vectorstore.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
def test_meilisearch_with_metadatas(self) -> None:
"""Test end to end construction and search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": i} for i in range(len(texts))]
docsearch = Meilisearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
url=TEST_MEILI_HTTP_ADDR,
api_key=TEST_MEILI_MASTER_KEY,
index_name=INDEX_NAME,
metadatas=metadatas,
)
self._wait_last_task()
output = docsearch.similarity_search("foo", k=1)
assert len(output) == 1
assert output[0].page_content == "foo"
assert output[0].metadata["page"] == 0
assert output == [Document(page_content="foo", metadata={"page": 0})]
def test_meilisearch_with_metadatas_with_scores(self) -> None:
"""Test end to end construction and scored search."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
docsearch = Meilisearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
url=TEST_MEILI_HTTP_ADDR,
api_key=TEST_MEILI_MASTER_KEY,
index_name=INDEX_NAME,
metadatas=metadatas,
)
self._wait_last_task()
output = docsearch.similarity_search_with_score("foo", k=1)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 9.0)]
def test_meilisearch_with_metadatas_with_scores_using_vector(self) -> None:
"""Test end to end construction and scored search, using embedding vector."""
texts = ["foo", "bar", "baz"]
metadatas = [{"page": str(i)} for i in range(len(texts))]
embeddings = FakeEmbeddings()
docsearch = Meilisearch.from_texts(
texts=texts,
embedding=FakeEmbeddings(),
url=TEST_MEILI_HTTP_ADDR,
api_key=TEST_MEILI_MASTER_KEY,
index_name=INDEX_NAME,
metadatas=metadatas,
)
embedded_query = embeddings.embed_query("foo")
self._wait_last_task()
output = docsearch.similarity_search_by_vector_with_scores(
embedding=embedded_query, k=1
)
assert output == [(Document(page_content="foo", metadata={"page": "0"}), 9.0)]
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~embeddings~test_ernie.py | import pytest
from langchain.embeddings.ernie import ErnieEmbeddings
def test_embedding_documents_1() -> None:
documents = ["foo bar"]
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 1
assert len(output[0]) == 384
def test_embedding_documents_2() -> None:
documents = ["foo", "bar"]
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 2
assert len(output[0]) == 384
assert len(output[1]) == 384
def test_embedding_query() -> None:
query = "foo"
embedding = ErnieEmbeddings()
output = embedding.embed_query(query)
assert len(output) == 384
def test_max_chunks() -> None:
documents = [f"text-{i}" for i in range(20)]
embedding = ErnieEmbeddings()
output = embedding.embed_documents(documents)
assert len(output) == 20
def test_too_many_chunks() -> None:
documents = [f"text-{i}" for i in range(20)]
embedding = ErnieEmbeddings(chunk_size=20)
with pytest.raises(ValueError):
embedding.embed_documents(documents)
| [] |
2024-01-10 | RohanDey02/langchain | templates~anthropic-iterative-search~main.py | from anthropic_iterative_search import final_chain
if __name__ == "__main__":
query = "Which movie came out first: Oppenheimer, or Are You There God It's Me Margaret?"
print(final_chain.with_config(configurable={"chain": "retrieve"}).invoke({"query": query}))
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~document_loaders~hugging_face_dataset.py | from typing import Iterator, List, Mapping, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class HuggingFaceDatasetLoader(BaseLoader):
"""Load from `Hugging Face Hub` datasets."""
def __init__(
self,
path: str,
page_content_column: str = "text",
name: Optional[str] = None,
data_dir: Optional[str] = None,
data_files: Optional[
Union[str, Sequence[str], Mapping[str, Union[str, Sequence[str]]]]
] = None,
cache_dir: Optional[str] = None,
keep_in_memory: Optional[bool] = None,
save_infos: bool = False,
use_auth_token: Optional[Union[bool, str]] = None,
num_proc: Optional[int] = None,
):
"""Initialize the HuggingFaceDatasetLoader.
Args:
path: Path or name of the dataset.
page_content_column: Page content column name. Default is "text".
Note: Currently the function assumes the content is a string.
If it is not, download the dataset using the huggingface library and convert
it using the json or pandas loaders.
https://github.com/langchain-ai/langchain/issues/10674
name: Name of the dataset configuration.
data_dir: Data directory of the dataset configuration.
data_files: Path(s) to source data file(s).
cache_dir: Directory to read/write data.
keep_in_memory: Whether to copy the dataset in-memory.
save_infos: Save the dataset information (checksums/size/splits/...).
Default is False.
use_auth_token: Bearer token for remote files on the Dataset Hub.
num_proc: Number of processes.
"""
self.path = path
self.page_content_column = page_content_column
self.name = name
self.data_dir = data_dir
self.data_files = data_files
self.cache_dir = cache_dir
self.keep_in_memory = keep_in_memory
self.save_infos = save_infos
self.use_auth_token = use_auth_token
self.num_proc = num_proc
def lazy_load(
self,
) -> Iterator[Document]:
"""Load documents lazily."""
try:
from datasets import load_dataset
except ImportError:
raise ImportError(
"Could not import datasets python package. "
"Please install it with `pip install datasets`."
)
dataset = load_dataset(
path=self.path,
name=self.name,
data_dir=self.data_dir,
data_files=self.data_files,
cache_dir=self.cache_dir,
keep_in_memory=self.keep_in_memory,
save_infos=self.save_infos,
use_auth_token=self.use_auth_token,
num_proc=self.num_proc,
)
yield from (
Document(
page_content=row.pop(self.page_content_column),
metadata=row,
)
for key in dataset.keys()
for row in dataset[key]
)
def load(self) -> List[Document]:
"""Load documents."""
return list(self.lazy_load())
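# --- Usage sketch (added, not part of the original module) ---
# Loads a public Hugging Face dataset and turns each row into a Document.
# Assumes the `datasets` package is installed; the dataset name "imdb" and the
# column name are illustrative only.
if __name__ == "__main__":
    loader = HuggingFaceDatasetLoader("imdb", page_content_column="text")
    docs = loader.load()
    print(len(docs), docs[0].metadata)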
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~timescalevector.py | """VectorStore wrapper around a Postgres-TimescaleVector database."""
from __future__ import annotations
import enum
import logging
import uuid
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
Union,
)
from langchain.schema.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.utils import DistanceStrategy
if TYPE_CHECKING:
from timescale_vector import Predicates
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain_store"
class TimescaleVector(VectorStore):
"""VectorStore implementation using the timescale vector client to store vectors
in Postgres.
To use, you should have the ``timescale_vector`` python package installed.
Args:
service_url: Service url on timescale cloud.
embedding: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain_store)
This will become the table name used for the collection.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
Example:
.. code-block:: python
from langchain.vectorstores import TimescaleVector
from langchain.embeddings.openai import OpenAIEmbeddings
SERVICE_URL = "postgres://tsdbadmin:<password>@<id>.tsdb.cloud.timescale.com:<port>/tsdb?sslmode=require"
COLLECTION_NAME = "state_of_the_union_test"
embeddings = OpenAIEmbeddings()
vectorestore = TimescaleVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
service_url=SERVICE_URL,
)
""" # noqa: E501
def __init__(
self,
service_url: str,
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
num_dimensions: int = ADA_TOKEN_COUNT,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
time_partition_interval: Optional[timedelta] = None,
) -> None:
try:
from timescale_vector import client
except ImportError:
raise ImportError(
"Could not import timescale_vector python package. "
"Please install it with `pip install timescale-vector`."
)
self.service_url = service_url
self.embedding = embedding
self.collection_name = collection_name
self.num_dimensions = num_dimensions
self._distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self._time_partition_interval = time_partition_interval
self.sync_client = client.Sync(
self.service_url,
self.collection_name,
self.num_dimensions,
self._distance_strategy.value.lower(),
time_partition_interval=self._time_partition_interval,
)
self.async_client = client.Async(
self.service_url,
self.collection_name,
self.num_dimensions,
self._distance_strategy.value.lower(),
time_partition_interval=self._time_partition_interval,
)
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self.sync_client.create_tables()
if self.pre_delete_collection:
self.sync_client.delete_all()
@property
def embeddings(self) -> Embeddings:
return self.embedding
def drop_tables(self) -> None:
self.sync_client.drop_table()
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
service_url: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
num_dimensions = len(embeddings[0])
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if service_url is None:
service_url = cls.get_service_url(kwargs)
store = cls(
service_url=service_url,
num_dimensions=num_dimensions,
collection_name=collection_name,
embedding=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
@classmethod
async def __afrom(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
service_url: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
num_dimensions = len(embeddings[0])
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if service_url is None:
service_url = cls.get_service_url(kwargs)
store = cls(
service_url=service_url,
num_dimensions=num_dimensions,
collection_name=collection_name,
embedding=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
await store.aadd_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
records = list(zip(ids, metadatas, texts, embeddings))
self.sync_client.upsert(records)
return ids
async def aadd_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
records = list(zip(ids, metadatas, texts, embeddings))
await self.async_client.upsert(records)
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
async def aadd_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding.embed_documents(list(texts))
return await self.aadd_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with TimescaleVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
predicates=predicates,
**kwargs,
)
async def asimilarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with TimescaleVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(text=query)
return await self.asimilarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
predicates=predicates,
**kwargs,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding,
k=k,
filter=filter,
predicates=predicates,
**kwargs,
)
return docs
async def asimilarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding.embed_query(query)
return await self.asimilarity_search_with_score_by_vector(
embedding=embedding,
k=k,
filter=filter,
predicates=predicates,
**kwargs,
)
def date_to_range_filter(self, **kwargs: Any) -> Any:
constructor_args = {
key: kwargs[key]
for key in [
"start_date",
"end_date",
"time_delta",
"start_inclusive",
"end_inclusive",
]
if key in kwargs
}
if not constructor_args or len(constructor_args) == 0:
return None
try:
from timescale_vector import client
except ImportError:
raise ImportError(
"Could not import timescale_vector python package. "
"Please install it with `pip install timescale-vector`."
)
return client.UUIDTimeRange(**constructor_args)
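# Note (added): the time-filter kwargs collected here can be passed straight
# through the search methods. A hedged sketch, assuming `store` is an existing
# TimescaleVector instance and using datetime.datetime / datetime.timedelta:
#   store.similarity_search(
#       "deployment failures",
#       start_date=datetime(2023, 1, 1),
#       time_delta=timedelta(days=7),
#   )
# Only the keys listed in `constructor_args` above are forwarded to
# client.UUIDTimeRange; any other kwargs are ignored by this helper.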
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
try:
from timescale_vector import client
except ImportError:
raise ImportError(
"Could not import timescale_vector python package. "
"Please install it with `pip install timescale-vector`."
)
results = self.sync_client.search(
embedding,
limit=k,
filter=filter,
predicates=predicates,
uuid_time_filter=self.date_to_range_filter(**kwargs),
)
docs = [
(
Document(
page_content=result[client.SEARCH_RESULT_CONTENTS_IDX],
metadata=result[client.SEARCH_RESULT_METADATA_IDX],
),
result[client.SEARCH_RESULT_DISTANCE_IDX],
)
for result in results
]
return docs
async def asimilarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
try:
from timescale_vector import client
except ImportError:
raise ImportError(
"Could not import timescale_vector python package. "
"Please install it with `pip install timescale-vector`."
)
results = await self.async_client.search(
embedding,
limit=k,
filter=filter,
predicates=predicates,
uuid_time_filter=self.date_to_range_filter(**kwargs),
)
docs = [
(
Document(
page_content=result[client.SEARCH_RESULT_CONTENTS_IDX],
metadata=result[client.SEARCH_RESULT_METADATA_IDX],
),
result[client.SEARCH_RESULT_DISTANCE_IDX],
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs
)
return [doc for doc, _ in docs_and_scores]
async def asimilarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Union[dict, list]] = None,
predicates: Optional[Predicates] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = await self.asimilarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter, predicates=predicates, **kwargs
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[TimescaleVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
"""
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the TIMESCALE_SERVICE_URL environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
async def afrom_texts(
cls: Type[TimescaleVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
"""
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the TIMESCALE_SERVICE_URL environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return await cls.__afrom(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
"""Construct TimescaleVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the TIMESCALE_SERVICE_URL environment variable.
Example:
.. code-block:: python
from langchain.vectorstores import TimescaleVector
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
tvs = TimescaleVector.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
async def afrom_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
"""Construct TimescaleVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required.
Either pass it as a parameter
or set the TIMESCALE_SERVICE_URL environment variable.
Example:
.. code-block:: python
from langchain.vectorstores import TimescaleVector
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
tvs = await TimescaleVector.afrom_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return await cls.__afrom(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[TimescaleVector],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> TimescaleVector:
"""
Get an instance of an existing TimescaleVector store. This method will
return the instance of the store without inserting any new
embeddings.
"""
service_url = cls.get_service_url(kwargs)
store = cls(
service_url=service_url,
collection_name=collection_name,
embedding=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_service_url(cls, kwargs: Dict[str, Any]) -> str:
service_url: str = get_from_dict_or_env(
data=kwargs,
key="service_url",
env_key="TIMESCALE_SERVICE_URL",
)
if not service_url:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the TIMESCALE_SERVICE_URL environment variable."
)
return service_url
@classmethod
def service_url_from_db_params(
cls,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql://{user}:{password}@{host}:{port}/{database}"
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to TimescaleVector constructor."
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
self.sync_client.delete_by_ids(ids)
return True
# TODO: should this be part of delete()?
def delete_by_metadata(
self, filter: Union[Dict[str, str], List[Dict[str, str]]], **kwargs: Any
) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
self.sync_client.delete_by_metadata(filter)
return True
class IndexType(str, enum.Enum):
"""Enumerator for the supported Index types"""
TIMESCALE_VECTOR = "tsv"
PGVECTOR_IVFFLAT = "ivfflat"
PGVECTOR_HNSW = "hnsw"
DEFAULT_INDEX_TYPE = IndexType.TIMESCALE_VECTOR
def create_index(
self, index_type: Union[IndexType, str] = DEFAULT_INDEX_TYPE, **kwargs: Any
) -> None:
try:
from timescale_vector import client
except ImportError:
raise ImportError(
"Could not import timescale_vector python package. "
"Please install it with `pip install timescale-vector`."
)
index_type = (
index_type.value if isinstance(index_type, self.IndexType) else index_type
)
if index_type == self.IndexType.PGVECTOR_IVFFLAT.value:
self.sync_client.create_embedding_index(client.IvfflatIndex(**kwargs))
if index_type == self.IndexType.PGVECTOR_HNSW.value:
self.sync_client.create_embedding_index(client.HNSWIndex(**kwargs))
if index_type == self.IndexType.TIMESCALE_VECTOR.value:
self.sync_client.create_embedding_index(
client.TimescaleVectorIndex(**kwargs)
)
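# Note (added): hedged examples of building the supported index types on an
# existing `store`:
#   store.create_index()            # default timescale-vector ("tsv") index
#   store.create_index("hnsw")      # pgvector HNSW index
#   store.create_index("ivfflat")   # pgvector IVFFlat index
# Any extra kwargs are forwarded to the corresponding timescale_vector index
# classes, so consult that package for the available tuning parameters.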
def drop_index(self) -> None:
self.sync_client.drop_embedding_index()
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~vectorstores~chroma.py | from __future__ import annotations
import logging
import uuid
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import xor_args
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
import chromadb
import chromadb.config
from chromadb.api.types import ID, OneOrMany, Where, WhereDocument
logger = logging.getLogger()
DEFAULT_K = 4 # Number of Documents to return.
def _results_to_docs(results: Any) -> List[Document]:
return [doc for doc, _ in _results_to_docs_and_scores(results)]
def _results_to_docs_and_scores(results: Any) -> List[Tuple[Document, float]]:
return [
# TODO: Chroma can do batch querying,
# we shouldn't hard code to the 1st result
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
class Chroma(VectorStore):
"""`ChromaDB` vector store.
To use, you should have the ``chromadb`` python package installed.
Example:
.. code-block:: python
from langchain.vectorstores import Chroma
from langchain.embeddings.openai import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
vectorstore = Chroma("langchain_store", embeddings)
"""
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
def __init__(
self,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
embedding_function: Optional[Embeddings] = None,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
collection_metadata: Optional[Dict] = None,
client: Optional[chromadb.Client] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
"""Initialize with a Chroma client."""
try:
import chromadb
import chromadb.config
except ImportError:
raise ImportError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
if client is not None:
self._client_settings = client_settings
self._client = client
self._persist_directory = persist_directory
else:
if client_settings:
# If client_settings is provided with persist_directory specified,
# then it is "in-memory and persisting to disk" mode.
client_settings.persist_directory = (
persist_directory or client_settings.persist_directory
)
if client_settings.persist_directory is not None:
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
client_settings.chroma_db_impl = "duckdb+parquet"
_client_settings = client_settings
elif persist_directory:
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
_client_settings = chromadb.config.Settings(
chroma_db_impl="duckdb+parquet",
)
else:
_client_settings = chromadb.config.Settings(is_persistent=True)
_client_settings.persist_directory = persist_directory
else:
_client_settings = chromadb.config.Settings()
self._client_settings = _client_settings
self._client = chromadb.Client(_client_settings)
self._persist_directory = (
_client_settings.persist_directory or persist_directory
)
self._embedding_function = embedding_function
self._collection = self._client.get_or_create_collection(
name=collection_name,
embedding_function=self._embedding_function.embed_documents
if self._embedding_function is not None
else None,
metadata=collection_metadata,
)
self.override_relevance_score_fn = relevance_score_fn
@property
def embeddings(self) -> Optional[Embeddings]:
return self._embedding_function
@xor_args(("query_texts", "query_embeddings"))
def __query_collection(
self,
query_texts: Optional[List[str]] = None,
query_embeddings: Optional[List[List[float]]] = None,
n_results: int = 4,
where: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Query the chroma collection."""
try:
import chromadb # noqa: F401
except ImportError:
raise ImportError(
"Could not import chromadb python package. "
"Please install it with `pip install chromadb`."
)
return self._collection.query(
query_texts=query_texts,
query_embeddings=query_embeddings,
n_results=n_results,
where=where,
where_document=where_document,
**kwargs,
)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
# TODO: Handle the case where the user doesn't provide ids on the Collection
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = None
texts = list(texts)
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(texts)
if metadatas:
# fill metadatas with empty dicts if somebody
# did not specify metadata for all texts
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, m in enumerate(metadatas):
if m:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
try:
self._collection.upsert(
metadatas=metadatas,
embeddings=embeddings_with_metadatas,
documents=texts_with_metadatas,
ids=ids_with_metadata,
)
except ValueError as e:
if "Expected metadata value to be" in str(e):
msg = (
"Try filtering complex metadata from the document using "
"langchain.vectorstores.utils.filter_complex_metadata."
)
raise ValueError(e.args[0] + "\n\n" + msg)
else:
raise e
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self._collection.upsert(
embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas,
ids=ids_without_metadatas,
)
else:
self._collection.upsert(
embeddings=embeddings,
documents=texts,
ids=ids,
)
return ids
def similarity_search(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with Chroma.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of documents most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
return [doc for doc, _ in docs_and_scores]
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs(results)
def similarity_search_by_vector_with_relevance_scores(
self,
embedding: List[float],
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""
Return docs most similar to embedding vector and similarity score.
Args:
embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs_and_scores(results)
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_K,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Run similarity search with Chroma with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of documents most similar to
the query text and cosine distance in float for each.
Lower score represents more similarity.
"""
if self._embedding_function is None:
results = self.__query_collection(
query_texts=[query],
n_results=k,
where=filter,
where_document=where_document,
)
else:
query_embedding = self._embedding_function.embed_query(query)
results = self.__query_collection(
query_embeddings=[query_embedding],
n_results=k,
where=filter,
where_document=where_document,
)
return _results_to_docs_and_scores(results)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn:
return self.override_relevance_score_fn
distance = "l2"
distance_key = "hnsw:space"
metadata = self._collection.metadata
if metadata and distance_key in metadata:
distance = metadata[distance_key]
if distance == "cosine":
return self._cosine_relevance_score_fn
elif distance == "l2":
return self._euclidean_relevance_score_fn
elif distance == "ip":
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance metric of type: {distance}."
"Consider providing relevance_score_fn to Chroma constructor."
)
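# Note (added): the distance function is read from the collection's
# "hnsw:space" metadata entry. A hedged example of requesting cosine distance
# at creation time:
#   Chroma.from_texts(texts, embedding, collection_metadata={"hnsw:space": "cosine"})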
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
results = self.__query_collection(
query_embeddings=embedding,
n_results=fetch_k,
where=filter,
where_document=where_document,
include=["metadatas", "documents", "distances", "embeddings"],
)
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
results["embeddings"][0],
k=k,
lambda_mult=lambda_mult,
)
candidates = _results_to_docs(results)
selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected]
return selected_results
def max_marginal_relevance_search(
self,
query: str,
k: int = DEFAULT_K,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
where_document: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
lambda_mult: Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents selected by maximal marginal relevance.
"""
if self._embedding_function is None:
raise ValueError(
"For MMR search, you must specify an embedding function on" "creation."
)
embedding = self._embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_by_vector(
embedding,
k,
fetch_k,
lambda_mult=lambda_mult,
filter=filter,
where_document=where_document,
)
return docs
def delete_collection(self) -> None:
"""Delete the collection."""
self._client.delete_collection(self._collection.name)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Optional[List[str]] = None,
) -> Dict[str, Any]:
"""Gets the collection.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by.
E.g. `{"color" : "red", "price": 4.20}`. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from.
Useful for paging results with limit. Optional.
where_document: A WhereDocument type dict used to filter by the documents.
E.g. `{$contains: "hello"}`. Optional.
include: A list of what to include in the results.
Can contain `"embeddings"`, `"metadatas"`, `"documents"`.
Ids are always included.
Defaults to `["metadatas", "documents"]`. Optional.
"""
kwargs = {
"ids": ids,
"where": where,
"limit": limit,
"offset": offset,
"where_document": where_document,
}
if include is not None:
kwargs["include"] = include
return self._collection.get(**kwargs)
def persist(self) -> None:
"""Persist the collection.
This can be used to explicitly persist the data to disk.
It will also be called automatically when the object is destroyed.
"""
if self._persist_directory is None:
raise ValueError(
"You must specify a persist_directory on"
"creation to persist the collection."
)
import chromadb
# Maintain backwards compatibility with chromadb < 0.4.0
major, minor, _ = chromadb.__version__.split(".")
if int(major) == 0 and int(minor) < 4:
self._client.persist()
def update_document(self, document_id: str, document: Document) -> None:
"""Update a document in the collection.
Args:
document_id (str): ID of the document to update.
document (Document): Document to update.
"""
return self.update_documents([document_id], [document])
def update_documents(self, ids: List[str], documents: List[Document]) -> None:
"""Update a document in the collection.
Args:
ids (List[str]): List of ids of the document to update.
documents (List[Document]): List of documents to update.
"""
text = [document.page_content for document in documents]
metadata = [document.metadata for document in documents]
if self._embedding_function is None:
raise ValueError(
"For update, you must specify an embedding function on creation."
)
embeddings = self._embedding_function.embed_documents(text)
if hasattr(
self._collection._client, "max_batch_size"
): # for Chroma 0.4.10 and above
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(
api=self._collection._client,
ids=ids,
metadatas=metadata,
documents=text,
embeddings=embeddings,
):
self._collection.update(
ids=batch[0],
embeddings=batch[1],
documents=batch[3],
metadatas=batch[2],
)
else:
self._collection.update(
ids=ids,
embeddings=embeddings,
documents=text,
metadatas=metadata,
)
@classmethod
def from_texts(
cls: Type[Chroma],
texts: List[str],
embedding: Optional[Embeddings] = None,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None,
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a raw documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
texts (List[str]): List of texts to add to the collection.
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
metadatas (Optional[List[dict]]): List of metadatas. Defaults to None.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
chroma_collection = cls(
collection_name=collection_name,
embedding_function=embedding,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if hasattr(
chroma_collection._client, "max_batch_size"
): # for Chroma 0.4.10 and above
from chromadb.utils.batch_utils import create_batches
for batch in create_batches(
api=chroma_collection._client,
ids=ids,
metadatas=metadatas,
documents=texts,
):
chroma_collection.add_texts(
texts=batch[3] if batch[3] else [],
metadatas=batch[2] if batch[2] else None,
ids=batch[0],
)
else:
chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return chroma_collection
@classmethod
def from_documents(
cls: Type[Chroma],
documents: List[Document],
embedding: Optional[Embeddings] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
persist_directory: Optional[str] = None,
client_settings: Optional[chromadb.config.Settings] = None,
client: Optional[chromadb.Client] = None, # Add this line
collection_metadata: Optional[Dict] = None,
**kwargs: Any,
) -> Chroma:
"""Create a Chroma vectorstore from a list of documents.
If a persist_directory is specified, the collection will be persisted there.
Otherwise, the data will be ephemeral in-memory.
Args:
collection_name (str): Name of the collection to create.
persist_directory (Optional[str]): Directory to persist the collection.
ids (Optional[List[str]]): List of document IDs. Defaults to None.
documents (List[Document]): List of documents to add to the vectorstore.
embedding (Optional[Embeddings]): Embedding function. Defaults to None.
client_settings (Optional[chromadb.config.Settings]): Chroma client settings
collection_metadata (Optional[Dict]): Collection configurations.
Defaults to None.
Returns:
Chroma: Chroma vectorstore.
"""
texts = [doc.page_content for doc in documents]
metadatas = [doc.metadata for doc in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
persist_directory=persist_directory,
client_settings=client_settings,
client=client,
collection_metadata=collection_metadata,
**kwargs,
)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
self._collection.delete(ids=ids)
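# --- Usage sketch (added, not part of the original module) ---
# An in-memory round trip using the toy FakeEmbeddings class; the search result
# is not semantically meaningful, it only demonstrates add/search wiring.
# Assumes `chromadb` is installed; names are illustrative.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings

    store = Chroma.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=FakeEmbeddings(size=8),
        collection_name="demo_collection",
    )
    print(store.similarity_search("foo", k=1))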
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~langchain~chat_models~vertexai.py | """Wrapper around Google VertexAI chat-based models."""
from __future__ import annotations
import logging
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Union
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel, _generate_from_stream
from langchain.llms.vertexai import _VertexAICommon, is_codey_model
from langchain.pydantic_v1 import root_validator
from langchain.schema import ChatGeneration, ChatResult
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
HumanMessage,
SystemMessage,
)
from langchain.schema.output import ChatGenerationChunk
from langchain.utilities.vertexai import raise_vertex_import_error
if TYPE_CHECKING:
from vertexai.language_models import (
ChatMessage,
ChatSession,
CodeChatSession,
InputOutputTextPair,
)
logger = logging.getLogger(__name__)
@dataclass
class _ChatHistory:
"""Represents a context and a history of messages."""
history: List["ChatMessage"] = field(default_factory=list)
context: Optional[str] = None
def _parse_chat_history(history: List[BaseMessage]) -> _ChatHistory:
"""Parse a sequence of messages into history.
Args:
history: The list of messages to re-create the history of the chat.
Returns:
A parsed chat history.
Raises:
ValueError: If a sequence of message has a SystemMessage not at the
first place.
"""
from vertexai.language_models import ChatMessage
vertex_messages, context = [], None
for i, message in enumerate(history):
if i == 0 and isinstance(message, SystemMessage):
context = message.content
elif isinstance(message, AIMessage):
vertex_message = ChatMessage(content=message.content, author="bot")
vertex_messages.append(vertex_message)
elif isinstance(message, HumanMessage):
vertex_message = ChatMessage(content=message.content, author="user")
vertex_messages.append(vertex_message)
else:
raise ValueError(
f"Unexpected message with type {type(message)} at the position {i}."
)
chat_history = _ChatHistory(context=context, history=vertex_messages)
return chat_history
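
# Illustrative sketch (added for clarity; not part of the original module, and
# the helper name is ours). It shows the shape `_parse_chat_history` produces:
# a leading SystemMessage becomes `context`, and the remaining human/AI turns
# become user/bot ChatMessage entries in order. The message texts are made up.
def _example_parse_chat_history() -> _ChatHistory:
    messages: List[BaseMessage] = [
        SystemMessage(content="You are a terse assistant."),
        HumanMessage(content="Hi"),
        AIMessage(content="Hello!"),
    ]
    return _parse_chat_history(messages)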
def _parse_examples(examples: List[BaseMessage]) -> List["InputOutputTextPair"]:
from vertexai.language_models import InputOutputTextPair
if len(examples) % 2 != 0:
raise ValueError(
f"Expect examples to have an even amount of messages, got {len(examples)}."
)
example_pairs = []
input_text = None
for i, example in enumerate(examples):
if i % 2 == 0:
if not isinstance(example, HumanMessage):
raise ValueError(
f"Expected the first message in a part to be from human, got "
f"{type(example)} for the {i}th message."
)
input_text = example.content
if i % 2 == 1:
if not isinstance(example, AIMessage):
raise ValueError(
f"Expected the second message in a part to be from AI, got "
f"{type(example)} for the {i}th message."
)
pair = InputOutputTextPair(
input_text=input_text, output_text=example.content
)
example_pairs.append(pair)
return example_pairs
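
# Illustrative sketch (added for clarity; not part of the original module, and
# the helper name is ours). `_parse_examples` expects an even number of
# messages alternating human -> AI, so few-shot examples are supplied in pairs.
def _example_parse_examples() -> List["InputOutputTextPair"]:
    return _parse_examples(
        [
            HumanMessage(content="2 + 2"),
            AIMessage(content="4"),
        ]
    )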
def _get_question(messages: List[BaseMessage]) -> HumanMessage:
"""Get the human message at the end of a list of input messages to a chat model."""
if not messages:
raise ValueError("You should provide at least one message to start the chat!")
question = messages[-1]
if not isinstance(question, HumanMessage):
raise ValueError(
f"Last message in the list should be from human, got {question.type}."
)
return question
class ChatVertexAI(_VertexAICommon, BaseChatModel):
"""`Vertex AI` Chat large language models API."""
model_name: str = "chat-bison"
"Underlying model name."
examples: Optional[List[BaseMessage]] = None
@classmethod
    def is_lc_serializable(cls) -> bool:
return True
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
cls._try_init_vertexai(values)
try:
if is_codey_model(values["model_name"]):
from vertexai.preview.language_models import CodeChatModel
values["client"] = CodeChatModel.from_pretrained(values["model_name"])
else:
from vertexai.preview.language_models import ChatModel
values["client"] = ChatModel.from_pretrained(values["model_name"])
except ImportError:
raise_vertex_import_error()
return values
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
stream: Optional[bool] = None,
**kwargs: Any,
) -> ChatResult:
"""Generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
stream: Whether to use the streaming endpoint.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
should_stream = stream if stream is not None else self.streaming
if should_stream:
stream_iter = self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
)
return _generate_from_stream(stream_iter)
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
params = self._prepare_params(stop=stop, stream=False, **kwargs)
examples = kwargs.get("examples") or self.examples
if examples:
params["examples"] = _parse_examples(examples)
msg_params = {}
if "candidate_count" in params:
msg_params["candidate_count"] = params.pop("candidate_count")
chat = self._start_chat(history, **params)
response = chat.send_message(question.content, **msg_params)
generations = [
ChatGeneration(message=AIMessage(content=r.text))
for r in response.candidates
]
return ChatResult(generations=generations)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Asynchronously generate next turn in the conversation.
Args:
messages: The history of the conversation as a list of messages. Code chat
does not support context.
stop: The list of stop words (optional).
run_manager: The CallbackManager for LLM run, it's not used at the moment.
Returns:
The ChatResult that contains outputs generated by the model.
Raises:
ValueError: if the last message in the list is not from human.
"""
if "stream" in kwargs:
kwargs.pop("stream")
logger.warning("ChatVertexAI does not currently support async streaming.")
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
params = self._prepare_params(stop=stop, **kwargs)
examples = kwargs.get("examples", None)
if examples:
params["examples"] = _parse_examples(examples)
msg_params = {}
if "candidate_count" in params:
msg_params["candidate_count"] = params.pop("candidate_count")
chat = self._start_chat(history, **params)
response = await chat.send_message_async(question.content, **msg_params)
generations = [
ChatGeneration(message=AIMessage(content=r.text))
for r in response.candidates
]
return ChatResult(generations=generations)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
question = _get_question(messages)
history = _parse_chat_history(messages[:-1])
params = self._prepare_params(stop=stop, **kwargs)
examples = kwargs.get("examples", None)
if examples:
params["examples"] = _parse_examples(examples)
chat = self._start_chat(history, **params)
responses = chat.send_message_streaming(question.content, **params)
for response in responses:
if run_manager:
run_manager.on_llm_new_token(response.text)
yield ChatGenerationChunk(message=AIMessageChunk(content=response.text))
def _start_chat(
self, history: _ChatHistory, **kwargs: Any
) -> Union[ChatSession, CodeChatSession]:
if not self.is_codey_model:
return self.client.start_chat(
context=history.context, message_history=history.history, **kwargs
)
else:
return self.client.start_chat(message_history=history.history, **kwargs)
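
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes Google Cloud credentials and Vertex AI access are
# already configured in the environment; the model name and temperature below
# are arbitrary placeholders.
if __name__ == "__main__":
    chat = ChatVertexAI(model_name="chat-bison", temperature=0.2)
    result = chat(
        [
            SystemMessage(content="You answer in one short sentence."),
            HumanMessage(content="What is Vertex AI?"),
        ]
    )
    print(result.content)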
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~document_loaders~test_geodataframe.py | from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from langchain.document_loaders import GeoDataFrameLoader
from langchain.schema import Document
if TYPE_CHECKING:
from geopandas import GeoDataFrame
else:
GeoDataFrame = "geopandas.GeoDataFrame"
@pytest.fixture
@pytest.mark.requires("geopandas")
def sample_gdf() -> GeoDataFrame:
import geopandas
# TODO: geopandas.datasets will be deprecated in 1.0
path_to_data = geopandas.datasets.get_path("nybb")
gdf = geopandas.read_file(path_to_data)
gdf["area"] = gdf.area
gdf["crs"] = gdf.crs.to_string()
return gdf.head(2)
@pytest.mark.requires("geopandas")
def test_load_returns_list_of_documents(sample_gdf: GeoDataFrame) -> None:
loader = GeoDataFrameLoader(sample_gdf)
docs = loader.load()
assert isinstance(docs, list)
assert all(isinstance(doc, Document) for doc in docs)
assert len(docs) == 2
@pytest.mark.requires("geopandas")
def test_load_converts_dataframe_columns_to_document_metadata(
sample_gdf: GeoDataFrame,
) -> None:
loader = GeoDataFrameLoader(sample_gdf)
docs = loader.load()
for i, doc in enumerate(docs):
assert doc.metadata["area"] == sample_gdf.loc[i, "area"]
assert doc.metadata["crs"] == sample_gdf.loc[i, "crs"]
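
# Illustrative sketch (added for clarity; not part of the original test
# module): the same loader used outside pytest. It assumes geopandas and its
# bundled "nybb" sample dataset are available locally.
if __name__ == "__main__":
    import geopandas

    gdf = geopandas.read_file(geopandas.datasets.get_path("nybb"))
    gdf["area"] = gdf.area
    gdf["crs"] = gdf.crs.to_string()
    for doc in GeoDataFrameLoader(gdf).load():
        print(doc.metadata["area"], doc.metadata["crs"])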
| [] |
2024-01-10 | RohanDey02/langchain | libs~langchain~tests~integration_tests~chat_models~test_bedrock.py | """Test Bedrock chat model."""
from typing import Any
import pytest
from langchain.callbacks.manager import CallbackManager
from langchain.chat_models import BedrockChat
from langchain.schema import ChatGeneration, LLMResult
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
@pytest.fixture
def chat() -> BedrockChat:
return BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0})
@pytest.mark.scheduled
def test_chat_bedrock(chat: BedrockChat) -> None:
"""Test BedrockChat wrapper."""
system = SystemMessage(content="You are a helpful assistant.")
human = HumanMessage(content="Hello")
response = chat([system, human])
assert isinstance(response, BaseMessage)
assert isinstance(response.content, str)
@pytest.mark.scheduled
def test_chat_bedrock_generate(chat: BedrockChat) -> None:
"""Test BedrockChat wrapper with generate."""
message = HumanMessage(content="Hello")
response = chat.generate([[message], [message]])
assert isinstance(response, LLMResult)
assert len(response.generations) == 2
for generations in response.generations:
for generation in generations:
assert isinstance(generation, ChatGeneration)
assert isinstance(generation.text, str)
assert generation.text == generation.message.content
@pytest.mark.scheduled
def test_chat_bedrock_streaming() -> None:
"""Test that streaming correctly invokes on_llm_new_token callback."""
callback_handler = FakeCallbackHandler()
callback_manager = CallbackManager([callback_handler])
chat = BedrockChat(
model_id="anthropic.claude-v2",
streaming=True,
callback_manager=callback_manager,
verbose=True,
)
message = HumanMessage(content="Hello")
response = chat([message])
assert callback_handler.llm_streams > 0
assert isinstance(response, BaseMessage)
@pytest.mark.scheduled
def test_chat_bedrock_streaming_generation_info() -> None:
"""Test that generation info is preserved when streaming."""
class _FakeCallback(FakeCallbackHandler):
saved_things: dict = {}
def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> Any:
# Save the generation
self.saved_things["generation"] = args[0]
callback = _FakeCallback()
callback_manager = CallbackManager([callback])
chat = BedrockChat(
model_id="anthropic.claude-v2",
callback_manager=callback_manager,
)
list(chat.stream("hi"))
generation = callback.saved_things["generation"]
    # ` Hello!` is two tokens; assert that this is what gets returned
assert generation.generations[0][0].text == " Hello!"
@pytest.mark.scheduled
def test_bedrock_streaming(chat: BedrockChat) -> None:
"""Test streaming tokens from OpenAI."""
for token in chat.stream("I'm Pickle Rick"):
assert isinstance(token.content, str)
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_bedrock_astream(chat: BedrockChat) -> None:
"""Test streaming tokens from OpenAI."""
async for token in chat.astream("I'm Pickle Rick"):
assert isinstance(token.content, str)
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_bedrock_abatch(chat: BedrockChat) -> None:
"""Test streaming tokens from BedrockChat."""
result = await chat.abatch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_bedrock_abatch_tags(chat: BedrockChat) -> None:
"""Test batch tokens from BedrockChat."""
result = await chat.abatch(
["I'm Pickle Rick", "I'm not Pickle Rick"], config={"tags": ["foo"]}
)
for token in result:
assert isinstance(token.content, str)
@pytest.mark.scheduled
def test_bedrock_batch(chat: BedrockChat) -> None:
"""Test batch tokens from BedrockChat."""
result = chat.batch(["I'm Pickle Rick", "I'm not Pickle Rick"])
for token in result:
assert isinstance(token.content, str)
@pytest.mark.scheduled
@pytest.mark.asyncio
async def test_bedrock_ainvoke(chat: BedrockChat) -> None:
"""Test invoke tokens from BedrockChat."""
result = await chat.ainvoke("I'm Pickle Rick", config={"tags": ["foo"]})
assert isinstance(result.content, str)
@pytest.mark.scheduled
def test_bedrock_invoke(chat: BedrockChat) -> None:
"""Test invoke tokens from BedrockChat."""
result = chat.invoke("I'm Pickle Rick", config=dict(tags=["foo"]))
assert isinstance(result.content, str)
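
# Illustrative sketch (added for clarity; not part of the original test
# module): direct use of BedrockChat outside pytest. It assumes AWS
# credentials and Amazon Bedrock model access are configured in the
# environment; the model ID matches the fixture above.
if __name__ == "__main__":
    chat = BedrockChat(model_id="anthropic.claude-v2", model_kwargs={"temperature": 0})
    reply = chat([HumanMessage(content="Hello")])
    print(reply.content)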
| [
"You are a helpful assistant.",
"Hello"
] |