Source code for langchain.vectorstores.opensearch_vector_search
            else:
                search_query = _default_approximate_search_query(
                    embedding, size, k, vector_field
                )
        elif search_type == SCRIPT_SCORING_SEARCH:
            space_type = _get_kwargs_value(kwargs, "space_type", "l2")
            pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
            search_query = _default_script_query(
                embedding, space_type, pre_filter, vector_field
            )
        elif search_type == PAINLESS_SCRIPTING_SEARCH:
            space_type = _get_kwargs_value(kwargs, "space_type", "l2Squared")
            pre_filter = _get_kwargs_value(kwargs, "pre_filter", MATCH_ALL_QUERY)
            search_query = _default_painless_scripting_query(
                embedding, space_type, pre_filter, vector_field
            )
        else:
            raise ValueError("Invalid `search_type` provided as an argument")

        response = self.client.search(index=self.index_name, body=search_query)
        hits = [hit["_source"] for hit in response["hits"]["hits"][:k]]
        documents = [
            Document(
                page_content=hit[text_field],
                metadata=hit
                if metadata_field == "*" or metadata_field not in hit
                else hit[metadata_field],
            )
            for hit in hits
        ]
        return documents
    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        bulk_size: int = 500,
        **kwargs: Any,
    ) -> OpenSearchVectorSearch:
        """Construct OpenSearchVectorSearch wrapper from raw documents.

        Example:
            .. code-block:: python

                from langchain import OpenSearchVectorSearch
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                opensearch_vector_search = OpenSearchVectorSearch.from_texts(
                    texts,
                    embeddings,
                    opensearch_url="http://localhost:9200"
                )

        OpenSearch by default supports Approximate Search powered by nmslib, faiss
        and lucene engines recommended for large datasets. Also supports brute force
        search through Script Scoring and Painless Scripting.

        Optional Args:
            vector_field: Document field embeddings are stored in. Defaults to
                "vector_field".
            text_field: Document field the text of the document is stored in.
                Defaults to "text".

        Optional Keyword Args for Approximate Search:
            engine: "nmslib", "faiss", "lucene"; default: "nmslib"
            space_type: "l2", "l1", "cosinesimil", "linf", "innerproduct";
                default: "l2"
            ef_search: Size of the dynamic list used during k-NN searches. Higher
                values lead to more accurate but slower searches; default: 512
            ef_construction: Size of the dynamic list used during k-NN graph
                creation. Higher values lead to a more accurate graph but slower
                indexing speed; default: 512
            m: Number of bidirectional links created for each new element. Large
                impact on memory consumption. Between 2 and 100; default: 16

        Keyword Args for Script Scoring or Painless Scripting:
            is_appx_search: False
        """
        opensearch_url = get_from_dict_or_env(
            kwargs, "opensearch_url", "OPENSEARCH_URL"
        )
        # List of arguments that need to be removed from kwargs
        # before passing kwargs to the OpenSearch client
        keys_list = [
            "opensearch_url",
            "index_name",
            "is_appx_search",
            "vector_field",
            "text_field",
            "engine",
            "space_type",
            "ef_search",
            "ef_construction",
            "m",
        ]
        embeddings = embedding.embed_documents(texts)
        _validate_embeddings_and_bulk_size(len(embeddings), bulk_size)
        dim = len(embeddings[0])
        # Get the index name from kwargs or the ENV variable
        # before falling back to random generation
        index_name = get_from_dict_or_env(
            kwargs, "index_name", "OPENSEARCH_INDEX_NAME", default=uuid.uuid4().hex
        )
        is_appx_search = _get_kwargs_value(kwargs, "is_appx_search", True)
        vector_field = _get_kwargs_value(kwargs, "vector_field", "vector_field")
        text_field = _get_kwargs_value(kwargs, "text_field", "text")
        if is_appx_search:
            engine = _get_kwargs_value(kwargs, "engine", "nmslib")
            space_type = _get_kwargs_value(kwargs, "space_type", "l2")
            ef_search = _get_kwargs_value(kwargs, "ef_search", 512)
            ef_construction = _get_kwargs_value(kwargs, "ef_construction", 512)
            m = _get_kwargs_value(kwargs, "m", 16)
            mapping = _default_text_mapping(
                dim, engine, space_type, ef_search, ef_construction, m, vector_field
            )
        else:
            mapping = _default_scripting_text_mapping(dim)
        # Remove client-only arguments before building the OpenSearch client
        for key in keys_list:
            kwargs.pop(key, None)
        client = _get_opensearch_client(opensearch_url, **kwargs)
        client.indices.create(index=index_name, body=mapping)
        _bulk_ingest_embeddings(
            client, index_name, embeddings, texts, metadatas, vector_field, text_field
        )
        return cls(opensearch_url, index_name, embedding, **kwargs)
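As a rough usage sketch of the two modes handled above (not part of the module): approximate k-NN with tuned HNSW parameters versus brute-force Script Scoring. The OpenSearch URL, the OpenAI credentials, and the `search_type="script_scoring"` string are assumptions for illustration; verify them against your installed version.

# A hedged sketch, assuming an OpenSearch node on localhost:9200 and an
# OpenAI API key in the environment.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import OpenSearchVectorSearch

texts = ["foo", "bar", "baz"]
embeddings = OpenAIEmbeddings()

# Approximate k-NN search (the default): tune the HNSW graph via kwargs.
docsearch = OpenSearchVectorSearch.from_texts(
    texts,
    embeddings,
    opensearch_url="http://localhost:9200",
    engine="nmslib",
    space_type="cosinesimil",
    ef_construction=256,
    m=48,
)
docs = docsearch.similarity_search("foo", k=2)

# Brute-force search via Script Scoring instead of a k-NN index.
docsearch = OpenSearchVectorSearch.from_texts(
    texts,
    embeddings,
    opensearch_url="http://localhost:9200",
    is_appx_search=False,
)
docs = docsearch.similarity_search(
    "foo", k=2, search_type="script_scoring", space_type="cosinesimil"
)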
Source code for langchain.vectorstores.analyticdb

"""VectorStore wrapper around a Postgres/PGVector database."""
from __future__ import annotations

import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple

import sqlalchemy
from sqlalchemy import REAL, Index
from sqlalchemy.dialects.postgresql import ARRAY, JSON, UUID
from sqlalchemy.orm import Mapped, Session, declarative_base, relationship
from sqlalchemy.sql.expression import func

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore

Base = declarative_base()  # type: Any

ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"

class BaseModel(Base):
    __abstract__ = True
    uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)

class CollectionStore(BaseModel):
    __tablename__ = "langchain_pg_collection"

    name = sqlalchemy.Column(sqlalchemy.String)
    cmetadata = sqlalchemy.Column(JSON)

    embeddings = relationship(
        "EmbeddingStore",
        back_populates="collection",
        passive_deletes=True,
    )
    @classmethod
    def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
        return session.query(cls).filter(cls.name == name).first()

    @classmethod
    def get_or_create(
        cls,
        session: Session,
        name: str,
        cmetadata: Optional[dict] = None,
    ) -> Tuple["CollectionStore", bool]:
        """
        Get or create a collection.
        Returns [Collection, bool] where the bool is True if the collection was created.
        """
        created = False
        collection = cls.get_by_name(session, name)
        if collection:
            return collection, created

        collection = cls(name=name, cmetadata=cmetadata)
        session.add(collection)
        session.commit()
        created = True
        return collection, created

class EmbeddingStore(BaseModel):
    __tablename__ = "langchain_pg_embedding"

    collection_id: Mapped[UUID] = sqlalchemy.Column(
        UUID(as_uuid=True),
        sqlalchemy.ForeignKey(
            f"{CollectionStore.__tablename__}.uuid",
            ondelete="CASCADE",
        ),
    )
    collection = relationship(CollectionStore, back_populates="embeddings")

    embedding = sqlalchemy.Column(ARRAY(REAL))
    document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
    cmetadata = sqlalchemy.Column(JSON, nullable=True)

    # custom_id : any user defined id
    custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)

    # The following creates an ANN index named 'langchain_pg_embedding_vector_idx'
    langchain_pg_embedding_vector_idx = Index(
        "langchain_pg_embedding_vector_idx",
        embedding,
        postgresql_using="ann",
        postgresql_with={
            "distancemeasure": "L2",
            "dim": 1536,
            "pq_segments": 64,
            "hnsw_m": 100,
            "pq_centers": 2048,
        },
    )

class QueryResult:
    EmbeddingStore: EmbeddingStore
    distance: float

class AnalyticDB(VectorStore):
    """
    VectorStore implementation using AnalyticDB.
    AnalyticDB is a distributed, cloud-native database with full PostgreSQL syntax.
    - `connection_string` is a postgres connection string.
    - `embedding_function` any embedding function implementing the
        `langchain.embeddings.base.Embeddings` interface.
    - `collection_name` is the name of the collection to use. (default: langchain)
        - NOTE: This is not the name of the table, but the name of the collection.
            The tables will be created when initializing the store (if not exists).
            So, make sure the user has the right permissions to create tables.
    - `pre_delete_collection` if True, will delete the collection if it exists.
        (default: False)
        - Useful for testing.
    """

    def __init__(
        self,
        connection_string: str,
        embedding_function: Embeddings,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        collection_metadata: Optional[dict] = None,
        pre_delete_collection: bool = False,
        logger: Optional[logging.Logger] = None,
    ) -> None:
        self.connection_string = connection_string
        self.embedding_function = embedding_function
        self.collection_name = collection_name
        self.collection_metadata = collection_metadata
        self.pre_delete_collection = pre_delete_collection
        self.logger = logger or logging.getLogger(__name__)
        self.__post_init__()

    def __post_init__(
        self,
    ) -> None:
        """
        Initialize the store.
        """
        self._conn = self.connect()
        self.create_tables_if_not_exists()
        self.create_collection()

    def connect(self) -> sqlalchemy.engine.Connection:
        engine = sqlalchemy.create_engine(self.connection_string)
        conn = engine.connect()
        return conn

    def create_tables_if_not_exists(self) -> None:
        Base.metadata.create_all(self._conn)

    def drop_tables(self) -> None:
        Base.metadata.drop_all(self._conn)
    def create_collection(self) -> None:
        if self.pre_delete_collection:
            self.delete_collection()
        with Session(self._conn) as session:
            CollectionStore.get_or_create(
                session, self.collection_name, cmetadata=self.collection_metadata
            )

    def delete_collection(self) -> None:
        self.logger.debug("Trying to delete collection")
        with Session(self._conn) as session:
            collection = self.get_collection(session)
            if not collection:
                self.logger.error("Collection not found")
                return
            session.delete(collection)
            session.commit()

    def get_collection(self, session: Session) -> Optional["CollectionStore"]:
        return CollectionStore.get_by_name(session, self.collection_name)

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        ids: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.
            kwargs: vectorstore specific parameters

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
""" if ids is None: ids = [str(uuid.uuid1()) for _ in texts] embeddings = self.embedding_function.embed_documents(list(texts)) if not metadatas: metadatas = [{} for _ in texts] with Session(self._conn) as session: collection = self.get_collection(session) if not collection: raise ValueError("Collection not found") for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids): embedding_store = EmbeddingStore( embedding=embedding, document=text, cmetadata=metadata, custom_id=id, ) collection.embeddings.append(embedding_store) session.add(embedding_store) session.commit() return ids [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any, ) -> List[Document]: """Run similarity search with AnalyticDB with distance. Args: query (str): Query text to search for. k (int): Number of results to return. Defaults to 4. filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None. Returns: List of Documents most similar to the query. """ embedding = self.embedding_function.embed_query(text=query) return self.similarity_search_by_vector( embedding=embedding, k=k, filter=filter, )
    def similarity_search_with_score(
        self,
        query: str,
        k: int = 4,
        filter: Optional[dict] = None,
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function.embed_query(query)
        docs = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter
        )
        return docs

    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[dict] = None,
    ) -> List[Tuple[Document, float]]:
        with Session(self._conn) as session:
            collection = self.get_collection(session)
            if not collection:
                raise ValueError("Collection not found")

            filter_by = EmbeddingStore.collection_id == collection.uuid

            if filter is not None:
                filter_clauses = []
                for key, value in filter.items():
                    filter_by_metadata = EmbeddingStore.cmetadata[key].astext == str(value)
                    filter_clauses.append(filter_by_metadata)

                filter_by = sqlalchemy.and_(filter_by, *filter_clauses)

            results: List[QueryResult] = (
                session.query(
                    EmbeddingStore,
                    func.l2_distance(EmbeddingStore.embedding, embedding).label("distance"),
                )
                .filter(filter_by)
                .order_by(EmbeddingStore.embedding.op("<->")(embedding))
                .join(
                    CollectionStore,
                    EmbeddingStore.collection_id == CollectionStore.uuid,
                )
                .limit(k)
                .all()
            )

        docs = [
            (
                Document(
                    page_content=result.EmbeddingStore.document,
                    metadata=result.EmbeddingStore.cmetadata,
                ),
                result.distance if self.embedding_function is not None else None,
            )
            for result in results
        ]
        return docs

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        filter: Optional[dict] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.

        Returns:
            List of Documents most similar to the query vector.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, filter=filter
        )
        return [doc for doc, _ in docs_and_scores]

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        ids: Optional[List[str]] = None,
        pre_delete_collection: bool = False,
        **kwargs: Any,
    ) -> AnalyticDB:
        """
        Return VectorStore initialized from texts and embeddings.
        A Postgres connection string is required: either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
        """
        connection_string = cls.get_connection_string(kwargs)

        store = cls(
            connection_string=connection_string,
            collection_name=collection_name,
            embedding_function=embedding,
            pre_delete_collection=pre_delete_collection,
        )

        store.add_texts(texts=texts, metadatas=metadatas, ids=ids, **kwargs)
        return store

    @classmethod
    def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
        connection_string: str = get_from_dict_or_env(
            data=kwargs,
            key="connection_string",
            env_key="PGVECTOR_CONNECTION_STRING",
        )

        if not connection_string:
            raise ValueError(
                "Postgres connection string is required. "
                "Either pass it as a parameter "
                "or set the PGVECTOR_CONNECTION_STRING environment variable."
            )

        return connection_string

    @classmethod
    def from_documents(
        cls,
        documents: List[Document],
        embedding: Embeddings,
        collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
        ids: Optional[List[str]] = None,
        pre_delete_collection: bool = False,
        **kwargs: Any,
    ) -> AnalyticDB:
        """
        Return VectorStore initialized from documents and embeddings.
        A Postgres connection string is required: either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
        """
        texts = [d.page_content for d in documents]
        metadatas = [d.metadata for d in documents]
        connection_string = cls.get_connection_string(kwargs)

        kwargs["connection_string"] = connection_string

        return cls.from_texts(
            texts=texts,
            pre_delete_collection=pre_delete_collection,
            embedding=embedding,
            metadatas=metadatas,
            ids=ids,
            collection_name=collection_name,
            **kwargs,
        )

    @classmethod
    def connection_string_from_db_params(
        cls,
        driver: str,
        host: str,
        port: int,
        database: str,
        user: str,
        password: str,
    ) -> str:
        """Return connection string from database parameters."""
        return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
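For reference, a minimal usage sketch of the AnalyticDB store (not from the original page): build the connection string from parameters, create the store, then run a metadata-filtered search. The host, credentials, and `psycopg2` driver below are placeholder assumptions.

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import AnalyticDB

# Placeholder credentials -- substitute your own instance details.
connection_string = AnalyticDB.connection_string_from_db_params(
    driver="psycopg2",
    host="localhost",
    port=5432,
    database="postgres",
    user="postgres",
    password="postgres",
)

store = AnalyticDB.from_texts(
    texts=["foo", "bar"],
    embedding=OpenAIEmbeddings(),
    metadatas=[{"topic": "a"}, {"topic": "b"}],
    connection_string=connection_string,
)

# Each {key: value} pair in `filter` must match the stored cmetadata exactly.
docs = store.similarity_search("foo", k=1, filter={"topic": "a"})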
Source code for langchain.vectorstores.faiss

"""Wrapper around FAISS vector database."""
from __future__ import annotations

import math
import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple

import numpy as np

from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

def dependable_faiss_import() -> Any:
    """Import faiss if available, otherwise raise error."""
    try:
        import faiss
    except ImportError:
        raise ValueError(
            "Could not import faiss python package. "
            "Please install it with `pip install faiss` "
            "or `pip install faiss-cpu` (depending on Python version)."
        )
    return faiss

def _default_relevance_score_fn(score: float) -> float:
    """Return a similarity score on a scale [0, 1]."""
    # The 'correct' relevance function
    # may differ depending on a few things, including:
    # - the distance / similarity metric used by the VectorStore
    # - the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
    # - embedding dimensionality
    # - etc.
    # This function converts the euclidean norm of normalized embeddings
    # (0 is most similar, sqrt(2) most dissimilar)
    # to a similarity function (0 to 1)
    return 1.0 - score / math.sqrt(2)

class FAISS(VectorStore):
    """Wrapper around FAISS vector database.

    To use, you should have the ``faiss`` python package installed.

    Example:
        .. code-block:: python

            from langchain import FAISS
            faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id)
    """

    def __init__(
        self,
        embedding_function: Callable,
        index: Any,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
        relevance_score_fn: Optional[
            Callable[[float], float]
        ] = _default_relevance_score_fn,
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id
        self.relevance_score_fn = relevance_score_fn

    def __add(
        self,
        texts: Iterable[str],
        embeddings: Iterable[List[float]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
"If trying to add texts, the underlying docstore should support " f"adding items, which {self.docstore} does not" ) documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) # Add to the index, the index_to_id mapping, and the docstore. starting_len = len(self.index_to_docstore_id) self.index.add(np.array(embeddings, dtype=np.float32)) # Get list of index, id, and docs. full_info = [ (starting_len + i, str(uuid.uuid4()), doc) for i, doc in enumerate(documents) ] # Add information to docstore and index. self.docstore.add({_id: doc for _, _id, doc in full_info}) index_to_id = {index: _id for index, _id, _ in full_info} self.index_to_docstore_id.update(index_to_id) return [_id for _, _id, _ in full_info] [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        # Embed and create the documents.
        embeddings = [self.embedding_function(text) for text in texts]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def add_embeddings(
        self,
        text_embeddings: Iterable[Tuple[str, List[float]]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            text_embeddings: Iterable pairs of string and embedding to
                add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        # Embed and create the documents.
        texts = [te[0] for te in text_embeddings]
        embeddings = [te[1] for te in text_embeddings]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to the embedding vector.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        scores, indices = self.index.search(np.array([embedding], dtype=np.float32), k)
        docs = []
        for j, i in enumerate(indices[0]):
            if i == -1:
                # This happens when not enough docs are returned.
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, scores[0][j]))
        return docs

    def similarity_search_with_score(
        self, query: str, k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k)
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k)
        return [doc for doc, _ in docs_and_scores]

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k)
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        _, indices = self.index.search(np.array([embedding], dtype=np.float32), fetch_k)
        # -1 happens when not enough docs are returned.
        embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        selected_indices = [indices[0][i] for i in mmr_selected]
        docs = []
        for i in selected_indices:
            if i == -1:
                # This happens when not enough docs are returned.
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append(doc)
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    def merge_from(self, target: FAISS) -> None:
"""Merge another FAISS object with the current one. Add the target FAISS to the current one. Args: target: FAISS object you wish to merge into the current one Returns: None. """ if not isinstance(self.docstore, AddableMixin): raise ValueError("Cannot merge with this type of docstore") # Numerical index for target docs are incremental on existing ones starting_len = len(self.index_to_docstore_id) # Merge two IndexFlatL2 self.index.merge_from(target.index) # Create new id for docs from target FAISS object full_info = [] for i in target.index_to_docstore_id: doc = target.docstore.search(target.index_to_docstore_id[i]) if not isinstance(doc, Document): raise ValueError("Document should be returned") full_info.append((starting_len + i, str(uuid.uuid4()), doc)) # Add information to docstore and index_to_docstore_id. self.docstore.add({_id: doc for _, _id, doc in full_info}) index_to_id = {index: _id for index, _id, _ in full_info} self.index_to_docstore_id.update(index_to_id) @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> FAISS:
        faiss = dependable_faiss_import()
        index = faiss.IndexFlatL2(len(embeddings[0]))
        index.add(np.array(embeddings, dtype=np.float32))
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(embedding.embed_query, index, docstore, index_to_id, **kwargs)

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                faiss = FAISS.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    @classmethod
    def from_embeddings(
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
                faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )
    def save_local(self, folder_path: str, index_name: str = "index") -> None:
        """Save FAISS index, docstore, and index_to_docstore_id to disk.

        Args:
            folder_path: folder path to save index, docstore,
                and index_to_docstore_id to.
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        path.mkdir(exist_ok=True, parents=True)

        # save index separately since it is not picklable
        faiss = dependable_faiss_import()
        faiss.write_index(
            self.index, str(path / "{index_name}.faiss".format(index_name=index_name))
        )

        # save docstore and index_to_docstore_id
        with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
            pickle.dump((self.docstore, self.index_to_docstore_id), f)

    @classmethod
    def load_local(
        cls, folder_path: str, embeddings: Embeddings, index_name: str = "index"
    ) -> FAISS:
        """Load FAISS index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries
            index_name: for loading a specific index file name
        """
        path = Path(folder_path)
        # load index separately since it is not picklable
        faiss = dependable_faiss_import()
        index = faiss.read_index(
            str(path / "{index_name}.faiss".format(index_name=index_name))
        )

        # load docstore and index_to_docstore_id
        with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
            docstore, index_to_docstore_id = pickle.load(f)
        return cls(embeddings.embed_query, index, docstore, index_to_docstore_id)

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and their similarity scores on a scale from 0 to 1."""
        if self.relevance_score_fn is None:
            raise ValueError(
                "relevance_score_fn must be provided to"
                " FAISS constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k)
        return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
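A short usage sketch of the persistence and merge APIs above (not from the original page; the folder name and texts are illustrative, and an OpenAI API key is assumed in the environment):

from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()

# Build two small stores, persist one, then merge the other into it.
store_a = FAISS.from_texts(["alpha", "beta"], embeddings)
store_b = FAISS.from_texts(["gamma"], embeddings)

store_a.save_local("faiss_index")   # writes index.faiss and index.pkl
store_a = FAISS.load_local("faiss_index", embeddings)

store_a.merge_from(store_b)         # docs from store_b get fresh uuid keys
docs = store_a.similarity_search("alpha", k=3)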
Source code for langchain.vectorstores.milvus

"""Wrapper around the Milvus vector database."""
from __future__ import annotations

import logging
from typing import Any, Iterable, List, Optional, Tuple, Union
from uuid import uuid4

import numpy as np

from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.vectorstores.utils import maximal_marginal_relevance

logger = logging.getLogger(__name__)

DEFAULT_MILVUS_CONNECTION = {
    "host": "localhost",
    "port": "19530",
    "user": "",
    "password": "",
    "secure": False,
}

class Milvus(VectorStore):
    """Wrapper around the Milvus vector database."""

    def __init__(
        self,
        embedding_function: Embeddings,
        collection_name: str = "LangChainCollection",
        connection_args: Optional[dict[str, Any]] = None,
        consistency_level: str = "Session",
        index_params: Optional[dict] = None,
        search_params: Optional[dict] = None,
        drop_old: Optional[bool] = False,
    ):
        """Initialize wrapper around the milvus vector database.

        In order to use this you need to have `pymilvus` installed and a
        running Milvus/Zilliz Cloud instance.
        See the following documentation for how to run a Milvus instance:
        https://milvus.io/docs/install_standalone-docker.md

        If looking for a hosted Milvus, take a look at this documentation:
        https://zilliz.com/cloud

        IF USING L2/IP metric IT IS HIGHLY SUGGESTED TO NORMALIZE YOUR DATA.

        The connection args used for this class come in the form of a dict,
        here are a few of the options:
            address (str): The actual address of Milvus
                instance. Example address: "localhost:19530"
            uri (str): The uri of Milvus instance. Example uri:
                "http://randomwebsite:19530",
                "tcp:foobarsite:19530",
                "https://ok.s3.south.com:19530".
            host (str): The host of Milvus instance. Default at "localhost",
                PyMilvus will fill in the default host if only port is provided.
            port (str/int): The port of Milvus instance. Default at 19530, PyMilvus
                will fill in the default port if only host is provided.
            user (str): Use which user to connect to Milvus instance. If user and
                password are provided, we will add related header in every RPC call.
            password (str): Required when user is provided. The password
                corresponding to the user.
            secure (bool): Default is false. If set to true, tls will be enabled.
            client_key_path (str): If using tls two-way authentication, need to
                write the client.key path.
            client_pem_path (str): If using tls two-way authentication, need to
                write the client.pem path.
            ca_pem_path (str): If using tls two-way authentication, need to write
                the ca.pem path.
            server_pem_path (str): If using tls one-way authentication, need to
                write the server.pem path.
            server_name (str): If using tls, need to write the common name.

        Args:
            embedding_function (Embeddings): Function used to embed the text.
            collection_name (str): Which Milvus collection to use. Defaults to
                "LangChainCollection".
            connection_args (Optional[dict[str, any]]): The arguments for connection
                to Milvus/Zilliz instance. Defaults to DEFAULT_MILVUS_CONNECTION.
            consistency_level (str): The consistency level to use for a collection.
                Defaults to "Session".
            index_params (Optional[dict]): Which index params to use. Defaults to
                HNSW/AUTOINDEX depending on service.
            search_params (Optional[dict]): Which search params to use.
                Defaults to default of index.
            drop_old (Optional[bool]): Whether to drop the current collection.
                Defaults to False.
        """
        try:
            from pymilvus import Collection, utility
        except ImportError:
            raise ValueError(
                "Could not import pymilvus python package. "
                "Please install it with `pip install pymilvus`."
            )
        # Default search params when one is not provided.
        self.default_search_params = {
            "IVF_FLAT": {"metric_type": "L2", "params": {"nprobe": 10}},
"IVF_SQ8": {"metric_type": "L2", "params": {"nprobe": 10}}, "IVF_PQ": {"metric_type": "L2", "params": {"nprobe": 10}}, "HNSW": {"metric_type": "L2", "params": {"ef": 10}}, "RHNSW_FLAT": {"metric_type": "L2", "params": {"ef": 10}}, "RHNSW_SQ": {"metric_type": "L2", "params": {"ef": 10}}, "RHNSW_PQ": {"metric_type": "L2", "params": {"ef": 10}}, "IVF_HNSW": {"metric_type": "L2", "params": {"nprobe": 10, "ef": 10}}, "ANNOY": {"metric_type": "L2", "params": {"search_k": 10}}, "AUTOINDEX": {"metric_type": "L2", "params": {}}, } self.embedding_func = embedding_function self.collection_name = collection_name self.index_params = index_params self.search_params = search_params self.consistency_level = consistency_level # In order for a collection to be compatible, pk needs to be auto'id and int self._primary_field = "pk"
self._primary_field = "pk" # In order for compatiblility, the text field will need to be called "text" self._text_field = "text" # In order for compatbility, the vector field needs to be called "vector" self._vector_field = "vector" self.fields: list[str] = [] # Create the connection to the server if connection_args is None: connection_args = DEFAULT_MILVUS_CONNECTION self.alias = self._create_connection_alias(connection_args) self.col: Optional[Collection] = None # Grab the existing colection if it exists if utility.has_collection(self.collection_name, using=self.alias): self.col = Collection( self.collection_name, using=self.alias, ) # If need to drop old, drop it if drop_old and isinstance(self.col, Collection): self.col.drop() self.col = None # Initialize the vector store self._init() def _create_connection_alias(self, connection_args: dict) -> str: """Create the connection to the Milvus server.""" from pymilvus import MilvusException, connections # Grab the connection arguments that are used for checking existing connection host: str = connection_args.get("host", None) port: Union[str, int] = connection_args.get("port", None) address: str = connection_args.get("address", None) uri: str = connection_args.get("uri", None)
        user = connection_args.get("user", None)

        # Order of use is host/port, uri, address
        if host is not None and port is not None:
            given_address = str(host) + ":" + str(port)
        elif uri is not None:
            given_address = uri.split("https://")[1]
        elif address is not None:
            given_address = address
        else:
            given_address = None
            logger.debug("Missing standard address type for reuse attempt")

        # User defaults to empty string when getting connection info
        if user is not None:
            tmp_user = user
        else:
            tmp_user = ""

        # If a valid address was given, then check if a connection exists
        if given_address is not None:
            for con in connections.list_connections():
                addr = connections.get_connection_addr(con[0])
                if (
                    con[1]
                    and ("address" in addr)
                    and (addr["address"] == given_address)
                    and ("user" in addr)
                    and (addr["user"] == tmp_user)
                ):
                    logger.debug("Using previous connection: %s", con[0])
                    return con[0]

        # Generate a new connection if one doesn't exist
        alias = uuid4().hex
        try:
            connections.connect(alias=alias, **connection_args)
            logger.debug("Created new connection using: %s", alias)
            return alias
        except MilvusException as e:
            logger.error("Failed to create new connection using: %s", alias)
            raise e
    def _init(
        self, embeddings: Optional[list] = None, metadatas: Optional[list[dict]] = None
    ) -> None:
        if embeddings is not None:
            self._create_collection(embeddings, metadatas)
        self._extract_fields()
        self._create_index()
        self._create_search_params()
        self._load()

    def _create_collection(
        self, embeddings: list, metadatas: Optional[list[dict]] = None
    ) -> None:
        from pymilvus import (
            Collection,
            CollectionSchema,
            DataType,
            FieldSchema,
            MilvusException,
        )
        from pymilvus.orm.types import infer_dtype_bydata

        # Determine embedding dim
        dim = len(embeddings[0])
        fields = []
        # Determine metadata schema
        if metadatas:
            # Create FieldSchema for each entry in metadata.
            for key, value in metadatas[0].items():
                # Infer the corresponding datatype of the metadata
                dtype = infer_dtype_bydata(value)
                # Datatype isn't compatible
                if dtype == DataType.UNKNOWN or dtype == DataType.NONE:
                    logger.error(
                        "Failure to create collection, unrecognized dtype for key: %s",
                        key,
                    )
                    raise ValueError(f"Unrecognized datatype for {key}.")
                # Datatype is a string/varchar equivalent
                elif dtype == DataType.VARCHAR:
                    fields.append(FieldSchema(key, DataType.VARCHAR, max_length=65_535))
                else:
                    fields.append(FieldSchema(key, dtype))

        # Create the text field
        fields.append(
            FieldSchema(self._text_field, DataType.VARCHAR, max_length=65_535)
        )
        # Create the primary key field
        fields.append(
            FieldSchema(
                self._primary_field, DataType.INT64, is_primary=True, auto_id=True
            )
        )
        # Create the vector field, supports binary or float vectors
        fields.append(
            FieldSchema(self._vector_field, infer_dtype_bydata(embeddings[0]), dim=dim)
        )

        # Create the schema for the collection
        schema = CollectionSchema(fields)

        # Create the collection
        try:
            self.col = Collection(
                name=self.collection_name,
                schema=schema,
                consistency_level=self.consistency_level,
                using=self.alias,
            )
        except MilvusException as e:
            logger.error(
                "Failed to create collection: %s error: %s", self.collection_name, e
            )
            raise e

    def _extract_fields(self) -> None:
        """Grab the existing fields from the Collection"""
        from pymilvus import Collection

        if isinstance(self.col, Collection):
            schema = self.col.schema
            for x in schema.fields:
                self.fields.append(x.name)
            # Since primary field is auto-id, no need to track it
            self.fields.remove(self._primary_field)

    def _get_index(self) -> Optional[dict[str, Any]]:
        """Return the vector index information if it exists"""
        from pymilvus import Collection

        if isinstance(self.col, Collection):
            for x in self.col.indexes:
                if x.field_name == self._vector_field:
                    return x.to_dict()
        return None

    def _create_index(self) -> None:
        """Create an index on the collection"""
        from pymilvus import Collection, MilvusException

        if isinstance(self.col, Collection) and self._get_index() is None:
            try:
                # If no index params, use a default HNSW based one
                if self.index_params is None:
                    self.index_params = {
                        "metric_type": "L2",
                        "index_type": "HNSW",
                        "params": {"M": 8, "efConstruction": 64},
                    }
                try:
                    self.col.create_index(
                        self._vector_field,
                        index_params=self.index_params,
                        using=self.alias,
                    )
                # If default did not work, most likely on Zilliz Cloud
                except MilvusException:
                    # Use AUTOINDEX based index
                    self.index_params = {
                        "metric_type": "L2",
                        "index_type": "AUTOINDEX",
                        "params": {},
                    }
                    self.col.create_index(
                        self._vector_field,
                        index_params=self.index_params,
                        using=self.alias,
                    )
                logger.debug(
                    "Successfully created an index on collection: %s",
                    self.collection_name,
                )
            except MilvusException as e:
                logger.error(
                    "Failed to create an index on collection: %s", self.collection_name
                )
                raise e

    def _create_search_params(self) -> None:
        """Generate search params based on the current index type"""
        from pymilvus import Collection

        if isinstance(self.col, Collection) and self.search_params is None:
            index = self._get_index()
            if index is not None:
                index_type: str = index["index_param"]["index_type"]
                metric_type: str = index["index_param"]["metric_type"]
                self.search_params = self.default_search_params[index_type]
                self.search_params["metric_type"] = metric_type

    def _load(self) -> None:
        """Load the collection if available."""
        from pymilvus import Collection

        if isinstance(self.col, Collection) and self._get_index() is not None:
            self.col.load()

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        timeout: Optional[int] = None,
        batch_size: int = 1000,
        **kwargs: Any,
    ) -> List[str]:
        """Insert text data into Milvus.
        Inserting data when the collection has not been made yet will result
        in creating a new Collection. The data of the first entity decides
        the schema of the new collection, the dim is extracted from the first
        embedding and the columns are decided by the first metadata dict.
        Metadata keys will need to be present for all inserted values. At
        the moment there is no None equivalent in Milvus.

        Args:
            texts (Iterable[str]): The texts to embed, it is assumed
                that they all fit in memory.
            metadatas (Optional[List[dict]]): Metadata dicts attached to each of
                the texts. Defaults to None.
            timeout (Optional[int]): Timeout for each batch insert. Defaults
                to None.
            batch_size (int, optional): Batch size to use for insertion.
                Defaults to 1000.

        Raises:
            MilvusException: Failure to add texts

        Returns:
            List[str]: The resulting keys for each inserted element.
        """
        from pymilvus import Collection, MilvusException

        texts = list(texts)

        try:
            embeddings = self.embedding_func.embed_documents(texts)
        except NotImplementedError:
            embeddings = [self.embedding_func.embed_query(x) for x in texts]

        if len(embeddings) == 0:
            logger.debug("Nothing to insert, skipping.")
            return []

        # If the collection hasn't been initialized yet, perform all steps to do so
        if not isinstance(self.col, Collection):
            self._init(embeddings, metadatas)

        # Dict to hold all insert columns
        insert_dict: dict[str, list] = {
            self._text_field: texts,
            self._vector_field: embeddings,
        }

        # Collect the metadata into the insert dict.
        if metadatas is not None:
            for d in metadatas:
                for key, value in d.items():
                    if key in self.fields:
                        insert_dict.setdefault(key, []).append(value)

        # Total insert count
        vectors: list = insert_dict[self._vector_field]
        total_count = len(vectors)

        pks: list[str] = []

        assert isinstance(self.col, Collection)
        for i in range(0, total_count, batch_size):
            # Grab end index
            end = min(i + batch_size, total_count)
            # Convert dict to list of lists batch for insertion
            insert_list = [insert_dict[x][i:end] for x in self.fields]
            # Insert into the collection.
            try:
                res: Collection
                res = self.col.insert(insert_list, timeout=timeout, **kwargs)
                pks.extend(res.primary_keys)
            except MilvusException as e:
                logger.error(
                    "Failed to insert batch starting at entity: %s/%s", i, total_count
                )
                raise e
        return pks

    def similarity_search(
        self,
        query: str,
        k: int = 4,
        param: Optional[dict] = None,
        expr: Optional[str] = None,
        timeout: Optional[int] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a similarity search against the query string.

        Args:
            query (str): The text to search.
            k (int, optional): How many results to return. Defaults to 4.
            param (dict, optional): The search params for the index type.
                Defaults to None.
            expr (str, optional): Filtering expression. Defaults to None.
            timeout (int, optional): How long to wait before timeout error.
                Defaults to None.
            kwargs: Collection.search() keyword arguments.

        Returns:
            List[Document]: Document results for search.
        """
        if self.col is None:
            logger.debug("No existing collection to search.")
            return []
        res = self.similarity_search_with_score(
            query=query, k=k, param=param, expr=expr, timeout=timeout, **kwargs
        )
        return [doc for doc, _ in res]

    def similarity_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        param: Optional[dict] = None,
        expr: Optional[str] = None,
        timeout: Optional[int] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a similarity search against an embedding vector.

        Args:
"""Perform a similarity search against the query string. Args: embedding (List[float]): The embedding vector to search. k (int, optional): How many results to return. Defaults to 4. param (dict, optional): The search params for the index type. Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug("No existing collection to search.") return [] res = self.similarity_search_with_score_by_vector( embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs ) return [doc for doc, _ in res] [docs] def similarity_search_with_score( self, query: str, k: int = 4, param: Optional[dict] = None, expr: Optional[str] = None, timeout: Optional[int] = None, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Perform a search on a query string and return results with score. For more information about the search parameters, take a look at the pymilvus documentation found here: https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md Args:
        Args:
            query (str): The text being searched.
            k (int, optional): The amount of results to return. Defaults to 4.
            param (dict): The search params for the specified index.
                Defaults to None.
            expr (str, optional): Filtering expression. Defaults to None.
            timeout (int, optional): How long to wait before timeout error.
                Defaults to None.
            kwargs: Collection.search() keyword arguments.

        Returns:
            List[Tuple[Document, float]]: Result doc and score.
        """
        if self.col is None:
            logger.debug("No existing collection to search.")
            return []

        # Embed the query text.
        embedding = self.embedding_func.embed_query(query)

        res = self.similarity_search_with_score_by_vector(
            embedding=embedding, k=k, param=param, expr=expr, timeout=timeout, **kwargs
        )
        return res

    def similarity_search_with_score_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        param: Optional[dict] = None,
        expr: Optional[str] = None,
        timeout: Optional[int] = None,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Perform a search on an embedding vector and return results with score.

        For more information about the search parameters, take a look at the pymilvus
        documentation found here:
        https://milvus.io/api-reference/pymilvus/v2.2.6/Collection/search().md

        Args:
            embedding (List[float]): The embedding vector being searched.
            k (int, optional): The amount of results to return. Defaults to 4.
            param (dict): The search params for the specified index.
                Defaults to None.
            expr (str, optional): Filtering expression. Defaults to None.
            timeout (int, optional): How long to wait before timeout error.
                Defaults to None.
            kwargs: Collection.search() keyword arguments.

        Returns:
            List[Tuple[Document, float]]: Result doc and score.
        """
        if self.col is None:
            logger.debug("No existing collection to search.")
            return []

        if param is None:
            param = self.search_params

        # Determine result metadata fields.
        output_fields = self.fields[:]
        output_fields.remove(self._vector_field)

        # Perform the search.
        res = self.col.search(
            data=[embedding],
            anns_field=self._vector_field,
            param=param,
            limit=k,
            expr=expr,
            output_fields=output_fields,
            timeout=timeout,
            **kwargs,
        )
        # Organize results.
        ret = []
        for result in res[0]:
            meta = {x: result.entity.get(x) for x in output_fields}
            doc = Document(page_content=meta.pop(self._text_field), metadata=meta)
            pair = (doc, result.score)
            ret.append(pair)

        return ret

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        param: Optional[dict] = None,
        expr: Optional[str] = None,
        timeout: Optional[int] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a search and return results that are reordered by MMR.

        Args:
            query (str): The text being searched.
            k (int, optional): How many results to give. Defaults to 4.
            fetch_k (int, optional): Total results to select k from.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5
            param (dict, optional): The search params for the specified index.
                Defaults to None.
            expr (str, optional): Filtering expression. Defaults to None.
            timeout (int, optional): How long to wait before timeout error.
                Defaults to None.
            kwargs: Collection.search() keyword arguments.

        Returns:
            List[Document]: Document results for search.
        """
        if self.col is None:
            logger.debug("No existing collection to search.")
            return []

        embedding = self.embedding_func.embed_query(query)

        return self.max_marginal_relevance_search_by_vector(
            embedding=embedding,
            k=k,
            fetch_k=fetch_k,
            lambda_mult=lambda_mult,
            param=param,
            expr=expr,
            timeout=timeout,
            **kwargs,
        )

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: list[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        param: Optional[dict] = None,
        expr: Optional[str] = None,
        timeout: Optional[int] = None,
        **kwargs: Any,
    ) -> List[Document]:
        """Perform a search and return results that are reordered by MMR.

        Args:
            embedding (list[float]): The embedding vector being searched.
            k (int, optional): How many results to give. Defaults to 4.
            fetch_k (int, optional): Total results to select k from.
                Defaults to 20.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5
            param (dict, optional): The search params for the specified index.
                Defaults to None.
Defaults to None. expr (str, optional): Filtering expression. Defaults to None. timeout (int, optional): How long to wait before timeout error. Defaults to None. kwargs: Collection.search() keyword arguments. Returns: List[Document]: Document results for search. """ if self.col is None: logger.debug("No existing collection to search.") return [] if param is None: param = self.search_params # Determine result metadata fields. output_fields = self.fields[:] output_fields.remove(self._vector_field) # Perform the search. res = self.col.search( data=[embedding], anns_field=self._vector_field, param=param, limit=fetch_k, expr=expr, output_fields=output_fields, timeout=timeout, **kwargs, ) # Organize results. ids = [] documents = [] scores = [] for result in res[0]: meta = {x: result.entity.get(x) for x in output_fields} doc = Document(page_content=meta.pop(self._text_field), metadata=meta) documents.append(doc) scores.append(result.score) ids.append(result.id) vectors = self.col.query( expr=f"{self._primary_field} in {ids}", output_fields=[self._primary_field, self._vector_field], timeout=timeout, )
# Reorganize the results from query to match search order. vectors = {x[self._primary_field]: x[self._vector_field] for x in vectors} ordered_result_embeddings = [vectors[x] for x in ids] # Get the new order of results. new_ordering = maximal_marginal_relevance( np.array(embedding), ordered_result_embeddings, k=k, lambda_mult=lambda_mult ) # Reorder the values and return. ret = [] for x in new_ordering: # Function can return -1 index if x == -1: break else: ret.append(documents[x]) return ret [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, collection_name: str = "LangChainCollection", connection_args: dict[str, Any] = DEFAULT_MILVUS_CONNECTION, consistency_level: str = "Session", index_params: Optional[dict] = None, search_params: Optional[dict] = None, drop_old: bool = False, **kwargs: Any, ) -> Milvus: """Create a Milvus collection, index it with HNSW, and insert data. Args: texts (List[str]): Text data. embedding (Embeddings): Embedding function.
metadatas (Optional[List[dict]]): Metadata for each text if it exists. Defaults to None. collection_name (str, optional): Collection name to use. Defaults to "LangChainCollection". connection_args (dict[str, Any], optional): Connection args to use. Defaults to DEFAULT_MILVUS_CONNECTION. consistency_level (str, optional): Which consistency level to use. Defaults to "Session". index_params (Optional[dict], optional): Which index_params to use. Defaults to None. search_params (Optional[dict], optional): Which search params to use. Defaults to None. drop_old (Optional[bool], optional): Whether to drop the collection with that name if it exists. Defaults to False. Returns: Milvus: Milvus Vector Store """ vector_db = cls( embedding_function=embedding, collection_name=collection_name, connection_args=connection_args, consistency_level=consistency_level, index_params=index_params, search_params=search_params, drop_old=drop_old, **kwargs, ) vector_db.add_texts(texts=texts, metadatas=metadatas) return vector_db
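As a usage sketch of the two entry points above — from_texts to build the collection and max_marginal_relevance_search to query it — the following is a minimal example. The texts and the local connection arguments are hypothetical placeholders, assuming a Milvus server reachable on the default port:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Milvus

    # Hypothetical corpus; any list of strings works.
    texts = ["Milvus stores embedding vectors.", "MMR trades off relevance and diversity."]

    # from_texts embeds the texts, creates the collection, and inserts the rows.
    vector_db = Milvus.from_texts(
        texts,
        OpenAIEmbeddings(),
        connection_args={"host": "localhost", "port": "19530"},  # assumed local server
    )

    # fetch_k candidates are retrieved by vector similarity, then k of them
    # are re-ranked by maximal_marginal_relevance as shown above.
    docs = vector_db.max_marginal_relevance_search("vector search", k=2, fetch_k=10)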
Source code for langchain.vectorstores.annoy """Wrapper around Annoy vector database.""" from __future__ import annotations import os import pickle import uuid from configparser import ConfigParser from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain.docstore.base import Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance INDEX_METRICS = frozenset(["angular", "euclidean", "manhattan", "hamming", "dot"]) DEFAULT_METRIC = "angular" def dependable_annoy_import() -> Any: """Import annoy if available, otherwise raise error.""" try: import annoy except ImportError: raise ValueError( "Could not import annoy python package. " "Please install it with `pip install --user annoy` " ) return annoy [docs]class Annoy(VectorStore): """Wrapper around Annoy vector database. To use, you should have the ``annoy`` python package installed. Example: .. code-block:: python from langchain import Annoy db = Annoy(embedding_function, index, docstore, index_to_docstore_id) """ def __init__( self, embedding_function: Callable,
index: Any, metric: str, docstore: Docstore, index_to_docstore_id: Dict[int, str], ): """Initialize with necessary components.""" self.embedding_function = embedding_function self.index = index self.metric = metric self.docstore = docstore self.index_to_docstore_id = index_to_docstore_id [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: raise NotImplementedError( "Annoy does not allow adding new data once the index is built." ) [docs] def process_index_results( self, idxs: List[int], dists: List[float] ) -> List[Tuple[Document, float]]: """Turns annoy results into a list of documents and scores. Args: idxs: List of indices of the documents in the index. dists: List of distances of the documents in the index. Returns: List of Documents and scores. """ docs = [] for idx, dist in zip(idxs, dists): _id = self.index_to_docstore_id[idx] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") docs.append((doc, dist))
return docs [docs] def similarity_search_with_score_by_vector( self, embedding: List[float], k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to the embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ idxs, dists = self.index.get_nns_by_vector( embedding, k, search_k=search_k, include_distances=True ) return self.process_index_results(idxs, dists) [docs] def similarity_search_with_score_by_index( self, docstore_index: int, k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to the document at docstore_index. Args: docstore_index: Index of the document in the docstore to query with. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ idxs, dists = self.index.get_nns_by_item(
docstore_index, k, search_k=search_k, include_distances=True ) return self.process_index_results(idxs, dists) [docs] def similarity_search_with_score( self, query: str, k: int = 4, search_k: int = -1 ) -> List[Tuple[Document, float]]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query and score for each """ embedding = self.embedding_function(query) docs = self.similarity_search_with_score_by_vector(embedding, k, search_k) return docs [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """
docs_and_scores = self.similarity_search_with_score_by_vector( embedding, k, search_k ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search_by_index( self, docstore_index: int, k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to docstore_index. Args: docstore_index: Index of document in docstore k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the embedding. """ docs_and_scores = self.similarity_search_with_score_by_index( docstore_index, k, search_k ) return [doc for doc, _ in docs_and_scores] [docs] def similarity_search( self, query: str, k: int = 4, search_k: int = -1, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. search_k: inspect up to search_k nodes which defaults to n_trees * n if not provided Returns: List of Documents most similar to the query. """
docs_and_scores = self.similarity_search_with_score(query, k, search_k) return [doc for doc, _ in docs_and_scores] [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. fetch_k: Number of Documents to fetch to pass to MMR algorithm. k: Number of Documents to return. Defaults to 4. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ idxs = self.index.get_nns_by_vector( embedding, fetch_k, search_k=-1, include_distances=False ) embeddings = [self.index.get_item_vector(i) for i in idxs] mmr_selected = maximal_marginal_relevance( np.array([embedding], dtype=np.float32), embeddings, k=k,
lambda_mult=lambda_mult, ) # ignore the -1's if not enough docs are returned/indexed selected_indices = [idxs[i] for i in mmr_selected if i != -1] docs = [] for i in selected_indices: _id = self.index_to_docstore_id[i] doc = self.docstore.search(_id) if not isinstance(doc, Document): raise ValueError(f"Could not find document for id {_id}, got {doc}") docs.append(doc) return docs [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ embedding = self.embedding_function(query) docs = self.max_marginal_relevance_search_by_vector(
embedding, k, fetch_k, lambda_mult=lambda_mult ) return docs @classmethod def __from( cls, texts: List[str], embeddings: List[List[float]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: if metric not in INDEX_METRICS: raise ValueError( ( f"Unsupported distance metric: {metric}. " f"Expected one of {list(INDEX_METRICS)}" ) ) annoy = dependable_annoy_import() if not embeddings: raise ValueError("embeddings must be provided to build AnnoyIndex") f = len(embeddings[0]) index = annoy.AnnoyIndex(f, metric=metric) for i, emb in enumerate(embeddings): index.add_item(i, emb) index.build(trees, n_jobs=n_jobs) documents = [] for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} documents.append(Document(page_content=text, metadata=metadata)) index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))} docstore = InMemoryDocstore(
{index_to_id[i]: doc for i, doc in enumerate(documents)} ) return cls(embedding.embed_query, index, metric, docstore, index_to_id) [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from raw documents. Args: texts: List of documents to index. embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1. This is a user-friendly interface that: 1. Embeds documents. 2. Creates an in-memory docstore 3. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() index = Annoy.from_texts(texts, embeddings) """ embeddings = embedding.embed_documents(texts)
return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) [docs] @classmethod def from_embeddings( cls, text_embeddings: List[Tuple[str, List[float]]], embedding: Embeddings, metadatas: Optional[List[dict]] = None, metric: str = DEFAULT_METRIC, trees: int = 100, n_jobs: int = -1, **kwargs: Any, ) -> Annoy: """Construct Annoy wrapper from embeddings. Args: text_embeddings: List of tuples of (text, embedding) embedding: Embedding function to use. metadatas: List of metadata dictionaries to associate with documents. metric: Metric to use for indexing. Defaults to "angular". trees: Number of trees to use for indexing. Defaults to 100. n_jobs: Number of jobs to use for indexing. Defaults to -1. This is a user-friendly interface that: 1. Creates an in-memory docstore with provided embeddings 2. Initializes the Annoy database This is intended to be a quick way to get started. Example: .. code-block:: python from langchain import Annoy from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings() text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings)) db = Annoy.from_embeddings(text_embedding_pairs, embeddings) """ texts = [t[0] for t in text_embeddings] embeddings = [t[1] for t in text_embeddings] return cls.__from( texts, embeddings, embedding, metadatas, metric, trees, n_jobs, **kwargs ) [docs] def save_local(self, folder_path: str, prefault: bool = False) -> None: """Save Annoy index, docstore, and index_to_docstore_id to disk. Args: folder_path: folder path to save index, docstore, and index_to_docstore_id to. prefault: Whether to pre-load the index into memory. """ path = Path(folder_path) os.makedirs(path, exist_ok=True) # save index, index config, docstore and index_to_docstore_id config_object = ConfigParser() config_object["ANNOY"] = { "f": self.index.f, "metric": self.metric, } self.index.save(str(path / "index.annoy"), prefault=prefault) with open(path / "index.pkl", "wb") as file: pickle.dump((self.docstore, self.index_to_docstore_id, config_object), file)
[docs] @classmethod def load_local( cls, folder_path: str, embeddings: Embeddings, ) -> Annoy: """Load Annoy index, docstore, and index_to_docstore_id from disk. Args: folder_path: folder path to load index, docstore, and index_to_docstore_id from. embeddings: Embeddings to use when generating queries. """ path = Path(folder_path) # load index separately since it is not picklable annoy = dependable_annoy_import() # load docstore and index_to_docstore_id with open(path / "index.pkl", "rb") as file: docstore, index_to_docstore_id, config_object = pickle.load(file) f = int(config_object["ANNOY"]["f"]) metric = config_object["ANNOY"]["metric"] index = annoy.AnnoyIndex(f, metric=metric) index.load(str(path / "index.annoy")) return cls( embeddings.embed_query, index, metric, docstore, index_to_docstore_id )
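Because add_texts raises and an Annoy index is immutable once built, the usual pattern with this wrapper is to build once, persist with save_local, and reload with load_local. A minimal sketch, assuming a writable ./annoy_index folder and credentials for the embedding provider:

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Annoy

    embeddings = OpenAIEmbeddings()
    db = Annoy.from_texts(["hello annoy"], embeddings, metric="angular", trees=10)

    # Writes index.annoy plus a pickle of the docstore, the id mapping,
    # and the (f, metric) config needed to rebuild the AnnoyIndex.
    db.save_local("./annoy_index")

    # load_local reads the pickle, rebuilds the AnnoyIndex, and loads the
    # saved index file back in.
    db2 = Annoy.load_local("./annoy_index", embeddings)
    docs = db2.similarity_search("hello", k=1)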
Source code for langchain.vectorstores.weaviate """Wrapper around weaviate vector database.""" from __future__ import annotations from typing import Any, Dict, Iterable, List, Optional, Type from uuid import uuid4 import numpy as np from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance def _default_schema(index_name: str) -> Dict: return { "class": index_name, "properties": [ { "name": "text", "dataType": ["text"], } ], } def _create_weaviate_client(**kwargs: Any) -> Any: client = kwargs.get("client") if client is not None: return client weaviate_url = get_from_dict_or_env(kwargs, "weaviate_url", "WEAVIATE_URL") weaviate_api_key = get_from_dict_or_env( kwargs, "weaviate_api_key", "WEAVIATE_API_KEY", None ) try: import weaviate except ImportError: raise ValueError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`." ) auth = ( weaviate.auth.AuthApiKey(api_key=weaviate_api_key)
if weaviate_api_key is not None else None ) client = weaviate.Client(weaviate_url, auth_client_secret=auth) return client [docs]class Weaviate(VectorStore): """Wrapper around Weaviate vector database. To use, you should have the ``weaviate-client`` python package installed. Example: .. code-block:: python import weaviate from langchain.vectorstores import Weaviate client = weaviate.Client(url=os.environ["WEAVIATE_URL"], ...) weaviate = Weaviate(client, index_name, text_key) """ def __init__( self, client: Any, index_name: str, text_key: str, embedding: Optional[Embeddings] = None, attributes: Optional[List[str]] = None, ): """Initialize with Weaviate client.""" try: import weaviate except ImportError: raise ValueError( "Could not import weaviate python package. " "Please install it with `pip install weaviate-client`." ) if not isinstance(client, weaviate.Client): raise ValueError( f"client should be an instance of weaviate.Client, got {type(client)}" ) self._client = client self._index_name = index_name self._embedding = embedding self._text_key = text_key self._query_attrs = [self._text_key] if attributes is not None:
self._query_attrs.extend(attributes) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Upload texts with metadata (properties) to Weaviate.""" from weaviate.util import get_valid_uuid with self._client.batch as batch: ids = [] for i, doc in enumerate(texts): data_properties = { self._text_key: doc, } if metadatas is not None: for key in metadatas[i].keys(): data_properties[key] = metadatas[i][key] _id = get_valid_uuid(uuid4()) batch.add_data_object( data_object=data_properties, class_name=self._index_name, uuid=_id ) ids.append(_id) return ids [docs] def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query. """ content: Dict[str, Any] = {"concepts": [query]} if kwargs.get("search_distance"):
content["certainty"] = kwargs.get("search_distance") query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) result = query_obj.with_near_text(content).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Look up similar documents by embedding vector in Weaviate.""" vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter"))
result = query_obj.with_near_vector(vector).with_limit(k).do() if "errors" in result: raise ValueError(f"Error during query: {result['errors']}") docs = [] for res in result["data"]["Get"][self._index_name]: text = res.pop(self._text_key) docs.append(Document(page_content=text, metadata=res)) return docs [docs] def max_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ if self._embedding is not None: embedding = self._embedding.embed_query(query) else: raise ValueError( "max_marginal_relevance_search requires a suitable Embeddings object" )
return self.max_marginal_relevance_search_by_vector( embedding, k=k, fetch_k=fetch_k, lambda_mult=lambda_mult, **kwargs ) [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ vector = {"vector": embedding} query_obj = self._client.query.get(self._index_name, self._query_attrs) if kwargs.get("where_filter"): query_obj = query_obj.with_where(kwargs.get("where_filter")) results = ( query_obj.with_additional("vector") .with_near_vector(vector)
.with_limit(fetch_k) .do() ) payload = results["data"]["Get"][self._index_name] embeddings = [result["_additional"]["vector"] for result in payload] mmr_selected = maximal_marginal_relevance( np.array(embedding), embeddings, k=k, lambda_mult=lambda_mult ) docs = [] for idx in mmr_selected: text = payload[idx].pop(self._text_key) payload[idx].pop("_additional") meta = payload[idx] docs.append(Document(page_content=text, metadata=meta)) return docs [docs] @classmethod def from_texts( cls: Type[Weaviate], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> Weaviate: """Construct Weaviate wrapper from raw documents. This is a user-friendly interface that: 1. Embeds documents. 2. Creates a new index for the embeddings in the Weaviate instance. 3. Adds the documents to the newly created Weaviate index. This is intended to be a quick way to get started. Example: .. code-block:: python from langchain.vectorstores.weaviate import Weaviate from langchain.embeddings import OpenAIEmbeddings embeddings = OpenAIEmbeddings()
weaviate = Weaviate.from_texts( texts, embeddings, weaviate_url="http://localhost:8080" ) """ client = _create_weaviate_client(**kwargs) from weaviate.util import get_valid_uuid index_name = kwargs.get("index_name", f"LangChain_{uuid4().hex}") embeddings = embedding.embed_documents(texts) if embedding else None text_key = "text" schema = _default_schema(index_name) attributes = list(metadatas[0].keys()) if metadatas else None # check whether the index already exists if not client.schema.contains(schema): client.schema.create_class(schema) with client.batch as batch: for i, text in enumerate(texts): data_properties = { text_key: text, } if metadatas is not None: for key in metadatas[i].keys(): data_properties[key] = metadatas[i][key] _id = get_valid_uuid(uuid4()) # if an embedding strategy is not provided, we let # weaviate create the embedding. Note that this will only # work if weaviate has been installed with a vectorizer module # like text2vec-contextionary for example params = { "uuid": _id, "data_object": data_properties, "class_name": index_name, }
if embeddings is not None: params["vector"] = embeddings[i] batch.add_data_object(**params) batch.flush() return cls(client, index_name, text_key, embedding, attributes)
Source code for langchain.vectorstores.myscale """Wrapper around MyScale vector database.""" from __future__ import annotations import json import logging from hashlib import sha1 from threading import Thread from typing import Any, Dict, Iterable, List, Optional, Tuple from pydantic import BaseSettings from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore logger = logging.getLogger() def has_mul_sub_str(s: str, *args: Any) -> bool: for a in args: if a not in s: return False return True [docs]class MyScaleSettings(BaseSettings): """MyScale Client Configuration Attribute: myscale_host (str) : A URL to connect to the MyScale backend. Defaults to 'localhost'. myscale_port (int) : URL port to connect with HTTP. Defaults to 8443. username (str) : Username to log in. Defaults to None. password (str) : Password to log in. Defaults to None. index_type (str): index type string. index_param (dict): index build parameter. database (str) : Database name to find the table. Defaults to 'default'. table (str) : Table name to operate on. Defaults to 'vector_table'. metric (str) : Metric to compute distance, supported are ('l2', 'cosine', 'ip'). Defaults to 'cosine'.
column_map (Dict) : Column type map to project column name onto langchain semantics. Must have keys: `text`, `id`, and `vector`, and must be the same size as the number of columns. For example: .. code-block:: python { 'id': 'text_id', 'vector': 'text_embedding', 'text': 'text_plain', 'metadata': 'metadata_dictionary_in_json', } Defaults to identity map. """ host: str = "localhost" port: int = 8443 username: Optional[str] = None password: Optional[str] = None index_type: str = "IVFFLAT" index_param: Optional[Dict[str, str]] = None column_map: Dict[str, str] = { "id": "id", "text": "text", "vector": "vector", "metadata": "metadata", } database: str = "default" table: str = "langchain" metric: str = "cosine" def __getitem__(self, item: str) -> Any: return getattr(self, item) class Config: env_file = ".env" env_prefix = "myscale_" env_file_encoding = "utf-8" [docs]class MyScale(VectorStore): """Wrapper around MyScale vector database You need the `clickhouse-connect` python package and a valid account to connect to MyScale. MyScale can not only search with simple vector indexes,
it also supports complex queries with multiple conditions, constraints, and even sub-queries. For more information, please visit [myscale official site](https://docs.myscale.com/en/overview/) """ def __init__( self, embedding: Embeddings, config: Optional[MyScaleSettings] = None, **kwargs: Any, ) -> None: """MyScale Wrapper to LangChain embedding_function (Embeddings): config (MyScaleSettings): Configuration for the MyScale client Other keyword arguments will pass into [clickhouse-connect](https://docs.myscale.com/) """ try: from clickhouse_connect import get_client except ImportError: raise ValueError( "Could not import clickhouse connect python package. " "Please install it with `pip install clickhouse-connect`." ) try: from tqdm import tqdm self.pgbar = tqdm except ImportError: # Just in case if tqdm is not installed self.pgbar = lambda x: x super().__init__() if config is not None: self.config = config else: self.config = MyScaleSettings() assert self.config assert self.config.host and self.config.port assert ( self.config.column_map and self.config.database and self.config.table and self.config.metric ) for k in ["id", "vector", "text", "metadata"]:
assert k in self.config.column_map assert self.config.metric in ["ip", "cosine", "l2"] # initialize the schema dim = len(embedding.embed_query("try this out")) index_params = ( ", " + ",".join([f"'{k}={v}'" for k, v in self.config.index_param.items()]) if self.config.index_param else "" ) schema_ = f""" CREATE TABLE IF NOT EXISTS {self.config.database}.{self.config.table}( {self.config.column_map['id']} String, {self.config.column_map['text']} String, {self.config.column_map['vector']} Array(Float32), {self.config.column_map['metadata']} JSON, CONSTRAINT cons_vec_len CHECK length(\ {self.config.column_map['vector']}) = {dim}, VECTOR INDEX vidx {self.config.column_map['vector']} \ TYPE {self.config.index_type}(\ 'metric_type={self.config.metric}'{index_params}) ) ENGINE = MergeTree ORDER BY {self.config.column_map['id']} """ self.dim = dim self.BS = "\\"
self.must_escape = ("\\", "'") self.embedding_function = embedding.embed_query self.dist_order = "ASC" if self.config.metric in ["cosine", "l2"] else "DESC" # Create a connection to myscale self.client = get_client( host=self.config.host, port=self.config.port, username=self.config.username, password=self.config.password, **kwargs, ) self.client.command("SET allow_experimental_object_type=1") self.client.command(schema_) [docs] def escape_str(self, value: str) -> str: return "".join(f"{self.BS}{c}" if c in self.must_escape else c for c in value) def _build_istr(self, transac: Iterable, column_names: Iterable[str]) -> str: ks = ",".join(column_names) _data = [] for n in transac: n = ",".join([f"'{self.escape_str(str(_n))}'" for _n in n]) _data.append(f"({n})") i_str = f""" INSERT INTO TABLE {self.config.database}.{self.config.table}({ks}) VALUES {','.join(_data)} """ return i_str
def _insert(self, transac: Iterable, column_names: Iterable[str]) -> None: _i_str = self._build_istr(transac, column_names) self.client.command(_i_str) [docs] def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, batch_size: int = 32, ids: Optional[Iterable[str]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. ids: Optional list of ids to associate with the texts. batch_size: Batch size of insertion metadatas: Optional column data to be inserted Returns: List of ids from adding the texts into the vectorstore. """ # Embed and create the documents ids = ids or [sha1(t.encode("utf-8")).hexdigest() for t in texts] colmap_ = self.config.column_map transac = [] column_names = { colmap_["id"]: ids, colmap_["text"]: texts, colmap_["vector"]: map(self.embedding_function, texts), } metadatas = metadatas or [{} for _ in texts]
column_names[colmap_["metadata"]] = map(json.dumps, metadatas) assert len(set(colmap_) - set(column_names)) >= 0 keys, values = zip(*column_names.items()) try: t = None for v in self.pgbar( zip(*values), desc="Inserting data...", total=len(metadatas) ): assert len(v[keys.index(self.config.column_map["vector"])]) == self.dim transac.append(v) if len(transac) == batch_size: if t: t.join() t = Thread(target=self._insert, args=[transac, keys]) t.start() transac = [] if len(transac) > 0: if t: t.join() self._insert(transac, keys) return [i for i in ids] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] [docs] @classmethod def from_texts( cls, texts: List[str], embedding: Embeddings, metadatas: Optional[List[Dict[Any, Any]]] = None, config: Optional[MyScaleSettings] = None,
text_ids: Optional[Iterable[str]] = None, batch_size: int = 32, **kwargs: Any, ) -> MyScale: """Create MyScale wrapper with existing texts Args: embedding_function (Embeddings): Function to extract text embedding texts (Iterable[str]): List or tuple of strings to be added config (MyScaleSettings, Optional): MyScale configuration text_ids (Optional[Iterable], optional): IDs for the texts. Defaults to None. batch_size (int, optional): Batch size when transmitting data to MyScale. Defaults to 32. metadatas (List[dict], optional): metadata for the texts. Defaults to None. Other keyword arguments will pass into [clickhouse-connect](https://clickhouse.com/docs/en/integrations/python#clickhouse-connect-driver-api) Returns: MyScale Index """ ctx = cls(embedding, config, **kwargs) ctx.add_texts(texts, ids=text_ids, batch_size=batch_size, metadatas=metadatas) return ctx def __repr__(self) -> str: """Text representation for MyScale; prints backends, username and schemas. Easy to use with `str(MyScale())` Returns: repr: string to show connection info and data schema """
_repr = f"\033[92m\033[1m{self.config.database}.{self.config.table} @ " _repr += f"{self.config.host}:{self.config.port}\033[0m\n\n" _repr += f"\033[1musername: {self.config.username}\033[0m\n\nTable Schema:\n" _repr += "-" * 51 + "\n" for r in self.client.query( f"DESC {self.config.database}.{self.config.table}" ).named_results(): _repr += ( f"|\033[94m{r['name']:24s}\033[0m|\033[96m{r['type']:24s}\033[0m|\n" ) _repr += "-" * 51 + "\n" return _repr def _build_qstr( self, q_emb: List[float], topk: int, where_str: Optional[str] = None ) -> str: q_emb_str = ",".join(map(str, q_emb)) if where_str: where_str = f"PREWHERE {where_str}" else: where_str = "" q_str = f""" SELECT {self.config.column_map['text']},
{self.config.column_map['metadata']}, dist FROM {self.config.database}.{self.config.table} {where_str} ORDER BY distance({self.config.column_map['vector']}, [{q_emb_str}]) AS dist {self.dist_order} LIMIT {topk} """ return q_str [docs] def similarity_search( self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Document]: """Perform a similarity search with MyScale Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ return self.similarity_search_by_vector( self.embedding_function(query), k, where_str, **kwargs ) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4,
where_str: Optional[str] = None, **kwargs: Any, ) -> List[Document]: """Perform a similarity search with MyScale by vectors Args: embedding (List[float]): query embedding vector k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Document]: List of Documents """ q_str = self._build_qstr(embedding, k, where_str) try: return [ Document( page_content=r[self.config.column_map["text"]], metadata=r[self.config.column_map["metadata"]], ) for r in self.client.query(q_str).named_results() ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] [docs] def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, where_str: Optional[str] = None, **kwargs: Any ) -> List[Tuple[Document, float]]: """Perform a similarity search with MyScale Args: query (str): query string k (int, optional): Top K neighbors to retrieve. Defaults to 4. where_str (Optional[str], optional): where condition string. Defaults to None. NOTE: Please do not let end users fill this in, and always be aware of SQL injection. When dealing with metadatas, remember to use `{self.metadata_column}.attribute` instead of `attribute` alone. The default name for it is `metadata`. Returns: List[Tuple[Document, float]]: List of documents and scores """ q_str = self._build_qstr(self.embedding_function(query), k, where_str) try: return [ ( Document( page_content=r[self.config.column_map["text"]], metadata=r[self.config.column_map["metadata"]], ), r["dist"], ) for r in self.client.query(q_str).named_results() ] except Exception as e: logger.error(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") return [] [docs] def drop(self) -> None:
""" Helper function: Drop data """ self.client.command( f"DROP TABLE IF EXISTS {self.config.database}.{self.config.table}" ) @property def metadata_column(self) -> str: return self.config.column_map["metadata"]
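A minimal end-to-end sketch of the class above, assuming hypothetical connection settings (the same values can also come from MYSCALE_* environment variables via the pydantic Config shown earlier):

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores.myscale import MyScale, MyScaleSettings

    config = MyScaleSettings(
        host="msc-host.example.com",  # hypothetical host
        port=8443,
        username="user",
        password="secret",
    )
    db = MyScale.from_texts(["hello myscale"], OpenAIEmbeddings(), config=config)

    # where_str is spliced into the SQL PREWHERE clause verbatim, so build it
    # yourself and never from end-user input.
    docs = db.similarity_search("hello", k=4, where_str="metadata.source = 'docs'")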
Source code for langchain.vectorstores.base """Interface for vector stores.""" from __future__ import annotations import asyncio from abc import ABC, abstractmethod from functools import partial from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, TypeVar from pydantic import BaseModel, Field, root_validator from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever VST = TypeVar("VST", bound="VectorStore") [docs]class VectorStore(ABC): """Interface for vector stores.""" [docs] @abstractmethod def add_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore. Args: texts: Iterable of strings to add to the vectorstore. metadatas: Optional list of metadatas associated with the texts. kwargs: vectorstore specific parameters Returns: List of ids from adding the texts into the vectorstore. """ [docs] async def aadd_texts( self, texts: Iterable[str], metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> List[str]: """Run more texts through the embeddings and add to the vectorstore."""
raise NotImplementedError [docs] def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): Documents to add to the vectorstore. Returns: List[str]: List of IDs of the added texts. """ # TODO: Handle the case where the user doesn't provide ids on the Collection texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return self.add_texts(texts, metadatas, **kwargs) [docs] async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Run more documents through the embeddings and add to the vectorstore. Args: documents (List[Document]): Documents to add to the vectorstore. Returns: List[str]: List of IDs of the added texts. """ texts = [doc.page_content for doc in documents] metadatas = [doc.metadata for doc in documents] return await self.aadd_texts(texts, metadatas, **kwargs) [docs] @abstractmethod def similarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" [docs] def similarity_search_with_relevance_scores(
self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores in the range [0, 1]. 0 is dissimilar, 1 is most similar. """ docs_and_similarities = self._similarity_search_with_relevance_scores( query, k=k, **kwargs ) if any( similarity < 0.0 or similarity > 1.0 for _, similarity in docs_and_similarities ): raise ValueError( "Relevance scores must be between" f" 0 and 1, got {docs_and_similarities}" ) return docs_and_similarities def _similarity_search_with_relevance_scores( self, query: str, k: int = 4, **kwargs: Any, ) -> List[Tuple[Document, float]]: """Return docs and relevance scores, normalized on a scale from 0 to 1. 0 is dissimilar, 1 is most similar. """ raise NotImplementedError [docs] async def asimilarity_search( self, query: str, k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to query.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations.
func = partial(self.similarity_search, query, k, **kwargs) return await asyncio.get_event_loop().run_in_executor(None, func) [docs] def similarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. Returns: List of Documents most similar to the query vector. """ raise NotImplementedError [docs] async def asimilarity_search_by_vector( self, embedding: List[float], k: int = 4, **kwargs: Any ) -> List[Document]: """Return docs most similar to embedding vector.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. func = partial(self.similarity_search_by_vector, embedding, k, **kwargs) return await asyncio.get_event_loop().run_in_executor(None, func) [docs] def max_marginal_relevance_search( self, query: str, k: int = 4,
fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: query: Text to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ raise NotImplementedError [docs] async def amax_marginal_relevance_search( self, query: str, k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" # This is a temporary workaround to make the similarity search # asynchronous. The proper solution is to make the similarity search # asynchronous in the vector store implementations. func = partial( self.max_marginal_relevance_search, query, k, fetch_k, lambda_mult, **kwargs )
return await asyncio.get_event_loop().run_in_executor(None, func) [docs] def max_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance. Maximal marginal relevance optimizes for similarity to query AND diversity among selected documents. Args: embedding: Embedding to look up documents similar to. k: Number of Documents to return. Defaults to 4. fetch_k: Number of Documents to fetch to pass to MMR algorithm. lambda_mult: Number between 0 and 1 that determines the degree of diversity among the results with 0 corresponding to maximum diversity and 1 to minimum diversity. Defaults to 0.5. Returns: List of Documents selected by maximal marginal relevance. """ raise NotImplementedError [docs] async def amax_marginal_relevance_search_by_vector( self, embedding: List[float], k: int = 4, fetch_k: int = 20, lambda_mult: float = 0.5, **kwargs: Any, ) -> List[Document]: """Return docs selected using the maximal marginal relevance.""" raise NotImplementedError [docs] @classmethod def from_documents( cls: Type[VST], documents: List[Document],
embedding: Embeddings, **kwargs: Any, ) -> VST: """Return VectorStore initialized from documents and embeddings.""" texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return cls.from_texts(texts, embedding, metadatas=metadatas, **kwargs) [docs] @classmethod async def afrom_documents( cls: Type[VST], documents: List[Document], embedding: Embeddings, **kwargs: Any, ) -> VST: """Return VectorStore initialized from documents and embeddings.""" texts = [d.page_content for d in documents] metadatas = [d.metadata for d in documents] return await cls.afrom_texts(texts, embedding, metadatas=metadatas, **kwargs) [docs] @classmethod @abstractmethod def from_texts( cls: Type[VST], texts: List[str], embedding: Embeddings, metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return VectorStore initialized from texts and embeddings.""" [docs] @classmethod async def afrom_texts( cls: Type[VST], texts: List[str], embedding: Embeddings,
metadatas: Optional[List[dict]] = None, **kwargs: Any, ) -> VST: """Return VectorStore initialized from texts and embeddings.""" raise NotImplementedError [docs] def as_retriever(self, **kwargs: Any) -> BaseRetriever: return VectorStoreRetriever(vectorstore=self, **kwargs) class VectorStoreRetriever(BaseRetriever, BaseModel): vectorstore: VectorStore search_type: str = "similarity" search_kwargs: dict = Field(default_factory=dict) class Config: """Configuration for this pydantic object.""" arbitrary_types_allowed = True @root_validator() def validate_search_type(cls, values: Dict) -> Dict: """Validate search type.""" if "search_type" in values: search_type = values["search_type"] if search_type not in ("similarity", "mmr"): raise ValueError(f"search_type of {search_type} not allowed.") return values def get_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = self.vectorstore.similarity_search(query, **self.search_kwargs) elif self.search_type == "mmr": docs = self.vectorstore.max_marginal_relevance_search(
query, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs async def aget_relevant_documents(self, query: str) -> List[Document]: if self.search_type == "similarity": docs = await self.vectorstore.asimilarity_search( query, **self.search_kwargs ) elif self.search_type == "mmr": docs = await self.vectorstore.amax_marginal_relevance_search( query, **self.search_kwargs ) else: raise ValueError(f"search_type of {self.search_type} not allowed.") return docs def add_documents(self, documents: List[Document], **kwargs: Any) -> List[str]: """Add documents to vectorstore.""" return self.vectorstore.add_documents(documents, **kwargs) async def aadd_documents( self, documents: List[Document], **kwargs: Any ) -> List[str]: """Add documents to vectorstore.""" return await self.vectorstore.aadd_documents(documents, **kwargs)
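To round off the interface, here is a short sketch of the retriever wrapper in use, with Annoy standing in for any concrete VectorStore (the texts are hypothetical):

.. code-block:: python

    from langchain.embeddings import OpenAIEmbeddings
    from langchain.vectorstores import Annoy

    db = Annoy.from_texts(["vector stores index embeddings"], OpenAIEmbeddings())

    # "similarity" (the default) dispatches to similarity_search; "mmr"
    # dispatches to max_marginal_relevance_search with the same search_kwargs.
    retriever = db.as_retriever(search_type="mmr", search_kwargs={"k": 1, "fetch_k": 5})
    docs = retriever.get_relevant_documents("what is a vector store?")

    # Any other search_type fails the root_validator at construction time.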
Source code for langchain.vectorstores.elastic_vector_search """Wrapper around Elasticsearch vector database.""" from __future__ import annotations import uuid from abc import ABC from typing import Any, Dict, Iterable, List, Optional, Tuple from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env from langchain.vectorstores.base import VectorStore def _default_text_mapping(dim: int) -> Dict: return { "properties": { "text": {"type": "text"}, "vector": {"type": "dense_vector", "dims": dim}, } } def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict: if filter: ((key, value),) = filter.items() filter = {"match": {f"metadata.{key}.keyword": f"{value}"}} else: filter = {"match_all": {}} return { "script_score": { "query": filter, "script": { "source": "cosineSimilarity(params.query_vector, 'vector') + 1.0", "params": {"query_vector": query_vector}, }, } } # ElasticVectorSearch is a concrete implementation of the abstract base class # VectorStore, which defines a common interface for all vector database
# implementations. By inheriting from the ABC class, ElasticVectorSearch can be # defined as an abstract base class itself, allowing the creation of subclasses with # their own specific implementations. If you plan to subclass ElasticVectorSearch, # you can inherit from it and define your own implementation of the necessary methods # and attributes. [docs]class ElasticVectorSearch(VectorStore, ABC): """Wrapper around Elasticsearch as a vector database. To connect to an Elasticsearch instance that does not require login credentials, pass the Elasticsearch URL and index name along with the embedding object to the constructor. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_vector_search = ElasticVectorSearch( elasticsearch_url="http://localhost:9200", index_name="test_index", embedding=embedding ) To connect to an Elasticsearch instance that requires login credentials, including Elastic Cloud, use the Elasticsearch URL format https://username:password@es_host:9243. For example, to connect to Elastic Cloud, create the Elasticsearch URL with the required authentication details and pass it to the ElasticVectorSearch constructor as the named parameter elasticsearch_url. You can obtain your Elastic Cloud URL and login credentials by logging in to the Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user: 1. Log in to the Elastic Cloud console at https://cloud.elastic.co 2. Go to "Security" > "Users" 3. Locate the "elastic" user and click "Edit" 4. Click "Reset password" 5. Follow the prompts to reset the password The format for Elastic Cloud URLs is https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243. Example: .. code-block:: python from langchain import ElasticVectorSearch from langchain.embeddings import OpenAIEmbeddings embedding = OpenAIEmbeddings() elastic_host = "cluster_id.region_id.gcp.cloud.es.io" elasticsearch_url = f"https://username:password@{elastic_host}:9243" elastic_vector_search = ElasticVectorSearch( elasticsearch_url=elasticsearch_url, index_name="test_index", embedding=embedding ) Args: elasticsearch_url (str): The URL for the Elasticsearch instance. index_name (str): The name of the Elasticsearch index for the embeddings. embedding (Embeddings): An object that provides the ability to embed text. It should be an instance of a class that subclasses the Embeddings abstract base class, such as OpenAIEmbeddings() Raises: ValueError: If the elasticsearch python package is not installed. """
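One caveat the docstring does not spell out: credentials embedded in the `https://username:password@host:9243` form must be percent-encoded if they contain reserved characters such as `@` or `:`. A minimal sketch using the standard library (the credentials and host shown are placeholders):

.. code-block:: python

    from urllib.parse import quote_plus

    # Hypothetical credentials; percent-encode them before embedding in the URL.
    username = quote_plus("elastic")
    password = quote_plus("p@ss:word!")
    elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
    elasticsearch_url = f"https://{username}:{password}@{elastic_host}:9243"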
    def __init__(self, elasticsearch_url: str, index_name: str, embedding: Embeddings):
        """Initialize with necessary components."""
        try:
            import elasticsearch
        except ImportError:
            raise ValueError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        self.embedding = embedding
        self.index_name = index_name
        try:
            es_client = elasticsearch.Elasticsearch(elasticsearch_url)  # noqa
        except ValueError as e:
            raise ValueError(
                f"Your elasticsearch client string is misformatted. Got error: {e}"
            )
        self.client = es_client
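The `add_texts` method below creates the index on first use with the mapping returned by `_default_text_mapping`. As a sketch, for a hypothetical 1536-dimensional embedding model the mapping sent to Elasticsearch would be:

.. code-block:: python

    mapping = _default_text_mapping(1536)
    # mapping == {
    #     "properties": {
    #         "text": {"type": "text"},
    #         "vector": {"type": "dense_vector", "dims": 1536},
    #     }
    # }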
"Please install it with `pip install elasticsearch`." ) requests = [] ids = [] embeddings = self.embedding.embed_documents(list(texts)) dim = len(embeddings[0]) mapping = _default_text_mapping(dim) # check to see if the index already exists try: self.client.indices.get(index=self.index_name) except NotFoundError: # TODO would be nice to create index before embedding, # just to save expensive steps for last self.client.indices.create(index=self.index_name, mappings=mapping) for i, text in enumerate(texts): metadata = metadatas[i] if metadatas else {} _id = str(uuid.uuid4()) request = { "_op_type": "index", "_index": self.index_name, "vector": embeddings[i], "text": text, "metadata": metadata, "_id": _id, } ids.append(_id) requests.append(request) bulk(self.client, requests) if refresh_indices: self.client.indices.refresh(index=self.index_name) return ids [docs] def similarity_search( self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any ) -> List[Document]: """Return docs most similar to query. Args: query: Text to look up documents similar to.
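Each text is embedded, assigned a random UUID, and bulk-indexed together with its metadata. A minimal usage sketch, assuming an `elastic_vector_search` instance constructed as in the class docstring:

.. code-block:: python

    ids = elastic_vector_search.add_texts(
        texts=["hello world", "goodbye world"],
        metadatas=[{"source": "greeting"}, {"source": "farewell"}],
    )
    # One UUID string per input text, in order.
    assert len(ids) == 2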
    [docs] def similarity_search(
        self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
        documents = [d[0] for d in docs_and_scores]
        return documents

    [docs] def similarity_search_with_score(
        self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        embedding = self.embedding.embed_query(query)
        script_query = _default_script_query(embedding, filter)
        response = self.client.search(
            index=self.index_name, query=script_query, size=k
        )
        hits = [hit for hit in response["hits"]["hits"]]
        docs_and_scores = [
            (
                Document(
                    page_content=hit["_source"]["text"],
                    metadata=hit["_source"]["metadata"],
                ),
                hit["_score"],
            )
            for hit in hits
        ]
        return docs_and_scores
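Because `_default_script_query` destructures exactly one `(key, value)` pair, the `filter` argument supports a single metadata field, matched against its `.keyword` subfield; passing more than one key raises a `ValueError` at unpacking time. A sketch, with a hypothetical `source` field:

.. code-block:: python

    docs_and_scores = elastic_vector_search.similarity_search_with_score(
        "What is a vector store?",
        k=2,
        filter={"source": "faq"},  # exactly one metadata key is supported
    )
    for doc, score in docs_and_scores:
        print(score, doc.page_content[:80])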
    [docs] @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> ElasticVectorSearch:
        """Construct ElasticVectorSearch wrapper from raw documents.

        This is a user-friendly interface that:
            1. Embeds documents.
            2. Creates a new index for the embeddings in the Elasticsearch instance.
            3. Adds the documents to the newly created Elasticsearch index.

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import ElasticVectorSearch
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                elastic_vector_search = ElasticVectorSearch.from_texts(
                    texts,
                    embeddings,
                    elasticsearch_url="http://localhost:9200"
                )
        """
        elasticsearch_url = get_from_dict_or_env(
            kwargs, "elasticsearch_url", "ELASTICSEARCH_URL"
        )
        try:
            import elasticsearch
            from elasticsearch.exceptions import NotFoundError
            from elasticsearch.helpers import bulk
        except ImportError:
            raise ValueError(
                "Could not import elasticsearch python package. "
                "Please install it with `pip install elasticsearch`."
            )
        try:
            client = elasticsearch.Elasticsearch(elasticsearch_url)
        except ValueError as e:
            raise ValueError(
                f"Your elasticsearch client string is misformatted. Got error: {e}"
            )
        index_name = kwargs.get("index_name", uuid.uuid4().hex)
        embeddings = embedding.embed_documents(texts)
        dim = len(embeddings[0])
        mapping = _default_text_mapping(dim)
        # check to see if the index already exists
        try:
            client.indices.get(index=index_name)
        except NotFoundError:
            # TODO would be nice to create index before embedding,
            # just to save expensive steps for last
            client.indices.create(index=index_name, mappings=mapping)
        requests = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            request = {
                "_op_type": "index",
                "_index": index_name,
                "vector": embeddings[i],
                "text": text,
                "metadata": metadata,
            }
            requests.append(request)
        bulk(client, requests)
        client.indices.refresh(index=index_name)
        return cls(elasticsearch_url, index_name, embedding)
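Putting the pieces together, a round trip through `from_texts` and `similarity_search` might look like the following sketch (the URL, texts, query, and index name are placeholders; `OpenAIEmbeddings` needs a valid OpenAI API key):

.. code-block:: python

    from langchain import ElasticVectorSearch
    from langchain.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    store = ElasticVectorSearch.from_texts(
        ["Elasticsearch stores dense vectors.", "LangChain wraps vector stores."],
        embeddings,
        elasticsearch_url="http://localhost:9200",
        index_name="demo_index",  # optional; a random hex name is used otherwise
    )
    docs = store.similarity_search("How are embeddings stored?", k=1)
    print(docs[0].page_content)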