"""Initialize with the LLM and a docstore.""" docstore_explorer = DocstoreExplorer(docstore) tools = [ Tool( name="Search", func=docstore_explorer.search, description="Search for a term in the docstore.", ), Tool( name="Lookup", func=docstore_explorer.lookup, description="Lookup a term in the docstore.", ), ] agent = ReActDocstoreAgent.from_llm_and_tools(llm, tools) super().__init__(agent=agent, tools=tools, **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\agents\\react\\base.html"
Source code for langchain.agents.self_ask_with_search.base
https://langchain.readthedocs.io/en/latest/_modules/langchain/agents/self_ask_with_search/base.html

"""Chain that does self ask with search."""
from typing import Any, Optional, Sequence, Tuple, Union

from langchain.agents.agent import Agent, AgentExecutor
from langchain.agents.self_ask_with_search.prompt import PROMPT
from langchain.agents.tools import Tool
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.tools.base import BaseTool
from langchain.utilities.google_serper import GoogleSerperAPIWrapper
from langchain.utilities.serpapi import SerpAPIWrapper


class SelfAskWithSearchAgent(Agent):
    """Agent for the self-ask-with-search paper."""

    @property
    def _agent_type(self) -> str:
        """Return Identifier of agent type."""
        return "self-ask-with-search"

    @classmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Prompt does not depend on tools."""
        return PROMPT

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        if len(tools) != 1:
            raise ValueError(f"Exactly one tool must be specified, but got {tools}")
        tool_names = {tool.name for tool in tools}
        if tool_names != {"Intermediate Answer"}:
            raise ValueError(
                f"Tool name should be Intermediate Answer, got {tool_names}"
            )

    def _extract_tool_and_input(self, text: str) -> Optional[Tuple[str, str]]:
        followup = "Follow up:"
        last_line = text.split("\n")[-1]

        if followup not in last_line:
            finish_string = "So the final answer is: "
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\agents\\self_ask_with_search\\base.html"
a4ee7b18c344-1
            if finish_string not in last_line:
                return None
            return "Final Answer", last_line[len(finish_string) :]

        after_colon = text.split(":")[-1]
        if " " == after_colon[0]:
            after_colon = after_colon[1:]

        return "Intermediate Answer", after_colon

    def _fix_text(self, text: str) -> str:
        return f"{text}\nSo the final answer is:"

    @property
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""
        return "Intermediate answer: "

    @property
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""
        return ""

    @property
    def starter_string(self) -> str:
        """Put this string after user input but before first LLM call."""
        return "Are follow up questions needed here:"


class SelfAskWithSearchChain(AgentExecutor):
    """Chain that does self ask with search.

    Example:
        .. code-block:: python

            from langchain import SelfAskWithSearchChain, OpenAI, GoogleSerperAPIWrapper
            search_chain = GoogleSerperAPIWrapper()
            self_ask = SelfAskWithSearchChain(llm=OpenAI(), search_chain=search_chain)
    """

    def __init__(
        self,
        llm: BaseLLM,
        search_chain: Union[GoogleSerperAPIWrapper, SerpAPIWrapper],
        **kwargs: Any,
    ):
        """Initialize with just an LLM and a search chain."""
        search_tool = Tool(
            name="Intermediate Answer", func=search_chain.run, description="Search"
        )
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\agents\\self_ask_with_search\\base.html"
a4ee7b18c344-2
name="Intermediate Answer", func=search_chain.run, description="Search" ) agent = SelfAskWithSearchAgent.from_llm_and_tools(llm, [search_tool]) super().__init__(agent=agent, tools=[search_tool], **kwargs) By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\agents\\self_ask_with_search\\base.html"
a935b5d62abd-0
Source code for langchain.chains.llm
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/llm.html

"""Chain that just formats a prompt and calls an LLM."""
from __future__ import annotations

from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.input import get_colored_text
from langchain.prompts.base import BasePromptTemplate
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BaseLanguageModel, LLMResult, PromptValue


class LLMChain(Chain, BaseModel):
    """Chain to run queries against LLMs.

    Example:
        .. code-block:: python

            from langchain import LLMChain, OpenAI, PromptTemplate
            prompt_template = "Tell me a {adjective} joke"
            prompt = PromptTemplate(
                input_variables=["adjective"], template=prompt_template
            )
            llm = LLMChain(llm=OpenAI(), prompt=prompt)
    """

    prompt: BasePromptTemplate
    """Prompt object to use."""
    llm: BaseLanguageModel
    output_key: str = "text"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        return [self.output_key]
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm.html"
a935b5d62abd-1
    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return self.apply([inputs])[0]

    def generate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = self.prep_prompts(input_list)
        return self.llm.generate_prompt(prompts, stop)

    async def agenerate(self, input_list: List[Dict[str, Any]]) -> LLMResult:
        """Generate LLM result from inputs."""
        prompts, stop = await self.aprep_prompts(input_list)
        return await self.llm.agenerate_prompt(prompts, stop)

    def prep_prompts(
        self, input_list: List[Dict[str, Any]]
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            self.callback_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm.html"
a935b5d62abd-2
    async def aprep_prompts(
        self, input_list: List[Dict[str, Any]]
    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
        """Prepare prompts from inputs."""
        stop = None
        if "stop" in input_list[0]:
            stop = input_list[0]["stop"]
        prompts = []
        for inputs in input_list:
            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
            prompt = self.prompt.format_prompt(**selected_inputs)
            _colored_text = get_colored_text(prompt.to_string(), "green")
            _text = "Prompt after formatting:\n" + _colored_text
            if self.callback_manager.is_async:
                await self.callback_manager.on_text(
                    _text, end="\n", verbose=self.verbose
                )
            else:
                self.callback_manager.on_text(_text, end="\n", verbose=self.verbose)
            if "stop" in inputs and inputs["stop"] != stop:
                raise ValueError(
                    "If `stop` is present in any inputs, should be present in all."
                )
            prompts.append(prompt)
        return prompts, stop

    def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        response = self.generate(input_list)
        return self.create_outputs(response)

    async def aapply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
        """Utilize the LLM generate method for speed gains."""
        response = await self.agenerate(input_list)
        return self.create_outputs(response)
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm.html"
a935b5d62abd-3
    def create_outputs(self, response: LLMResult) -> List[Dict[str, str]]:
        """Create outputs from response."""
        return [
            # Get the text of the top generated string.
            {self.output_key: generation[0].text}
            for generation in response.generations
        ]

    async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        return (await self.aapply([inputs]))[0]

    def predict(self, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return self(kwargs)[self.output_key]

    async def apredict(self, **kwargs: Any) -> str:
        """Format prompt with kwargs and pass to LLM.

        Args:
            **kwargs: Keys to pass to prompt template.

        Returns:
            Completion from LLM.

        Example:
            .. code-block:: python

                completion = llm.predict(adjective="funny")
        """
        return (await self.acall(kwargs))[self.output_key]

    def predict_and_parse(self, **kwargs: Any) -> Union[str, List[str], Dict[str, str]]:
        """Call predict and then parse the results."""
        result = self.predict(**kwargs)
        if self.prompt.output_parser is not None:
            return self.prompt.output_parser.parse(result)
        else:
            return result
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm.html"
a935b5d62abd-4
    def apply_and_parse(
        self, input_list: List[Dict[str, Any]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        result = self.apply(input_list)
        return self._parse_result(result)

    def _parse_result(
        self, result: List[Dict[str, str]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        if self.prompt.output_parser is not None:
            return [
                self.prompt.output_parser.parse(res[self.output_key]) for res in result
            ]
        else:
            return result

    async def aapply_and_parse(
        self, input_list: List[Dict[str, Any]]
    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
        """Call apply and then parse the results."""
        result = await self.aapply(input_list)
        return self._parse_result(result)

    @property
    def _chain_type(self) -> str:
        return "llm_chain"

    @classmethod
    def from_string(cls, llm: BaseLanguageModel, template: str) -> Chain:
        """Create LLMChain from LLM and template."""
        prompt_template = PromptTemplate.from_template(template)
        return cls(llm=llm, prompt=prompt_template)
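A minimal usage sketch tying the methods above together (assumes an OpenAI API key; the joke prompt mirrors the class docstring):

.. code-block:: python

    from langchain import LLMChain, OpenAI, PromptTemplate

    prompt = PromptTemplate(
        input_variables=["adjective"], template="Tell me a {adjective} joke"
    )
    chain = LLMChain(llm=OpenAI(temperature=0.9), prompt=prompt)

    # predict() formats the prompt and returns the completion string.
    print(chain.predict(adjective="funny"))

    # apply() batches several inputs through a single generate() call.
    results = chain.apply([{"adjective": "dry"}, {"adjective": "silly"}])
    # -> [{"text": "..."}, {"text": "..."}]

    # from_string() is a shortcut that builds the PromptTemplate for you.
    chain2 = LLMChain.from_string(llm=OpenAI(), template="Summarize: {text}")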
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm.html"
682afb1fbb69-0
Source code for langchain.chains.llm_requests
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/llm_requests.html

"""Chain that hits a URL and then uses an LLM to parse results."""
from __future__ import annotations

from typing import Dict, List

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.chains import LLMChain
from langchain.chains.base import Chain
from langchain.requests import RequestsWrapper

DEFAULT_HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"  # noqa: E501
}


class LLMRequestsChain(Chain, BaseModel):
    """Chain that hits a URL and then uses an LLM to parse results."""

    llm_chain: LLMChain
    requests_wrapper: RequestsWrapper = Field(
        default_factory=RequestsWrapper, exclude=True
    )
    text_length: int = 8000
    requests_key: str = "requests_result"  #: :meta private:
    input_key: str = "url"  #: :meta private:
    output_key: str = "output"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Will be whatever keys the prompt expects.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Will always return text key.

        :meta private:
        """
        return [self.output_key]
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm_requests.html"
682afb1fbb69-1
""" return [self.output_key] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup # noqa: F401 except ImportError: raise ValueError( "Could not import bs4 python package. " "Please it install it with `pip install bs4`." ) return values def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: from bs4 import BeautifulSoup # Other keys are assumed to be needed for LLM prediction other_keys = {k: v for k, v in inputs.items() if k != self.input_key} url = inputs[self.input_key] res = self.requests_wrapper.get(url) # extract the text from the html soup = BeautifulSoup(res, "html.parser") other_keys[self.requests_key] = soup.get_text()[: self.text_length] result = self.llm_chain.predict(**other_keys) return {self.output_key: result} @property def _chain_type(self) -> str: return "llm_requests_chain" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\llm_requests.html"
2f1910ee1bdc-0
Source code for langchain.chains.loading
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/loading.html

"""Functionality for loading chains."""
import json
from pathlib import Path
from typing import Any, Union

import yaml

from langchain.chains.api.base import APIChain
from langchain.chains.base import Chain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.map_rerank import MapRerankDocumentsChain
from langchain.chains.combine_documents.refine import RefineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.hyde.base import HypotheticalDocumentEmbedder
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.base import LLMBashChain
from langchain.chains.llm_checker.base import LLMCheckerChain
from langchain.chains.llm_math.base import LLMMathChain
from langchain.chains.llm_requests import LLMRequestsChain
from langchain.chains.pal.base import PALChain
from langchain.chains.qa_with_sources.base import QAWithSourcesChain
from langchain.chains.qa_with_sources.vector_db import VectorDBQAWithSourcesChain
from langchain.chains.retrieval_qa.base import VectorDBQA
from langchain.chains.sql_database.base import SQLDatabaseChain
from langchain.llms.loading import load_llm, load_llm_from_config
from langchain.prompts.loading import load_prompt, load_prompt_from_config
from langchain.utilities.loading import try_load_from_hub

URL_BASE = "https://raw.githubusercontent.com/hwchase17/langchain-hub/master/chains/"


def _load_llm_chain(config: dict, **kwargs: Any) -> LLMChain:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-1
"""Load LLM chain from config dict.""" if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) else: raise ValueError("One of `prompt` or `prompt_path` must be present.") return LLMChain(llm=llm, prompt=prompt, **config) def _load_hyde_chain(config: dict, **kwargs: Any) -> HypotheticalDocumentEmbedder: """Load hypothetical document embedder chain from config dict.""" if "llm_chain" in config: llm_chain_config = config.pop("llm_chain") llm_chain = load_chain_from_config(llm_chain_config) elif "llm_chain_path" in config: llm_chain = load_chain(config.pop("llm_chain_path")) else: raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.") if "embeddings" in kwargs: embeddings = kwargs.pop("embeddings") else: raise ValueError("`embeddings` must be present.") return HypotheticalDocumentEmbedder( llm_chain=llm_chain, base_embeddings=embeddings, **config )
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-2
def _load_stuff_documents_chain(config: dict, **kwargs: Any) -> StuffDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")

    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    else:
        raise ValueError(
            "One of `document_prompt` or `document_prompt_path` must be present."
        )

    return StuffDocumentsChain(
        llm_chain=llm_chain, document_prompt=document_prompt, **config
    )


def _load_map_reduce_documents_chain(
    config: dict, **kwargs: Any
) -> MapReduceDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-3
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")

    if not isinstance(llm_chain, LLMChain):
        raise ValueError(f"Expected LLMChain, got {llm_chain}")

    if "combine_document_chain" in config:
        combine_document_chain_config = config.pop("combine_document_chain")
        combine_document_chain = load_chain_from_config(combine_document_chain_config)
    elif "combine_document_chain_path" in config:
        combine_document_chain = load_chain(config.pop("combine_document_chain_path"))
    else:
        raise ValueError(
            "One of `combine_document_chain` or "
            "`combine_document_chain_path` must be present."
        )
    if "collapse_document_chain" in config:
        collapse_document_chain_config = config.pop("collapse_document_chain")
        if collapse_document_chain_config is None:
            collapse_document_chain = None
        else:
            collapse_document_chain = load_chain_from_config(
                collapse_document_chain_config
            )
    elif "collapse_document_chain_path" in config:
        collapse_document_chain = load_chain(config.pop("collapse_document_chain_path"))
    else:
        # Collapsing is optional; default to no collapse chain.
        collapse_document_chain = None
    return MapReduceDocumentsChain(
        llm_chain=llm_chain,
        combine_document_chain=combine_document_chain,
        collapse_document_chain=collapse_document_chain,
        **config,
    )


def _load_llm_bash_chain(config: dict, **kwargs: Any) -> LLMBashChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-4
elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) elif "prompt_path" in config: prompt = load_prompt(config.pop("prompt_path")) return LLMBashChain(llm=llm, prompt=prompt, **config) def _load_llm_checker_chain(config: dict, **kwargs: Any) -> LLMCheckerChain: if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "create_draft_answer_prompt" in config: create_draft_answer_prompt_config = config.pop("create_draft_answer_prompt") create_draft_answer_prompt = load_prompt_from_config( create_draft_answer_prompt_config ) elif "create_draft_answer_prompt_path" in config: create_draft_answer_prompt = load_prompt( config.pop("create_draft_answer_prompt_path") ) if "list_assertions_prompt" in config: list_assertions_prompt_config = config.pop("list_assertions_prompt") list_assertions_prompt = load_prompt_from_config(list_assertions_prompt_config) elif "list_assertions_prompt_path" in config: list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-5
        list_assertions_prompt = load_prompt(config.pop("list_assertions_prompt_path"))
    if "check_assertions_prompt" in config:
        check_assertions_prompt_config = config.pop("check_assertions_prompt")
        check_assertions_prompt = load_prompt_from_config(
            check_assertions_prompt_config
        )
    elif "check_assertions_prompt_path" in config:
        check_assertions_prompt = load_prompt(
            config.pop("check_assertions_prompt_path")
        )
    if "revised_answer_prompt" in config:
        revised_answer_prompt_config = config.pop("revised_answer_prompt")
        revised_answer_prompt = load_prompt_from_config(revised_answer_prompt_config)
    elif "revised_answer_prompt_path" in config:
        revised_answer_prompt = load_prompt(config.pop("revised_answer_prompt_path"))
    return LLMCheckerChain(
        llm=llm,
        create_draft_answer_prompt=create_draft_answer_prompt,
        list_assertions_prompt=list_assertions_prompt,
        check_assertions_prompt=check_assertions_prompt,
        revised_answer_prompt=revised_answer_prompt,
        **config,
    )


def _load_llm_math_chain(config: dict, **kwargs: Any) -> LLMMathChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-6
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        # Fall back to the chain's default prompt when none is serialized.
        return LLMMathChain(llm=llm, **config)
    return LLMMathChain(llm=llm, prompt=prompt, **config)


def _load_map_rerank_documents_chain(
    config: dict, **kwargs: Any
) -> MapRerankDocumentsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
    return MapRerankDocumentsChain(llm_chain=llm_chain, **config)


def _load_pal_chain(config: dict, **kwargs: Any) -> PALChain:
    if "llm" in config:
        llm_config = config.pop("llm")
        llm = load_llm_from_config(llm_config)
    elif "llm_path" in config:
        llm = load_llm(config.pop("llm_path"))
    else:
        raise ValueError("One of `llm` or `llm_path` must be present.")
    if "prompt" in config:
        prompt_config = config.pop("prompt")
        prompt = load_prompt_from_config(prompt_config)
    elif "prompt_path" in config:
        prompt = load_prompt(config.pop("prompt_path"))
    else:
        raise ValueError("One of `prompt` or `prompt_path` must be present.")
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-7
    return PALChain(llm=llm, prompt=prompt, **config)


def _load_refine_documents_chain(config: dict, **kwargs: Any) -> RefineDocumentsChain:
    if "initial_llm_chain" in config:
        initial_llm_chain_config = config.pop("initial_llm_chain")
        initial_llm_chain = load_chain_from_config(initial_llm_chain_config)
    elif "initial_llm_chain_path" in config:
        initial_llm_chain = load_chain(config.pop("initial_llm_chain_path"))
    else:
        raise ValueError(
            "One of `initial_llm_chain` or `initial_llm_chain_path` must be present."
        )
    if "refine_llm_chain" in config:
        refine_llm_chain_config = config.pop("refine_llm_chain")
        refine_llm_chain = load_chain_from_config(refine_llm_chain_config)
    elif "refine_llm_chain_path" in config:
        refine_llm_chain = load_chain(config.pop("refine_llm_chain_path"))
    else:
        raise ValueError(
            "One of `refine_llm_chain` or `refine_llm_chain_path` must be present."
        )
    if "document_prompt" in config:
        prompt_config = config.pop("document_prompt")
        document_prompt = load_prompt_from_config(prompt_config)
    elif "document_prompt_path" in config:
        document_prompt = load_prompt(config.pop("document_prompt_path"))
    else:
        # Fall back to the chain's default document prompt when none is serialized.
        return RefineDocumentsChain(
            initial_llm_chain=initial_llm_chain,
            refine_llm_chain=refine_llm_chain,
            **config,
        )
    return RefineDocumentsChain(
        initial_llm_chain=initial_llm_chain,
        refine_llm_chain=refine_llm_chain,
        document_prompt=document_prompt,
        **config,
    )


def _load_qa_with_sources_chain(config: dict, **kwargs: Any) -> QAWithSourcesChain:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-8
if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return QAWithSourcesChain(combine_documents_chain=combine_documents_chain, **config) def _load_sql_database_chain(config: dict, **kwargs: Any) -> SQLDatabaseChain: if "database" in kwargs: database = kwargs.pop("database") else: raise ValueError("`database` must be present.") if "llm" in config: llm_config = config.pop("llm") llm = load_llm_from_config(llm_config) elif "llm_path" in config: llm = load_llm(config.pop("llm_path")) else: raise ValueError("One of `llm` or `llm_path` must be present.") if "prompt" in config: prompt_config = config.pop("prompt") prompt = load_prompt_from_config(prompt_config) return SQLDatabaseChain(database=database, llm=llm, prompt=prompt, **config) def _load_vector_db_qa_with_sources_chain( config: dict, **kwargs: Any ) -> VectorDBQAWithSourcesChain: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-9
if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQAWithSourcesChain( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_vector_db_qa(config: dict, **kwargs: Any) -> VectorDBQA: if "vectorstore" in kwargs: vectorstore = kwargs.pop("vectorstore") else: raise ValueError("`vectorstore` must be present.") if "combine_documents_chain" in config: combine_documents_chain_config = config.pop("combine_documents_chain") combine_documents_chain = load_chain_from_config(combine_documents_chain_config) elif "combine_documents_chain_path" in config: combine_documents_chain = load_chain(config.pop("combine_documents_chain_path")) else: raise ValueError( "One of `combine_documents_chain` or " "`combine_documents_chain_path` must be present." ) return VectorDBQA( combine_documents_chain=combine_documents_chain, vectorstore=vectorstore, **config, ) def _load_api_chain(config: dict, **kwargs: Any) -> APIChain: if "api_request_chain" in config: api_request_chain_config = config.pop("api_request_chain") api_request_chain = load_chain_from_config(api_request_chain_config)
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-10
        api_request_chain = load_chain_from_config(api_request_chain_config)
    elif "api_request_chain_path" in config:
        api_request_chain = load_chain(config.pop("api_request_chain_path"))
    else:
        raise ValueError(
            "One of `api_request_chain` or `api_request_chain_path` must be present."
        )
    if "api_answer_chain" in config:
        api_answer_chain_config = config.pop("api_answer_chain")
        api_answer_chain = load_chain_from_config(api_answer_chain_config)
    elif "api_answer_chain_path" in config:
        api_answer_chain = load_chain(config.pop("api_answer_chain_path"))
    else:
        raise ValueError(
            "One of `api_answer_chain` or `api_answer_chain_path` must be present."
        )
    if "requests_wrapper" in kwargs:
        requests_wrapper = kwargs.pop("requests_wrapper")
    else:
        raise ValueError("`requests_wrapper` must be present.")
    return APIChain(
        api_request_chain=api_request_chain,
        api_answer_chain=api_answer_chain,
        requests_wrapper=requests_wrapper,
        **config,
    )


def _load_llm_requests_chain(config: dict, **kwargs: Any) -> LLMRequestsChain:
    if "llm_chain" in config:
        llm_chain_config = config.pop("llm_chain")
        llm_chain = load_chain_from_config(llm_chain_config)
    elif "llm_chain_path" in config:
        llm_chain = load_chain(config.pop("llm_chain_path"))
    else:
        raise ValueError("One of `llm_chain` or `llm_chain_path` must be present.")
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-11
if "requests_wrapper" in kwargs: requests_wrapper = kwargs.pop("requests_wrapper") return LLMRequestsChain( llm_chain=llm_chain, requests_wrapper=requests_wrapper, **config ) else: return LLMRequestsChain(llm_chain=llm_chain, **config) type_to_loader_dict = { "api_chain": _load_api_chain, "hyde_chain": _load_hyde_chain, "llm_chain": _load_llm_chain, "llm_bash_chain": _load_llm_bash_chain, "llm_checker_chain": _load_llm_checker_chain, "llm_math_chain": _load_llm_math_chain, "llm_requests_chain": _load_llm_requests_chain, "pal_chain": _load_pal_chain, "qa_with_sources_chain": _load_qa_with_sources_chain, "stuff_documents_chain": _load_stuff_documents_chain, "map_reduce_documents_chain": _load_map_reduce_documents_chain, "map_rerank_documents_chain": _load_map_rerank_documents_chain, "refine_documents_chain": _load_refine_documents_chain, "sql_database_chain": _load_sql_database_chain, "vector_db_qa_with_sources_chain": _load_vector_db_qa_with_sources_chain, "vector_db_qa": _load_vector_db_qa, } def load_chain_from_config(config: dict, **kwargs: Any) -> Chain: """Load chain from Config Dict.""" if "_type" not in config: raise ValueError("Must specify a chain Type in config") config_type = config.pop("_type") if config_type not in type_to_loader_dict:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-12
    if config_type not in type_to_loader_dict:
        raise ValueError(f"Loading {config_type} chain not supported")
    chain_loader = type_to_loader_dict[config_type]
    return chain_loader(config, **kwargs)


def load_chain(path: Union[str, Path], **kwargs: Any) -> Chain:
    """Unified method for loading a chain from LangChainHub or local fs."""
    if hub_result := try_load_from_hub(
        path, _load_chain_from_file, "chains", {"json", "yaml"}, **kwargs
    ):
        return hub_result
    else:
        return _load_chain_from_file(path, **kwargs)


def _load_chain_from_file(file: Union[str, Path], **kwargs: Any) -> Chain:
    """Load chain from file."""
    # Convert file to Path object.
    if isinstance(file, str):
        file_path = Path(file)
    else:
        file_path = file
    # Load from either json or yaml.
    if file_path.suffix == ".json":
        with open(file_path) as f:
            config = json.load(f)
    elif file_path.suffix == ".yaml":
        with open(file_path, "r") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError("File type must be json or yaml")

    # Override default 'verbose' and 'memory' for the chain
    if "verbose" in kwargs:
        config["verbose"] = kwargs.pop("verbose")
    if "memory" in kwargs:
        config["memory"] = kwargs.pop("memory")

    # Load the chain from the config now.
    return load_chain_from_config(config, **kwargs)
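A minimal round-trip sketch of the loading machinery above (assumes an OpenAI key; ``chain.save`` writes the ``_type``-tagged config that ``load_chain`` reads back):

.. code-block:: python

    from langchain import LLMChain, OpenAI, PromptTemplate
    from langchain.chains.loading import load_chain

    prompt = PromptTemplate(input_variables=["topic"], template="Tell me about {topic}.")
    chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)

    # Serialize to JSON; the file records _type="llm_chain" so the loader
    # can dispatch to _load_llm_chain via type_to_loader_dict.
    chain.save("llm_chain.json")

    loaded = load_chain("llm_chain.json", verbose=True)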
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
2f1910ee1bdc-13
By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\loading.html"
0239b93c09b4-0
Source code for langchain.chains.mapreduce
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/mapreduce.html

"""Map-reduce chain.

Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations

from typing import Dict, List

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.docstore.document import Document
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.text_splitter import TextSplitter


class MapReduceChain(Chain, BaseModel):
    """Map-reduce chain."""

    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine documents."""
    text_splitter: TextSplitter
    """Text splitter to use."""
    input_key: str = "input_text"  #: :meta private:
    output_key: str = "output_text"  #: :meta private:

    @classmethod
    def from_params(
        cls, llm: BaseLLM, prompt: BasePromptTemplate, text_splitter: TextSplitter
    ) -> MapReduceChain:
        """Construct a map-reduce chain that uses the chain for map and reduce."""
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        reduce_chain = StuffDocumentsChain(llm_chain=llm_chain)
        combine_documents_chain = MapReduceDocumentsChain(
            llm_chain=llm_chain, combine_document_chain=reduce_chain
        )
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\mapreduce.html"
0239b93c09b4-1
        return cls(
            combine_documents_chain=combine_documents_chain, text_splitter=text_splitter
        )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        # Split the larger text into smaller chunks.
        texts = self.text_splitter.split_text(inputs[self.input_key])
        docs = [Document(page_content=text) for text in texts]
        outputs, _ = self.combine_documents_chain.combine_docs(docs)
        return {self.output_key: outputs}
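A minimal summarization sketch using ``from_params`` (assumes an OpenAI key; the same single-variable prompt serves both the map and reduce steps, as the constructor above shows):

.. code-block:: python

    from langchain import OpenAI, PromptTemplate
    from langchain.chains.mapreduce import MapReduceChain
    from langchain.text_splitter import CharacterTextSplitter

    prompt = PromptTemplate(
        input_variables=["text"], template="Write a concise summary:\n\n{text}"
    )
    chain = MapReduceChain.from_params(
        llm=OpenAI(temperature=0),
        prompt=prompt,
        text_splitter=CharacterTextSplitter(),
    )

    text = "..."  # replace with any long document
    summary = chain.run(text)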
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\mapreduce.html"
a150e9454a1b-0
Source code for langchain.chains.moderation
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/moderation.html

"""Pass input through a moderation endpoint."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, root_validator

from langchain.chains.base import Chain
from langchain.utils import get_from_dict_or_env


class OpenAIModerationChain(Chain, BaseModel):
    """Pass input through a moderation endpoint.

    To use, you should have the ``openai`` python package installed, and the
    environment variable ``OPENAI_API_KEY`` set with your API key.

    Any parameters that are valid to be passed to the openai.create call can be
    passed in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.chains import OpenAIModerationChain
            moderation = OpenAIModerationChain()
    """

    client: Any  #: :meta private:
    model_name: Optional[str] = None
    """Moderation model name to use."""
    error: bool = False
    """Whether or not to error if bad content was found."""
    input_key: str = "input"  #: :meta private:
    output_key: str = "output"  #: :meta private:
    openai_api_key: Optional[str] = None

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exists in environment."""
        openai_api_key = get_from_dict_or_env(
            values, "openai_api_key", "OPENAI_API_KEY"
        )
        try:
            import openai

            openai.api_key = openai_api_key
            values["client"] = openai.Moderation
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\moderation.html"
a150e9454a1b-1
        except ImportError:
            raise ValueError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        return values

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    def _moderate(self, text: str, results: dict) -> str:
        if results["flagged"]:
            error_str = "Text was found that violates OpenAI's content policy."
            if self.error:
                raise ValueError(error_str)
            else:
                return error_str
        return text

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        text = inputs[self.input_key]
        results = self.client.create(text)
        output = self._moderate(text, results["results"][0])
        return {self.output_key: output}
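A minimal usage sketch (assumes ``OPENAI_API_KEY`` is set; with ``error=True`` flagged text raises instead of returning the policy message):

.. code-block:: python

    from langchain.chains import OpenAIModerationChain

    moderation = OpenAIModerationChain()
    moderation.run("This is fine.")  # returns the text unchanged if not flagged

    strict = OpenAIModerationChain(error=True)
    # strict.run(...) raises ValueError if the endpoint flags the input.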
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\moderation.html"
dcf71c7bc6fc-0
Source code for langchain.chains.sequential
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/sequential.html

"""Chain pipeline where the outputs of one step feed directly into next."""
from typing import Dict, List

from pydantic import BaseModel, Extra, root_validator

from langchain.chains.base import Chain
from langchain.input import get_color_mapping


class SequentialChain(Chain, BaseModel):
    """Chain where the outputs of one chain feed directly into next."""

    chains: List[Chain]
    input_variables: List[str]
    output_variables: List[str]  #: :meta private:
    return_all: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return expected input keys to the chain.

        :meta private:
        """
        return self.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return self.output_variables

    @root_validator(pre=True)
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that the correct inputs exist for all chains."""
        chains = values["chains"]
        input_variables = values["input_variables"]
        memory_keys = list()
        if "memory" in values and values["memory"] is not None:
            # Input keys must not collide with keys provided by memory.
            memory_keys = values["memory"].memory_variables
            overlapping_keys = set(input_variables) & set(memory_keys)
            if overlapping_keys:
                raise ValueError(
                    f"The input key(s) {''.join(overlapping_keys)} are found "
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\sequential.html"
dcf71c7bc6fc-1
f"in the Memory keys ({memory_keys}) - please use input and " f"memory keys that don't overlap." ) known_variables = set(input_variables + memory_keys) for chain in chains: missing_vars = set(chain.input_keys).difference(known_variables) if missing_vars: raise ValueError( f"Missing required input keys: {missing_vars}, " f"only had {known_variables}" ) overlapping_keys = known_variables.intersection(chain.output_keys) if overlapping_keys: raise ValueError( f"Chain returned keys that already exist: {overlapping_keys}" ) known_variables |= set(chain.output_keys) if "output_variables" not in values: if values.get("return_all", False): output_keys = known_variables.difference(input_variables) else: output_keys = chains[-1].output_keys values["output_variables"] = output_keys else: missing_vars = set(values["output_variables"]).difference(known_variables) if missing_vars: raise ValueError( f"Expected output variables that were not found: {missing_vars}." ) return values def _call(self, inputs: Dict[str, str]) -> Dict[str, str]: known_values = inputs.copy() for i, chain in enumerate(self.chains): outputs = chain(known_values, return_only_outputs=True) known_values.update(outputs) return {k: known_values[k] for k in self.output_variables} [docs]class SimpleSequentialChain(Chain, BaseModel): """Simple chain where the outputs of one step feed directly into next.""" chains: List[Chain] strip_outputs: bool = False
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\sequential.html"
dcf71c7bc6fc-2
    chains: List[Chain]
    strip_outputs: bool = False
    input_key: str = "input"  #: :meta private:
    output_key: str = "output"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    @root_validator()
    def validate_chains(cls, values: Dict) -> Dict:
        """Validate that chains are all single input/output."""
        for chain in values["chains"]:
            if len(chain.input_keys) != 1:
                raise ValueError(
                    "Chains used in SimplePipeline should all have one input, got "
                    f"{chain} with {len(chain.input_keys)} inputs."
                )
            if len(chain.output_keys) != 1:
                raise ValueError(
                    "Chains used in SimplePipeline should all have one output, got "
                    f"{chain} with {len(chain.output_keys)} outputs."
                )
        return values

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        _input = inputs[self.input_key]
        color_mapping = get_color_mapping([str(i) for i in range(len(self.chains))])
        for i, chain in enumerate(self.chains):
            _input = chain.run(_input)
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\sequential.html"
dcf71c7bc6fc-3
            if self.strip_outputs:
                _input = _input.strip()
            self.callback_manager.on_text(
                _input, color=color_mapping[str(i)], end="\n", verbose=self.verbose
            )
        return {self.output_key: _input}
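A minimal two-step sketch with ``SimpleSequentialChain`` (assumes an OpenAI key; each sub-chain has exactly one input and one output, as the validator above requires):

.. code-block:: python

    from langchain import LLMChain, OpenAI, PromptTemplate
    from langchain.chains import SimpleSequentialChain

    llm = OpenAI(temperature=0.7)
    synopsis = LLMChain(llm=llm, prompt=PromptTemplate(
        input_variables=["title"], template="Write a synopsis for a play titled {title}."
    ))
    review = LLMChain(llm=llm, prompt=PromptTemplate(
        input_variables=["synopsis"], template="Write a review of this synopsis:\n{synopsis}"
    ))

    # The synopsis output is piped straight into the review chain.
    overall = SimpleSequentialChain(chains=[synopsis, review], verbose=True)
    overall.run("Tragedy at Sunset on the Beach")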
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\sequential.html"
44d32846b1fd-0
Source code for langchain.chains.transform
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/transform.html

"""Chain that runs an arbitrary python function."""
from typing import Callable, Dict, List

from pydantic import BaseModel

from langchain.chains.base import Chain


class TransformChain(Chain, BaseModel):
    """Chain that transforms the chain output.

    Example:
        .. code-block:: python

            from langchain import TransformChain
            transform_chain = TransformChain(input_variables=["text"],
                output_variables=["entities"], transform=func)
    """

    input_variables: List[str]
    output_variables: List[str]
    transform: Callable[[Dict[str, str]], Dict[str, str]]

    @property
    def input_keys(self) -> List[str]:
        """Expect input keys.

        :meta private:
        """
        return self.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return output keys.

        :meta private:
        """
        return self.output_variables

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        return self.transform(inputs)
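A fully worked sketch of the docstring example, with a concrete transform function:

.. code-block:: python

    from langchain.chains import TransformChain

    def truncate(inputs: dict) -> dict:
        # Keep only the first 100 characters of the incoming text.
        return {"short_text": inputs["text"][:100]}

    chain = TransformChain(
        input_variables=["text"],
        output_variables=["short_text"],
        transform=truncate,
    )
    chain.run("some very long text ...")  # -> first 100 characters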
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\transform.html"
b02d62bc60f2-0
Source code for langchain.chains.api.base
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/api/base.html

"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Field, root_validator

from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.prompts import BasePromptTemplate
from langchain.requests import RequestsWrapper
from langchain.schema import BaseLanguageModel


class APIChain(Chain, BaseModel):
    """Chain that makes API calls and summarizes the responses to answer a question."""

    api_request_chain: LLMChain
    api_answer_chain: LLMChain
    requests_wrapper: RequestsWrapper = Field(exclude=True)
    api_docs: str
    question_key: str = "question"  #: :meta private:
    output_key: str = "output"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.question_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    @root_validator(pre=True)
    def validate_api_request_prompt(cls, values: Dict) -> Dict:
        """Check that api request prompt expects the right variables."""
        input_vars = values["api_request_chain"].prompt.input_variables
        expected_vars = {"question", "api_docs"}
        if set(input_vars) != expected_vars:
            raise ValueError(
                f"Input variables should be {expected_vars}, got {input_vars}"
            )
        return values
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\api\\base.html"
b02d62bc60f2-1
    @root_validator(pre=True)
    def validate_api_answer_prompt(cls, values: Dict) -> Dict:
        """Check that api answer prompt expects the right variables."""
        input_vars = values["api_answer_chain"].prompt.input_variables
        expected_vars = {"question", "api_docs", "api_url", "api_response"}
        if set(input_vars) != expected_vars:
            raise ValueError(
                f"Input variables should be {expected_vars}, got {input_vars}"
            )
        return values

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        question = inputs[self.question_key]
        api_url = self.api_request_chain.predict(
            question=question, api_docs=self.api_docs
        )
        self.callback_manager.on_text(
            api_url, color="green", end="\n", verbose=self.verbose
        )
        api_response = self.requests_wrapper.get(api_url)
        self.callback_manager.on_text(
            api_response, color="yellow", end="\n", verbose=self.verbose
        )
        answer = self.api_answer_chain.predict(
            question=question,
            api_docs=self.api_docs,
            api_url=api_url,
            api_response=api_response,
        )
        return {self.output_key: answer}

    @classmethod
    def from_llm_and_api_docs(
        cls,
        llm: BaseLanguageModel,
        api_docs: str,
        headers: Optional[dict] = None,
        api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
        api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
        **kwargs: Any,
    ) -> APIChain:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\api\\base.html"
b02d62bc60f2-2
"""Load chain from just an LLM and the api docs.""" get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt) requests_wrapper = RequestsWrapper(headers=headers) get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt) return cls( api_request_chain=get_request_chain, api_answer_chain=get_answer_chain, requests_wrapper=requests_wrapper, api_docs=api_docs, **kwargs, ) @property def _chain_type(self) -> str: return "api_chain" By Harrison Chase © Copyright 2023, Harrison Chase. Last updated on Mar 24, 2023.
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\api\\base.html"
aa1ffa18251b-0
Source code for langchain.chains.combine_documents.base
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/combine_documents/base.html

"""Base interface for chains combining documents."""
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Tuple

from pydantic import BaseModel, Field

from langchain.chains.base import Chain
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter


class BaseCombineDocumentsChain(Chain, BaseModel, ABC):
    """Base interface for chains combining documents."""

    input_key: str = "input_documents"  #: :meta private:
    output_key: str = "output_text"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    def prompt_length(self, docs: List[Document], **kwargs: Any) -> Optional[int]:
        """Return the prompt length given the documents passed in.

        Returns None if the method does not depend on the prompt length.
        """
        return None

    @abstractmethod
    def combine_docs(self, docs: List[Document], **kwargs: Any) -> Tuple[str, dict]:
        """Combine documents into a single string."""

    @abstractmethod
    async def acombine_docs(
        self, docs: List[Document], **kwargs: Any
    ) -> Tuple[str, dict]:
        """Combine documents into a single string asynchronously."""

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\combine_documents\\base.html"
aa1ffa18251b-1
        docs = inputs[self.input_key]
        # Other keys are assumed to be needed for LLM prediction
        other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
        output, extra_return_dict = self.combine_docs(docs, **other_keys)
        extra_return_dict[self.output_key] = output
        return extra_return_dict

    async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        docs = inputs[self.input_key]
        # Other keys are assumed to be needed for LLM prediction
        other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
        output, extra_return_dict = await self.acombine_docs(docs, **other_keys)
        extra_return_dict[self.output_key] = output
        return extra_return_dict


class AnalyzeDocumentChain(Chain, BaseModel):
    """Chain that splits a document, then analyzes it in pieces."""

    input_key: str = "input_document"  #: :meta private:
    output_key: str = "output_text"  #: :meta private:
    text_splitter: TextSplitter = Field(default_factory=RecursiveCharacterTextSplitter)
    combine_docs_chain: BaseCombineDocumentsChain

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, str]:
        document = inputs[self.input_key]
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\combine_documents\\base.html"
aa1ffa18251b-2
        docs = self.text_splitter.create_documents([document])
        # Other keys are assumed to be needed for LLM prediction
        other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
        other_keys[self.combine_docs_chain.input_key] = docs
        return self.combine_docs_chain(other_keys, return_only_outputs=True)
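A minimal sketch of ``AnalyzeDocumentChain`` wrapping a QA combine-documents chain (assumes an OpenAI key; ``load_qa_chain`` returns a ``BaseCombineDocumentsChain`` subclass):

.. code-block:: python

    from langchain import OpenAI
    from langchain.chains import AnalyzeDocumentChain
    from langchain.chains.question_answering import load_qa_chain

    qa_chain = load_qa_chain(OpenAI(temperature=0), chain_type="map_reduce")
    qa_document_chain = AnalyzeDocumentChain(combine_docs_chain=qa_chain)

    long_text = "..."  # replace with the full document text
    # The raw text is split, wrapped into Documents, and fed to the QA chain.
    qa_document_chain.run(input_document=long_text, question="What does the author conclude?")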
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\combine_documents\\base.html"
a6b855def8ca-0
Source code for langchain.chains.constitutional_ai.base
https://langchain.readthedocs.io/en/latest/_modules/langchain/chains/constitutional_ai/base.html

"""Chain for applying constitutional principles to the outputs of another chain."""
from typing import Any, Dict, List, Optional

from langchain.chains.base import Chain
from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
from langchain.chains.constitutional_ai.principles import PRINCIPLES
from langchain.chains.constitutional_ai.prompts import CRITIQUE_PROMPT, REVISION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class ConstitutionalChain(Chain):
    """Chain for applying constitutional principles.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            from langchain.chains import LLMChain, ConstitutionalChain
            from langchain.chains.constitutional_ai.models import ConstitutionalPrinciple
            from langchain.prompts import PromptTemplate

            qa_prompt = PromptTemplate(
                template="Q: {question} A:",
                input_variables=["question"],
            )
            qa_chain = LLMChain(llm=OpenAI(), prompt=qa_prompt)

            constitutional_chain = ConstitutionalChain.from_llm(
                llm=OpenAI(),
                chain=qa_chain,
                constitutional_principles=[
                    ConstitutionalPrinciple(
                        critique_request="Tell if this answer is good.",
                        revision_request="Give a better answer.",
                    )
                ],
            )

            constitutional_chain.run(question="What is the meaning of life?")
    """

    chain: LLMChain
    constitutional_principles: List[ConstitutionalPrinciple]
    critique_chain: LLMChain
    revision_chain: LLMChain

    @classmethod
    def get_principles(
        cls, names: Optional[List[str]] = None
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\constitutional_ai\\base.html"
a6b855def8ca-1
    ) -> List[ConstitutionalPrinciple]:
        if names is None:
            return list(PRINCIPLES.values())
        else:
            return [PRINCIPLES[name] for name in names]

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        chain: LLMChain,
        critique_prompt: BasePromptTemplate = CRITIQUE_PROMPT,
        revision_prompt: BasePromptTemplate = REVISION_PROMPT,
        **kwargs: Any,
    ) -> "ConstitutionalChain":
        """Create a chain from an LLM."""
        critique_chain = LLMChain(llm=llm, prompt=critique_prompt)
        revision_chain = LLMChain(llm=llm, prompt=revision_prompt)
        return cls(
            chain=chain,
            critique_chain=critique_chain,
            revision_chain=revision_chain,
            **kwargs,
        )

    @property
    def input_keys(self) -> List[str]:
        """Defines the input keys."""
        return self.chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Defines the output keys."""
        return ["output"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        response = self.chain.run(**inputs)
        input_prompt = self.chain.prompt.format(**inputs)

        self.callback_manager.on_text(
            text="Initial response: " + response + "\n\n",
            verbose=self.verbose,
            color="yellow",
        )

        for constitutional_principle in self.constitutional_principles:
            # Do critique
            raw_critique = self.critique_chain.run(
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\constitutional_ai\\base.html"
a6b855def8ca-2
                input_prompt=input_prompt,
                output_from_model=response,
                critique_request=constitutional_principle.critique_request,
            )
            critique = self._parse_critique(
                output_string=raw_critique,
            ).strip()

            # Do revision
            revision = self.revision_chain.run(
                input_prompt=input_prompt,
                output_from_model=response,
                critique_request=constitutional_principle.critique_request,
                critique=critique,
                revision_request=constitutional_principle.revision_request,
            ).strip()
            response = revision

            self.callback_manager.on_text(
                text=f"Applying {constitutional_principle.name}..." + "\n\n",
                verbose=self.verbose,
                color="green",
            )
            self.callback_manager.on_text(
                text="Critique: " + critique + "\n\n",
                verbose=self.verbose,
                color="blue",
            )
            self.callback_manager.on_text(
                text="Updated response: " + revision + "\n\n",
                verbose=self.verbose,
                color="yellow",
            )

        return {"output": response}

    @staticmethod
    def _parse_critique(output_string: str) -> str:
        if "Revision request:" not in output_string:
            return output_string
        output_string = output_string.split("Revision request:")[0]
        if "\n\n" in output_string:
            output_string = output_string.split("\n\n")[0]
        return output_string
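Built-in principles can also be pulled from the ``PRINCIPLES`` registry via ``get_principles``; a brief sketch (the available principle names depend on the installed version, so the commented selection is illustrative only):

.. code-block:: python

    from langchain.chains import ConstitutionalChain

    # All registered principles...
    principles = ConstitutionalChain.get_principles()
    # ...or hand-pick by name (the name below is a hypothetical example):
    # principles = ConstitutionalChain.get_principles(names=["some-principle-name"])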
ERROR: type should be string, got "https://langchain.readthedocs.io\\en\\latest\\_modules\\langchain\\chains\\constitutional_ai\\base.html"
Source code for langchain.chains.conversation.base

"""Chain that carries on a conversation and calls an LLM."""
from typing import Dict, List

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.chains.conversation.prompt import PROMPT
from langchain.chains.llm import LLMChain
from langchain.memory.buffer import ConversationBufferMemory
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseMemory


class ConversationChain(LLMChain, BaseModel):
    """Chain to have a conversation and load context from memory.

    Example:
        .. code-block:: python

            from langchain import ConversationChain, OpenAI

            conversation = ConversationChain(llm=OpenAI())
    """

    memory: BaseMemory = Field(default_factory=ConversationBufferMemory)
    """Default memory store."""
    prompt: BasePromptTemplate = PROMPT
    """Default conversation prompt to use."""

    input_key: str = "input"  #: :meta private:
    output_key: str = "response"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Use this since some prompt vars come from history."""
        return [self.input_key]

    @root_validator()
    def validate_prompt_input_variables(cls, values: Dict) -> Dict:
        """Validate that prompt input variables are consistent."""
        memory_keys = values["memory"].memory_variables
        input_key = values["input_key"]
        if input_key in memory_keys:
            raise ValueError(
                f"The input key {input_key} was also found in the memory keys "
                f"({memory_keys}) - please provide keys that don't overlap."
            )
        prompt_variables = values["prompt"].input_variables
        expected_keys = memory_keys + [input_key]
        if set(expected_keys) != set(prompt_variables):
            raise ValueError(
                "Got unexpected prompt input variables. The prompt expects "
                f"{prompt_variables}, but got {memory_keys} as inputs from "
                f"memory, and {input_key} as the normal input key."
            )
        return values
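A small sketch illustrating the validator above: a custom prompt must expose exactly the memory variables plus the input key ("history" and "input" for the default ConversationBufferMemory). This assumes OpenAI credentials are configured.

from langchain import ConversationChain, OpenAI
from langchain.prompts import PromptTemplate

template = """You are a terse assistant.

{history}
Human: {input}
Assistant:"""
prompt = PromptTemplate(input_variables=["history", "input"], template=template)

# The prompt variables are validated at construction time, not at run time.
conversation = ConversationChain(llm=OpenAI(), prompt=prompt)
conversation.run(input="Hi there")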
Source code for langchain.chains.conversational_retrieval.base

"""Chain for chatting with a vector database."""
from __future__ import annotations

import warnings
from abc import abstractmethod
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.conversational_retrieval.prompts import CONDENSE_QUESTION_PROMPT
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


def _get_chat_history(chat_history: List[Tuple[str, str]]) -> str:
    buffer = ""
    for human_s, ai_s in chat_history:
        human = "Human: " + human_s
        ai = "Assistant: " + ai_s
        buffer += "\n" + "\n".join([human, ai])
    return buffer


class BaseConversationalRetrievalChain(Chain, BaseModel):
    """Chain for chatting with an index."""

    combine_docs_chain: BaseCombineDocumentsChain
    question_generator: LLMChain
    output_key: str = "answer"
    return_source_documents: bool = False
    """Return the source documents."""
    get_chat_history: Optional[Callable[[Tuple[str, str]], str]] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
        """Input keys."""
        return ["question", "chat_history"]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @abstractmethod
    def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
        """Get docs."""

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        if chat_history_str:
            new_question = self.question_generator.run(
                question=question, chat_history=chat_history_str
            )
        else:
            new_question = question
        docs = self._get_docs(new_question, inputs)
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer, _ = self.combine_docs_chain.combine_docs(docs, **new_inputs)
        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}

    async def _acall(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        question = inputs["question"]
        get_chat_history = self.get_chat_history or _get_chat_history
        chat_history_str = get_chat_history(inputs["chat_history"])
        if chat_history_str:
            new_question = await self.question_generator.arun(
                question=question, chat_history=chat_history_str
            )
        else:
            new_question = question
        # TODO: This blocks the event loop, but it's not clear how to avoid it.
        docs = self._get_docs(new_question, inputs)
        new_inputs = inputs.copy()
        new_inputs["question"] = new_question
        new_inputs["chat_history"] = chat_history_str
        answer, _ = await self.combine_docs_chain.acombine_docs(docs, **new_inputs)
        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}

    def save(self, file_path: Union[Path, str]) -> None:
        if self.get_chat_history:
            raise ValueError("Chain not savable when `get_chat_history` is not None.")
        super().save(file_path)


class ConversationalRetrievalChain(BaseConversationalRetrievalChain, BaseModel):
    """Chain for chatting with an index."""

    retriever: BaseRetriever

    def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
        return self.retriever.get_relevant_documents(question)

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        retriever: BaseRetriever,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        qa_prompt: Optional[BasePromptTemplate] = None,
        chain_type: str = "stuff",
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            prompt=qa_prompt,
        )
        condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
        return cls(
            retriever=retriever,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            **kwargs,
        )


class ChatVectorDBChain(BaseConversationalRetrievalChain, BaseModel):
    """Chain for chatting with a vector database."""

    vectorstore: VectorStore = Field(alias="vectorstore")
    top_k_docs_for_context: int = 4
    search_kwargs: dict = Field(default_factory=dict)

    @property
    def _chain_type(self) -> str:
        return "chat-vector-db"

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`ChatVectorDBChain` is deprecated - "
            "please use `from langchain.chains import ConversationalRetrievalChain`"
        )
        return values

    def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
        vectordbkwargs = inputs.get("vectordbkwargs", {})
        full_kwargs = {**self.search_kwargs, **vectordbkwargs}
        return self.vectorstore.similarity_search(
            question, k=self.top_k_docs_for_context, **full_kwargs
        )

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        vectorstore: VectorStore,
        condense_question_prompt: BasePromptTemplate = CONDENSE_QUESTION_PROMPT,
        qa_prompt: Optional[BasePromptTemplate] = None,
        chain_type: str = "stuff",
        **kwargs: Any,
    ) -> BaseConversationalRetrievalChain:
        """Load chain from LLM."""
        doc_chain = load_qa_chain(
            llm,
            chain_type=chain_type,
            prompt=qa_prompt,
        )
        condense_question_chain = LLMChain(llm=llm, prompt=condense_question_prompt)
        return cls(
            vectorstore=vectorstore,
            combine_docs_chain=doc_chain,
            question_generator=condense_question_chain,
            **kwargs,
        )
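A usage sketch, assuming a vector store (here a hypothetical `vectorstore`) built elsewhere: chat history is passed as a list of (human, ai) tuples, and `get_chat_history` can be overridden to control how those tuples are flattened into the condense-question prompt.

from langchain.chains import ConversationalRetrievalChain
from langchain.llms import OpenAI

qa = ConversationalRetrievalChain.from_llm(
    llm=OpenAI(),
    retriever=vectorstore.as_retriever(),  # `vectorstore` assumed to exist
    get_chat_history=lambda pairs: "\n".join(f"Q: {q}\nA: {a}" for q, a in pairs),
)
result = qa({"question": "What did we decide?", "chat_history": [("Hi", "Hello!")]})
print(result["answer"])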
Source code for langchain.chains.graph_qa.base

"""Question answering over a graph."""
from __future__ import annotations

from typing import Any, Dict, List

from pydantic import Field

from langchain.chains.base import Chain
from langchain.chains.graph_qa.prompts import ENTITY_EXTRACTION_PROMPT, PROMPT
from langchain.chains.llm import LLMChain
from langchain.graphs.networkx_graph import NetworkxEntityGraph, get_entities
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate


class GraphQAChain(Chain):
    """Chain for question-answering against a graph."""

    graph: NetworkxEntityGraph = Field(exclude=True)
    entity_extraction_chain: LLMChain
    qa_chain: LLMChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLLM,
        qa_prompt: BasePromptTemplate = PROMPT,
        entity_prompt: BasePromptTemplate = ENTITY_EXTRACTION_PROMPT,
        **kwargs: Any,
    ) -> GraphQAChain:
        """Initialize from LLM."""
        qa_chain = LLMChain(llm=llm, prompt=qa_prompt)
        entity_chain = LLMChain(llm=llm, prompt=entity_prompt)
        return cls(qa_chain=qa_chain, entity_extraction_chain=entity_chain, **kwargs)

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Extract entities, look up info and answer question."""
        question = inputs[self.input_key]
        entity_string = self.entity_extraction_chain.run(question)
        self.callback_manager.on_text(
            "Entities Extracted:", end="\n", verbose=self.verbose
        )
        self.callback_manager.on_text(
            entity_string, color="green", end="\n", verbose=self.verbose
        )
        entities = get_entities(entity_string)
        context = ""
        for entity in entities:
            triplets = self.graph.get_entity_knowledge(entity)
            context += "\n".join(triplets)
        self.callback_manager.on_text("Full Context:", end="\n", verbose=self.verbose)
        self.callback_manager.on_text(
            context, color="green", end="\n", verbose=self.verbose
        )
        result = self.qa_chain({"question": question, "context": context})
        return {self.output_key: result[self.qa_chain.output_key]}
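A sketch of feeding the chain a graph, assuming NetworkxEntityGraph and KnowledgeTriple from langchain.graphs.networkx_graph; their exact constructor signatures may differ across versions, so treat this as illustrative rather than authoritative.

from langchain.chains import GraphQAChain
from langchain.graphs.networkx_graph import KnowledgeTriple, NetworkxEntityGraph
from langchain.llms import OpenAI

graph = NetworkxEntityGraph()
# KnowledgeTriple fields assumed to be (subject, predicate, object).
graph.add_triple(KnowledgeTriple("Ada Lovelace", "collaborated with", "Charles Babbage"))

chain = GraphQAChain.from_llm(OpenAI(), graph=graph, verbose=True)
chain.run("Who did Ada Lovelace collaborate with?")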
Source code for langchain.chains.hyde.base

"""Hypothetical Document Embeddings.

https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations

from typing import Dict, List

import numpy as np
from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.hyde.prompts import PROMPT_MAP
from langchain.chains.llm import LLMChain
from langchain.embeddings.base import Embeddings
from langchain.llms.base import BaseLLM


class HypotheticalDocumentEmbedder(Chain, Embeddings, BaseModel):
    """Generate hypothetical document for query, and then embed that.

    Based on https://arxiv.org/abs/2212.10496
    """

    base_embeddings: Embeddings
    llm_chain: LLMChain

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Input keys for Hyde's LLM chain."""
        return self.llm_chain.input_keys

    @property
    def output_keys(self) -> List[str]:
        """Output keys for Hyde's LLM chain."""
        return self.llm_chain.output_keys

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call the base embeddings."""
        return self.base_embeddings.embed_documents(texts)

    def combine_embeddings(self, embeddings: List[List[float]]) -> List[float]:
        """Combine embeddings into final embeddings."""
        return list(np.array(embeddings).mean(axis=0))

    def embed_query(self, text: str) -> List[float]:
        """Generate a hypothetical document and embed it."""
        var_name = self.llm_chain.input_keys[0]
        result = self.llm_chain.generate([{var_name: text}])
        documents = [generation.text for generation in result.generations[0]]
        embeddings = self.embed_documents(documents)
        return self.combine_embeddings(embeddings)

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        """Call the internal llm chain."""
        return self.llm_chain._call(inputs)

    @classmethod
    def from_llm(
        cls, llm: BaseLLM, base_embeddings: Embeddings, prompt_key: str
    ) -> HypotheticalDocumentEmbedder:
        """Load and use LLMChain for a specific prompt key."""
        prompt = PROMPT_MAP[prompt_key]
        llm_chain = LLMChain(llm=llm, prompt=prompt)
        return cls(base_embeddings=base_embeddings, llm_chain=llm_chain)

    @property
    def _chain_type(self) -> str:
        return "hyde_chain"
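`combine_embeddings` above is just a component-wise mean over the generated documents' vectors; a self-contained check with toy 3-dimensional "embeddings":

import numpy as np

# Two fake document embeddings; the combined query embedding is their mean.
embeddings = [[1.0, 0.0, 2.0], [3.0, 2.0, 0.0]]
combined = list(np.array(embeddings).mean(axis=0))
assert combined == [2.0, 1.0, 1.0]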
Source code for langchain.chains.llm_bash.base

"""Chain that interprets a prompt and executes bash code to perform bash operations."""
from typing import Dict, List

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_bash.prompt import PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.utilities.bash import BashProcess


class LLMBashChain(Chain, BaseModel):
    """Chain that interprets a prompt and executes bash code to perform bash operations.

    Example:
        .. code-block:: python

            from langchain import LLMBashChain, OpenAI

            llm_bash = LLMBashChain(llm=OpenAI())
    """

    llm: BaseLanguageModel
    """LLM wrapper to use."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:
    prompt: BasePromptTemplate = PROMPT

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_executor = LLMChain(prompt=self.prompt, llm=self.llm)
        bash_executor = BashProcess()
        self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)

        t = llm_executor.predict(question=inputs[self.input_key])
        self.callback_manager.on_text(t, color="green", verbose=self.verbose)

        t = t.strip()
        if t.startswith("```bash"):
            # Split the fenced block into lines and drop the opening and
            # closing fences, keeping only the commands in between.
            command_list = t.split("\n")[1:-1]
            output = bash_executor.run(command_list)

            self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
            self.callback_manager.on_text(output, color="yellow", verbose=self.verbose)
        else:
            raise ValueError(f"unknown format from LLM: {t}")
        return {self.output_key: output}

    @property
    def _chain_type(self) -> str:
        return "llm_bash_chain"
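The fence handling above, isolated as a self-contained snippet: given a ```bash block from the LLM, the chain keeps only the lines between the fences and hands them to BashProcess.run.

# Pure-Python demonstration of the parsing step; no LLM or shell involved.
t = "```bash\nls /tmp\necho done\n```"
assert t.startswith("```bash")
command_list = t.split("\n")[1:-1]
assert command_list == ["ls /tmp", "echo done"]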
Source code for langchain.chains.llm_checker.base

"""Chain for question-answering with self-verification."""
from typing import Dict, List

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_checker.prompt import (
    CHECK_ASSERTIONS_PROMPT,
    CREATE_DRAFT_ANSWER_PROMPT,
    LIST_ASSERTIONS_PROMPT,
    REVISED_ANSWER_PROMPT,
)
from langchain.chains.sequential import SequentialChain
from langchain.llms.base import BaseLLM
from langchain.prompts import PromptTemplate


class LLMCheckerChain(Chain, BaseModel):
    """Chain for question-answering with self-verification.

    Example:
        .. code-block:: python

            from langchain import OpenAI, LLMCheckerChain

            llm = OpenAI(temperature=0.7)
            checker_chain = LLMCheckerChain(llm=llm)
    """

    llm: BaseLLM
    """LLM wrapper to use."""
    create_draft_answer_prompt: PromptTemplate = CREATE_DRAFT_ANSWER_PROMPT
    list_assertions_prompt: PromptTemplate = LIST_ASSERTIONS_PROMPT
    check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
    revised_answer_prompt: PromptTemplate = REVISED_ANSWER_PROMPT
    """Prompt to use when questioning the documents."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        question = inputs[self.input_key]

        create_draft_answer_chain = LLMChain(
            llm=self.llm,
            prompt=self.create_draft_answer_prompt,
            output_key="statement",
        )
        list_assertions_chain = LLMChain(
            llm=self.llm, prompt=self.list_assertions_prompt, output_key="assertions"
        )
        check_assertions_chain = LLMChain(
            llm=self.llm,
            prompt=self.check_assertions_prompt,
            output_key="checked_assertions",
        )
        revised_answer_chain = LLMChain(
            llm=self.llm,
            prompt=self.revised_answer_prompt,
            output_key="revised_statement",
        )
        chains = [
            create_draft_answer_chain,
            list_assertions_chain,
            check_assertions_chain,
            revised_answer_chain,
        ]
        question_to_checked_assertions_chain = SequentialChain(
            chains=chains,
            input_variables=["question"],
            output_variables=["revised_statement"],
            verbose=True,
        )
        output = question_to_checked_assertions_chain({"question": question})
        return {self.output_key: output["revised_statement"]}

    @property
    def _chain_type(self) -> str:
        return "llm_checker_chain"
Source code for langchain.chains.llm_math.base

"""Chain that interprets a prompt and executes python code to do math."""
from typing import Dict, List

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.llm_math.prompt import PROMPT
from langchain.llms.base import BaseLLM
from langchain.prompts.base import BasePromptTemplate
from langchain.python import PythonREPL


class LLMMathChain(Chain, BaseModel):
    """Chain that interprets a prompt and executes python code to do math.

    Example:
        .. code-block:: python

            from langchain import LLMMathChain, OpenAI

            llm_math = LLMMathChain(llm=OpenAI())
    """

    llm: BaseLLM
    """LLM wrapper to use."""
    prompt: BasePromptTemplate = PROMPT
    """Prompt to use to translate to python if necessary."""
    input_key: str = "question"  #: :meta private:
    output_key: str = "answer"  #: :meta private:

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Expect output key.

        :meta private:
        """
        return [self.output_key]

    def _process_llm_result(self, t: str) -> Dict[str, str]:
        python_executor = PythonREPL()
        self.callback_manager.on_text(t, color="green", verbose=self.verbose)
        t = t.strip()
        if t.startswith("```python"):
            # Strip the opening ```python fence and the closing ``` fence.
            code = t[9:-4]
            output = python_executor.run(code)
            self.callback_manager.on_text("\nAnswer: ", verbose=self.verbose)
            self.callback_manager.on_text(output, color="yellow", verbose=self.verbose)
            answer = "Answer: " + output
        elif t.startswith("Answer:"):
            answer = t
        elif "Answer:" in t:
            answer = "Answer: " + t.split("Answer:")[-1]
        else:
            raise ValueError(f"unknown format from LLM: {t}")
        return {self.output_key: answer}

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_executor = LLMChain(
            prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
        )
        self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
        t = llm_executor.predict(question=inputs[self.input_key], stop=["```output"])
        return self._process_llm_result(t)

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_executor = LLMChain(
            prompt=self.prompt, llm=self.llm, callback_manager=self.callback_manager
        )
        self.callback_manager.on_text(inputs[self.input_key], verbose=self.verbose)
        t = await llm_executor.apredict(
            question=inputs[self.input_key], stop=["```output"]
        )
        return self._process_llm_result(t)

    @property
    def _chain_type(self) -> str:
        return "llm_math_chain"
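The slicing in `_process_llm_result`, isolated: `t[9:-4]` drops the leading "```python" (9 characters) and the trailing newline-plus-closing-fence (4 characters), leaving only the code to execute.

# Pure-Python demonstration of the slice; no LLM or REPL involved.
t = "```python\nprint(37 * 67)\n```"
code = t[9:-4]
assert code == "\nprint(37 * 67)"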
Source code for langchain.chains.llm_summarization_checker.base

"""Chain for summarization with self-verification."""
from pathlib import Path
from typing import Dict, List

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sequential import SequentialChain
from langchain.llms.base import BaseLLM
from langchain.prompts.prompt import PromptTemplate

PROMPTS_DIR = Path(__file__).parent / "prompts"

CREATE_ASSERTIONS_PROMPT = PromptTemplate.from_file(
    PROMPTS_DIR / "create_facts.txt", ["summary"]
)
CHECK_ASSERTIONS_PROMPT = PromptTemplate.from_file(
    PROMPTS_DIR / "check_facts.txt", ["assertions"]
)
REVISED_SUMMARY_PROMPT = PromptTemplate.from_file(
    PROMPTS_DIR / "revise_summary.txt", ["checked_assertions", "summary"]
)
ARE_ALL_TRUE_PROMPT = PromptTemplate.from_file(
    PROMPTS_DIR / "are_all_true_prompt.txt", ["checked_assertions"]
)


class LLMSummarizationCheckerChain(Chain, BaseModel):
    """Chain for summarization with self-verification.

    Example:
        .. code-block:: python

            from langchain import OpenAI, LLMSummarizationCheckerChain

            llm = OpenAI(temperature=0.0)
            checker_chain = LLMSummarizationCheckerChain(llm=llm)
    """

    llm: BaseLLM
    """LLM wrapper to use."""
    create_assertions_prompt: PromptTemplate = CREATE_ASSERTIONS_PROMPT
    check_assertions_prompt: PromptTemplate = CHECK_ASSERTIONS_PROMPT
    revised_summary_prompt: PromptTemplate = REVISED_SUMMARY_PROMPT
    are_all_true_prompt: PromptTemplate = ARE_ALL_TRUE_PROMPT

    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    max_checks: int = 2
    """Maximum number of times to check the assertions. Default to double-checking."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        all_true = False
        count = 0
        output = None
        original_input = inputs[self.input_key]
        chain_input = original_input

        while not all_true and count < self.max_checks:
            chain = SequentialChain(
                chains=[
                    LLMChain(
                        llm=self.llm,
                        prompt=self.create_assertions_prompt,
                        output_key="assertions",
                        verbose=self.verbose,
                    ),
                    LLMChain(
                        llm=self.llm,
                        prompt=self.check_assertions_prompt,
                        output_key="checked_assertions",
                        verbose=self.verbose,
                    ),
                    LLMChain(
                        llm=self.llm,
                        prompt=self.revised_summary_prompt,
                        output_key="revised_summary",
                        verbose=self.verbose,
                    ),
                    LLMChain(
                        llm=self.llm,
                        output_key="all_true",
                        prompt=self.are_all_true_prompt,
                        verbose=self.verbose,
                    ),
                ],
                input_variables=["summary"],
                output_variables=["all_true", "revised_summary"],
                verbose=self.verbose,
            )
            output = chain({"summary": chain_input})
            count += 1

            if output["all_true"].strip() == "True":
                break

            if self.verbose:
                print(output["revised_summary"])

            chain_input = output["revised_summary"]

        if not output:
            raise ValueError("No output from chain")

        return {self.output_key: output["revised_summary"].strip()}

    @property
    def _chain_type(self) -> str:
        return "llm_summarization_checker_chain"
Source code for langchain.chains.pal.base

"""Implements Program-Aided Language Models.

As in https://arxiv.org/pdf/2211.10435.pdf.
"""
from __future__ import annotations

from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.python import PythonREPL
from langchain.schema import BaseLanguageModel


class PALChain(Chain, BaseModel):
    """Implements Program-Aided Language Models."""

    llm: BaseLanguageModel
    prompt: BasePromptTemplate
    stop: str = "\n\n"
    get_answer_expr: str = "print(solution())"
    python_globals: Optional[Dict[str, Any]] = None
    python_locals: Optional[Dict[str, Any]] = None
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return self.prompt.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
        code = llm_chain.predict(stop=[self.stop], **inputs)
        self.callback_manager.on_text(
            code, color="green", end="\n", verbose=self.verbose
        )
        repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals)
        res = repl.run(code + f"\n{self.get_answer_expr}")
        output = {self.output_key: res.strip()}
        if self.return_intermediate_steps:
            output["intermediate_steps"] = code
        return output

    @classmethod
    def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
        """Load PAL from math prompt."""
        return cls(
            llm=llm,
            prompt=MATH_PROMPT,
            stop="\n\n",
            get_answer_expr="print(solution())",
            **kwargs,
        )

    @classmethod
    def from_colored_object_prompt(
        cls, llm: BaseLanguageModel, **kwargs: Any
    ) -> PALChain:
        """Load PAL from colored object prompt."""
        return cls(
            llm=llm,
            prompt=COLORED_OBJECT_PROMPT,
            stop="\n\n\n",
            get_answer_expr="print(answer)",
            **kwargs,
        )

    @property
    def _chain_type(self) -> str:
        return "pal_chain"
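A usage sketch for the constructors above. It assumes MATH_PROMPT takes a "question" input variable (check MATH_PROMPT.input_variables if unsure) and that OpenAI credentials are configured.

from langchain.chains import PALChain
from langchain.llms import OpenAI

pal_chain = PALChain.from_math_prompt(OpenAI(temperature=0), verbose=True)
pal_chain.run(question="If I have 3 apples and buy 2 more, how many do I have?")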
Source code for langchain.chains.qa_generation.base

from __future__ import annotations

import json
from typing import Any, Dict, List, Optional

from pydantic import Field

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_generation.prompt import PROMPT_SELECTOR
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter


class QAGenerationChain(Chain):
    llm_chain: LLMChain
    text_splitter: TextSplitter = Field(
        default=RecursiveCharacterTextSplitter(chunk_overlap=500)
    )
    input_key: str = "text"
    output_key: str = "questions"
    k: Optional[int] = None

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[BasePromptTemplate] = None,
        **kwargs: Any,
    ) -> QAGenerationChain:
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        chain = LLMChain(llm=llm, prompt=_prompt)
        return cls(llm_chain=chain, **kwargs)

    @property
    def _chain_type(self) -> str:
        raise NotImplementedError

    @property
    def input_keys(self) -> List[str]:
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        return [self.output_key]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        docs = self.text_splitter.create_documents([inputs[self.input_key]])
        results = self.llm_chain.generate([{"text": d.page_content} for d in docs])
        qa = [json.loads(res[0].text) for res in results.generations]
        return {self.output_key: qa}

    async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
        raise NotImplementedError
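A sketch of driving QAGenerationChain. The json.loads call above implies the prompt asks the model for JSON, so each parsed item is expected to be a {"question": ..., "answer": ...} dict; that shape is an assumption based on the code, not a documented contract.

from langchain.chains import QAGenerationChain
from langchain.chat_models import ChatOpenAI

chain = QAGenerationChain.from_llm(ChatOpenAI(temperature=0))
qa_pairs = chain.run("The Nile is a major north-flowing river in Africa.")
print(qa_pairs[0])  # e.g. {"question": "...", "answer": "..."}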
Source code for langchain.chains.qa_with_sources.base

"""Question answering with sources over documents."""
from __future__ import annotations

from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.map_reduce import MapReduceDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langchain.chains.qa_with_sources.map_reduce_prompt import (
    COMBINE_PROMPT,
    EXAMPLE_PROMPT,
    QUESTION_PROMPT,
)
from langchain.docstore.document import Document
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel


class BaseQAWithSourcesChain(Chain, BaseModel, ABC):
    """Question answering with sources over documents."""

    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine documents."""
    question_key: str = "question"  #: :meta private:
    input_docs_key: str = "docs"  #: :meta private:
    answer_key: str = "answer"  #: :meta private:
    sources_answer_key: str = "sources"  #: :meta private:
    return_source_documents: bool = False
    """Return the source documents."""

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        document_prompt: BasePromptTemplate = EXAMPLE_PROMPT,
        question_prompt: BasePromptTemplate = QUESTION_PROMPT,
        combine_prompt: BasePromptTemplate = COMBINE_PROMPT,
        **kwargs: Any,
    ) -> BaseQAWithSourcesChain:
        """Construct the chain from an LLM."""
        llm_question_chain = LLMChain(llm=llm, prompt=question_prompt)
        llm_combine_chain = LLMChain(llm=llm, prompt=combine_prompt)
        combine_results_chain = StuffDocumentsChain(
            llm_chain=llm_combine_chain,
            document_prompt=document_prompt,
            document_variable_name="summaries",
        )
        combine_document_chain = MapReduceDocumentsChain(
            llm_chain=llm_question_chain,
            combine_document_chain=combine_results_chain,
            document_variable_name="context",
        )
        return cls(
            combine_documents_chain=combine_document_chain,
            **kwargs,
        )

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> BaseQAWithSourcesChain:
        """Load chain from chain type."""
        _chain_kwargs = chain_type_kwargs or {}
        combine_document_chain = load_qa_with_sources_chain(
            llm, chain_type=chain_type, **_chain_kwargs
        )
        return cls(combine_documents_chain=combine_document_chain, **kwargs)

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.question_key]

    @property
    def output_keys(self) -> List[str]:
        """Return output key.

        :meta private:
        """
        _output_keys = [self.answer_key, self.sources_answer_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @root_validator(pre=True)
    def validate_naming(cls, values: Dict) -> Dict:
        """Fix backwards compatibility in naming."""
        if "combine_document_chain" in values:
            values["combine_documents_chain"] = values.pop("combine_document_chain")
        return values

    @abstractmethod
    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        """Get docs to run questioning over."""

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        docs = self._get_docs(inputs)
        answer, _ = self.combine_documents_chain.combine_docs(docs, **inputs)
        if "SOURCES: " in answer:
            answer, sources = answer.split("SOURCES: ")
        else:
            sources = ""
        result: Dict[str, Any] = {
            self.answer_key: answer,
            self.sources_answer_key: sources,
        }
        if self.return_source_documents:
            result["source_documents"] = docs
        return result


class QAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
    """Question answering with sources over documents."""

    input_docs_key: str = "docs"  #: :meta private:

    @property
    def input_keys(self) -> List[str]:
        """Expect input key.

        :meta private:
        """
        return [self.input_docs_key, self.question_key]

    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        return inputs.pop(self.input_docs_key)

    @property
    def _chain_type(self) -> str:
        return "qa_with_sources_chain"
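The answer/sources split in `_call`, isolated: the combine chain is prompted to end its answer with a "SOURCES:" section, which is peeled off here into a separate output key.

# Pure-Python demonstration of the split; no chain execution involved.
answer = "Paris is the capital of France.\nSOURCES: 1-pl, 2-pl"
if "SOURCES: " in answer:
    answer, sources = answer.split("SOURCES: ")
print(answer.strip())  # Paris is the capital of France.
print(sources)         # 1-pl, 2-pl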
Source code for langchain.chains.qa_with_sources.retrieval

"""Question-answering with sources over an index."""
from typing import Any, Dict, List

from pydantic import BaseModel, Field

from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.docstore.document import Document
from langchain.schema import BaseRetriever


class RetrievalQAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
    """Question-answering with sources over an index."""

    retriever: BaseRetriever = Field(exclude=True)
    """Index to connect to."""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit."""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true."""

    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
        num_docs = len(docs)

        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain, StuffDocumentsChain
        ):
            tokens = [
                self.combine_documents_chain.llm_chain.llm.get_num_tokens(
                    doc.page_content
                )
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]

        return docs[:num_docs]

    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        question = inputs[self.question_key]
        docs = self.retriever.get_relevant_documents(question)
        return self._reduce_tokens_below_limit(docs)
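The trimming loop in `_reduce_tokens_below_limit`, isolated with stub token counts: documents are dropped from the tail of the list until the running total fits under the limit.

# Pure-Python demonstration; the per-document counts are stub values.
docs_tokens = [1500, 1500, 1500]
max_tokens_limit = 3375

num_docs = len(docs_tokens)
token_count = sum(docs_tokens[:num_docs])
while token_count > max_tokens_limit:
    num_docs -= 1
    token_count -= docs_tokens[num_docs]
assert num_docs == 2  # 4500 > 3375, so the last document is dropped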
Source code for langchain.chains.qa_with_sources.vector_db

"""Question-answering with sources over a vector database."""
import warnings
from typing import Any, Dict, List

from pydantic import BaseModel, Field, root_validator

from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.qa_with_sources.base import BaseQAWithSourcesChain
from langchain.docstore.document import Document
from langchain.vectorstores.base import VectorStore


class VectorDBQAWithSourcesChain(BaseQAWithSourcesChain, BaseModel):
    """Question-answering with sources over a vector database."""

    vectorstore: VectorStore = Field(exclude=True)
    """Vector Database to connect to."""
    k: int = 4
    """Number of results to return from store."""
    reduce_k_below_max_tokens: bool = False
    """Reduce the number of results to return from store based on tokens limit."""
    max_tokens_limit: int = 3375
    """Restrict the docs to return from store based on tokens,
    enforced only for StuffDocumentsChain and if reduce_k_below_max_tokens is set to true."""
    search_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Extra search args."""

    def _reduce_tokens_below_limit(self, docs: List[Document]) -> List[Document]:
        num_docs = len(docs)

        if self.reduce_k_below_max_tokens and isinstance(
            self.combine_documents_chain, StuffDocumentsChain
        ):
            tokens = [
                self.combine_documents_chain.llm_chain.llm.get_num_tokens(
                    doc.page_content
                )
                for doc in docs
            ]
            token_count = sum(tokens[:num_docs])
            while token_count > self.max_tokens_limit:
                num_docs -= 1
                token_count -= tokens[num_docs]

        return docs[:num_docs]

    def _get_docs(self, inputs: Dict[str, Any]) -> List[Document]:
        question = inputs[self.question_key]
        docs = self.vectorstore.similarity_search(
            question, k=self.k, **self.search_kwargs
        )
        return self._reduce_tokens_below_limit(docs)

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`VectorDBQAWithSourcesChain` is deprecated - "
            "please use `from langchain.chains import RetrievalQAWithSourcesChain`"
        )
        return values

    @property
    def _chain_type(self) -> str:
        return "vector_db_qa_with_sources_chain"
Source code for langchain.chains.retrieval_qa.base

"""Chain for question-answering against a vector database."""
from __future__ import annotations

import warnings
from abc import abstractmethod
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, Field, root_validator

from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chains.question_answering.stuff_prompt import PROMPT_SELECTOR
from langchain.prompts import PromptTemplate
from langchain.schema import BaseLanguageModel, BaseRetriever, Document
from langchain.vectorstores.base import VectorStore


class BaseRetrievalQA(Chain, BaseModel):
    combine_documents_chain: BaseCombineDocumentsChain
    """Chain to use to combine the documents."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_source_documents: bool = False
    """Return the source documents."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True
        allow_population_by_field_name = True

    @property
    def input_keys(self) -> List[str]:
        """Return the input keys.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the output keys.

        :meta private:
        """
        _output_keys = [self.output_key]
        if self.return_source_documents:
            _output_keys = _output_keys + ["source_documents"]
        return _output_keys

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        prompt: Optional[PromptTemplate] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Initialize from LLM."""
        _prompt = prompt or PROMPT_SELECTOR.get_prompt(llm)
        llm_chain = LLMChain(llm=llm, prompt=_prompt)
        document_prompt = PromptTemplate(
            input_variables=["page_content"], template="Context:\n{page_content}"
        )
        combine_documents_chain = StuffDocumentsChain(
            llm_chain=llm_chain,
            document_variable_name="context",
            document_prompt=document_prompt,
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @classmethod
    def from_chain_type(
        cls,
        llm: BaseLanguageModel,
        chain_type: str = "stuff",
        chain_type_kwargs: Optional[dict] = None,
        **kwargs: Any,
    ) -> BaseRetrievalQA:
        """Load chain from chain type."""
        _chain_type_kwargs = chain_type_kwargs or {}
        combine_documents_chain = load_qa_chain(
            llm, chain_type=chain_type, **_chain_type_kwargs
        )
        return cls(combine_documents_chain=combine_documents_chain, **kwargs)

    @abstractmethod
    def _get_docs(self, question: str) -> List[Document]:
        """Get documents to do question answering over."""

    def _call(self, inputs: Dict[str, str]) -> Dict[str, Any]:
        """Run get_relevant_text and llm on input query.

        If the chain has 'return_source_documents' set to 'True', it also
        returns the retrieved documents under the key 'source_documents'.

        Example:
        .. code-block:: python

            res = indexqa({'query': 'This is my query'})
            answer, docs = res['result'], res['source_documents']
        """
        question = inputs[self.input_key]
        docs = self._get_docs(question)
        answer, _ = self.combine_documents_chain.combine_docs(docs, question=question)

        if self.return_source_documents:
            return {self.output_key: answer, "source_documents": docs}
        else:
            return {self.output_key: answer}


class RetrievalQA(BaseRetrievalQA, BaseModel):
    """Chain for question-answering against an index.

    Example:
        .. code-block:: python

            from langchain.llms import OpenAI
            from langchain.chains import RetrievalQA
            from langchain.vectorstores import FAISS

            vectordb = FAISS(...)
            retrievalQA = RetrievalQA.from_llm(
                llm=OpenAI(), retriever=vectordb.as_retriever()
            )
    """

    retriever: BaseRetriever = Field(exclude=True)

    def _get_docs(self, question: str) -> List[Document]:
        return self.retriever.get_relevant_documents(question)


class VectorDBQA(BaseRetrievalQA, BaseModel):
    """Chain for question-answering against a vector database."""

    vectorstore: VectorStore = Field(exclude=True, alias="vectorstore")
    """Vector Database to connect to."""
    k: int = 4
    """Number of documents to query for."""
    search_type: str = "similarity"
    """Search type to use over vectorstore. `similarity` or `mmr`."""
    search_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Extra search args."""

    @root_validator()
    def raise_deprecation(cls, values: Dict) -> Dict:
        warnings.warn(
            "`VectorDBQA` is deprecated - "
            "please use `from langchain.chains import RetrievalQA`"
        )
        return values

    @root_validator()
    def validate_search_type(cls, values: Dict) -> Dict:
        """Validate search type."""
        if "search_type" in values:
            search_type = values["search_type"]
            if search_type not in ("similarity", "mmr"):
                raise ValueError(f"search_type of {search_type} not allowed.")
        return values

    def _get_docs(self, question: str) -> List[Document]:
        if self.search_type == "similarity":
            docs = self.vectorstore.similarity_search(
                question, k=self.k, **self.search_kwargs
            )
        elif self.search_type == "mmr":
            docs = self.vectorstore.max_marginal_relevance_search(
                question, k=self.k, **self.search_kwargs
            )
        else:
            raise ValueError(f"search_type of {self.search_type} not allowed.")
        return docs

    @property
    def _chain_type(self) -> str:
        """Return the chain type."""
        return "vector_db_qa"
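A usage sketch, assuming an existing vector store (here a hypothetical `vectordb`) and OpenAI credentials; from_chain_type is the usual entry point, with chain_type="stuff" as the default.

from langchain.chains import RetrievalQA
from langchain.llms import OpenAI

qa = RetrievalQA.from_chain_type(
    llm=OpenAI(),
    chain_type="stuff",
    retriever=vectordb.as_retriever(),  # `vectordb` assumed built elsewhere
    return_source_documents=True,
)
res = qa({"query": "What is our refund policy?"})
print(res["result"], res["source_documents"])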
Source code for langchain.chains.sql_database.base

"""Chain for interacting with SQL Database."""
from __future__ import annotations

from typing import Any, Dict, List

from pydantic import BaseModel, Extra, Field

from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT
from langchain.prompts.base import BasePromptTemplate
from langchain.schema import BaseLanguageModel
from langchain.sql_database import SQLDatabase


class SQLDatabaseChain(Chain, BaseModel):
    """Chain for interacting with SQL Database.

    Example:
        .. code-block:: python

            from langchain import SQLDatabaseChain, OpenAI, SQLDatabase

            db = SQLDatabase(...)
            db_chain = SQLDatabaseChain(llm=OpenAI(), database=db)
    """

    llm: BaseLanguageModel
    """LLM wrapper to use."""
    database: SQLDatabase = Field(exclude=True)
    """SQL Database to connect to."""
    prompt: BasePromptTemplate = PROMPT
    """Prompt to use to translate natural language to SQL."""
    top_k: int = 5
    """Number of results to return from the query."""
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False
    """Whether or not to return the intermediate steps along with the final answer."""
    return_direct: bool = False
    """Whether or not to return the result of querying the SQL table directly."""

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
        arbitrary_types_allowed = True

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
        llm_chain = LLMChain(llm=self.llm, prompt=self.prompt)
        input_text = f"{inputs[self.input_key]} \nSQLQuery:"
        self.callback_manager.on_text(input_text, verbose=self.verbose)
        # If not present, then defaults to None which is all tables.
        table_names_to_use = inputs.get("table_names_to_use")
        table_info = self.database.get_table_info(table_names=table_names_to_use)
        llm_inputs = {
            "input": input_text,
            "top_k": self.top_k,
            "dialect": self.database.dialect,
            "table_info": table_info,
            "stop": ["\nSQLResult:"],
        }
        intermediate_steps = []
        sql_cmd = llm_chain.predict(**llm_inputs)
        intermediate_steps.append(sql_cmd)
        self.callback_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
        result = self.database.run(sql_cmd)
        intermediate_steps.append(result)
        self.callback_manager.on_text("\nSQLResult: ", verbose=self.verbose)
        self.callback_manager.on_text(result, color="yellow", verbose=self.verbose)
        # If return_direct is set, the raw SQL result is the final answer.
        if self.return_direct:
            final_result = result
        else:
            self.callback_manager.on_text("\nAnswer:", verbose=self.verbose)
            input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
            llm_inputs["input"] = input_text
            final_result = llm_chain.predict(**llm_inputs)
            self.callback_manager.on_text(
                final_result, color="green", verbose=self.verbose
            )
        chain_result: Dict[str, Any] = {self.output_key: final_result}
        if self.return_intermediate_steps:
            chain_result["intermediate_steps"] = intermediate_steps
        return chain_result

    @property
    def _chain_type(self) -> str:
        return "sql_database_chain"


class SQLDatabaseSequentialChain(Chain, BaseModel):
    """Chain for querying SQL database that is a sequential chain.

    The chain is as follows:
    1. Based on the query, determine which tables to use.
    2. Based on those tables, call the normal SQL database chain.

    This is useful in cases where the number of tables in the database is large.
    """

    decider_chain: LLMChain
    sql_chain: SQLDatabaseChain
    input_key: str = "query"  #: :meta private:
    output_key: str = "result"  #: :meta private:
    return_intermediate_steps: bool = False

    @classmethod
    def from_llm(
        cls,
        llm: BaseLanguageModel,
        database: SQLDatabase,
        query_prompt: BasePromptTemplate = PROMPT,
        decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
        **kwargs: Any,
    ) -> SQLDatabaseSequentialChain:
        """Load the necessary chains."""
        sql_chain = SQLDatabaseChain(
            llm=llm, database=database, prompt=query_prompt, **kwargs
        )
        decider_chain = LLMChain(
            llm=llm, prompt=decider_prompt, output_key="table_names"
        )
        return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)

    @property
    def input_keys(self) -> List[str]:
        """Return the singular input key.

        :meta private:
        """
        return [self.input_key]

    @property
    def output_keys(self) -> List[str]:
        """Return the singular output key.

        :meta private:
        """
        if not self.return_intermediate_steps:
            return [self.output_key]
        else:
            return [self.output_key, "intermediate_steps"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        _table_names = self.sql_chain.database.get_table_names()
        table_names = ", ".join(_table_names)
        llm_inputs = {
            "query": inputs[self.input_key],
            "table_names": table_names,
        }
        table_names_to_use = self.decider_chain.predict_and_parse(**llm_inputs)
        self.callback_manager.on_text(
            "Table names to use:", end="\n", verbose=self.verbose
        )
        self.callback_manager.on_text(
            str(table_names_to_use), color="yellow", verbose=self.verbose
        )
        new_inputs = {
            self.sql_chain.input_key: inputs[self.input_key],
            "table_names_to_use": table_names_to_use,
        }
        return self.sql_chain(new_inputs, return_only_outputs=True)

    @property
    def _chain_type(self) -> str:
        return "sql_database_sequential_chain"
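A usage sketch, assuming a local SQLite file named example.db; SQLDatabase.from_uri accepts any SQLAlchemy connection string.

from langchain import OpenAI, SQLDatabase, SQLDatabaseChain

db = SQLDatabase.from_uri("sqlite:///example.db")
db_chain = SQLDatabaseChain(llm=OpenAI(temperature=0), database=db, verbose=True)
db_chain.run("How many employees are there?")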
Source code for langchain.docstore.in_memory

"""Simple in memory docstore in the form of a dict."""
from typing import Dict, Union

from langchain.docstore.base import AddableMixin, Docstore
from langchain.docstore.document import Document


class InMemoryDocstore(Docstore, AddableMixin):
    """Simple in memory docstore in the form of a dict."""

    def __init__(self, _dict: Dict[str, Document]):
        """Initialize with dict."""
        self._dict = _dict

    def add(self, texts: Dict[str, Document]) -> None:
        """Add texts to in memory dictionary."""
        overlapping = set(texts).intersection(self._dict)
        if overlapping:
            raise ValueError(f"Tried to add ids that already exist: {overlapping}")
        self._dict = dict(self._dict, **texts)

    def search(self, search: str) -> Union[str, Document]:
        """Search via direct lookup."""
        if search not in self._dict:
            return f"ID {search} not found."
        else:
            return self._dict[search]
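A self-contained sketch of the docstore semantics above: duplicate ids are rejected with a ValueError, and search falls back to a "not found" string rather than raising.

from langchain.docstore.document import Document
from langchain.docstore.in_memory import InMemoryDocstore

store = InMemoryDocstore({"1": Document(page_content="hello")})
store.add({"2": Document(page_content="world")})
print(store.search("2").page_content)  # world
print(store.search("3"))               # ID 3 not found.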
1ed5aeab3593-0
Source code for langchain.docstore.wikipedia

"""Wrapper around wikipedia API."""
from typing import Union

from langchain.docstore.base import Docstore
from langchain.docstore.document import Document


class Wikipedia(Docstore):
    """Wrapper around wikipedia API."""

    def __init__(self) -> None:
        """Check that wikipedia package is installed."""
        try:
            import wikipedia  # noqa: F401
        except ImportError:
            raise ValueError(
                "Could not import wikipedia python package. "
                "Please install it with `pip install wikipedia`."
            )

    def search(self, search: str) -> Union[str, Document]:
        """Try to search for a wiki page.

        If the page exists, return its content as a Document whose metadata
        holds the page URL. If the page does not exist or the title is
        ambiguous, return a string listing similar entries instead.
        """
        import wikipedia

        try:
            # Fetch the page once and reuse it for both content and URL.
            page = wikipedia.page(search)
            result: Union[str, Document] = Document(
                page_content=page.content, metadata={"page": page.url}
            )
        except (wikipedia.PageError, wikipedia.DisambiguationError):
            result = f"Could not find [{search}]. Similar: {wikipedia.search(search)}"
        return result
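A quick usage sketch (requires ``pip install wikipedia`` and network access):

.. code-block:: python

    from langchain.docstore.wikipedia import Wikipedia

    docstore = Wikipedia()
    result = docstore.search("Python (programming language)")

    # Either a Document with the page content, or a "Similar:" string.
    if hasattr(result, "page_content"):
        print(result.metadata["page"])
    else:
        print(result)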
Source code for langchain.embeddings.cohere

"""Wrapper around Cohere embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env


class CohereEmbeddings(BaseModel, Embeddings):
    """Wrapper around Cohere embedding models.

    To use, you should have the ``cohere`` python package installed, and the
    environment variable ``COHERE_API_KEY`` set with your API key, or pass it
    as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import CohereEmbeddings
            cohere = CohereEmbeddings(model="medium", cohere_api_key="my-api-key")
    """

    client: Any  #: :meta private:
    model: str = "large"
    """Model name to use."""

    truncate: Optional[str] = None
    """Truncate embeddings that are too long from start or end ("NONE"|"START"|"END")"""

    cohere_api_key: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        cohere_api_key = get_from_dict_or_env(
            values, "cohere_api_key", "COHERE_API_KEY"
        )
        try:
            import cohere

            values["client"] = cohere.Client(cohere_api_key)
        except ImportError:
            raise ValueError(
                "Could not import cohere python package. "
                "Please install it with `pip install cohere`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to Cohere's embedding endpoint.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        embeddings = self.client.embed(
            model=self.model, texts=texts, truncate=self.truncate
        ).embeddings
        return [list(map(float, e)) for e in embeddings]

    def embed_query(self, text: str) -> List[float]:
        """Call out to Cohere's embedding endpoint.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        embedding = self.client.embed(
            model=self.model, texts=[text], truncate=self.truncate
        ).embeddings[0]
        return list(map(float, embedding))
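A brief usage sketch; the API key is a placeholder, and running this requires the ``cohere`` package and a real key:

.. code-block:: python

    from langchain.embeddings import CohereEmbeddings

    embeddings = CohereEmbeddings(model="large", cohere_api_key="my-api-key")

    query_vector = embeddings.embed_query("What is LangChain?")
    doc_vectors = embeddings.embed_documents(["doc one", "doc two"])
    assert len(doc_vectors) == 2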
Source code for langchain.embeddings.fake

from typing import List

import numpy as np
from pydantic import BaseModel

from langchain.embeddings.base import Embeddings


class FakeEmbeddings(Embeddings, BaseModel):
    """Fake embedding model that returns random vectors, useful for testing."""

    size: int

    def _get_embedding(self) -> List[float]:
        # Draw a random vector from a standard normal distribution.
        return list(np.random.normal(size=self.size))

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        return [self._get_embedding() for _ in texts]

    def embed_query(self, text: str) -> List[float]:
        return self._get_embedding()
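A tiny usage sketch, handy for unit tests that need embeddings without an API:

.. code-block:: python

    from langchain.embeddings.fake import FakeEmbeddings

    embeddings = FakeEmbeddings(size=8)
    vector = embeddings.embed_query("anything")
    assert len(vector) == 8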
Source code for langchain.embeddings.huggingface

"""Wrapper around HuggingFace embedding models."""
from typing import Any, List

from pydantic import BaseModel, Extra

from langchain.embeddings.base import Embeddings

DEFAULT_MODEL_NAME = "sentence-transformers/all-mpnet-base-v2"
DEFAULT_INSTRUCT_MODEL = "hkunlp/instructor-large"
DEFAULT_EMBED_INSTRUCTION = "Represent the document for retrieval: "
DEFAULT_QUERY_INSTRUCTION = (
    "Represent the question for retrieving supporting documents: "
)


class HuggingFaceEmbeddings(BaseModel, Embeddings):
    """Wrapper around sentence_transformers embedding models.

    To use, you should have the ``sentence_transformers`` python package installed.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceEmbeddings
            model_name = "sentence-transformers/all-mpnet-base-v2"
            hf = HuggingFaceEmbeddings(model_name=model_name)
    """

    client: Any  #: :meta private:
    model_name: str = DEFAULT_MODEL_NAME
    """Model name to use."""

    def __init__(self, **kwargs: Any):
        """Initialize the sentence_transformer."""
        super().__init__(**kwargs)
        try:
            import sentence_transformers

            self.client = sentence_transformers.SentenceTransformer(self.model_name)
        except ImportError:
            raise ValueError(
                "Could not import sentence_transformers python package. "
                "Please install it with `pip install sentence_transformers`."
            )

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Compute doc embeddings using a HuggingFace transformer model. Args: texts: The list of texts to embed. Returns: List of embeddings, one for each text. """ texts = list(map(lambda x: x.replace("\n", " "), texts)) embeddings = self.client.encode(texts) return embeddings.tolist() [docs] def embed_query(self, text: str) -> List[float]: """Compute query embeddings using a HuggingFace transformer model. Args: text: The text to embed. Returns: Embeddings for the text. """ text = text.replace("\n", " ") embedding = self.client.encode(text) return embedding.tolist() [docs]class HuggingFaceInstructEmbeddings(BaseModel, Embeddings): """Wrapper around sentence_transformers embedding models. To use, you should have the ``sentence_transformers`` and ``InstructorEmbedding`` python package installed. Example: .. code-block:: python from langchain.embeddings import HuggingFaceInstructEmbeddings model_name = "hkunlp/instructor-large" hf = HuggingFaceInstructEmbeddings(model_name=model_name) """ client: Any #: :meta private: model_name: str = DEFAULT_INSTRUCT_MODEL """Model name to use.""" embed_instruction: str = DEFAULT_EMBED_INSTRUCTION """Instruction to use for embedding documents.""" query_instruction: str = DEFAULT_QUERY_INSTRUCTION """Instruction to use for embedding query.""" def __init__(self, **kwargs: Any): """Initialize the sentence_transformer.""" super().__init__(**kwargs) try: from InstructorEmbedding import INSTRUCTOR
        try:
            from InstructorEmbedding import INSTRUCTOR

            self.client = INSTRUCTOR(self.model_name)
        except ImportError as e:
            raise ValueError("Dependencies for InstructorEmbedding not found.") from e

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Compute doc embeddings using a HuggingFace instruct model.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        instruction_pairs = [[self.embed_instruction, text] for text in texts]
        embeddings = self.client.encode(instruction_pairs)
        return embeddings.tolist()

    def embed_query(self, text: str) -> List[float]:
        """Compute query embeddings using a HuggingFace instruct model.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        instruction_pair = [self.query_instruction, text]
        embedding = self.client.encode([instruction_pair])[0]
        return embedding.tolist()
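A short usage sketch; the model downloads on first use, so this needs ``sentence_transformers`` installed and network access:

.. code-block:: python

    from langchain.embeddings import HuggingFaceEmbeddings

    hf = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")

    # all-mpnet-base-v2 produces 768-dimensional vectors.
    vector = hf.embed_query("How do embeddings work?")
    assert len(vector) == 768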
Source code for langchain.embeddings.huggingface_hub

"""Wrapper around HuggingFace Hub embedding models."""
from typing import Any, Dict, List, Optional

from pydantic import BaseModel, Extra, root_validator

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

DEFAULT_REPO_ID = "sentence-transformers/all-mpnet-base-v2"
VALID_TASKS = ("feature-extraction",)


class HuggingFaceHubEmbeddings(BaseModel, Embeddings):
    """Wrapper around HuggingFaceHub embedding models.

    To use, you should have the ``huggingface_hub`` python package installed,
    and the environment variable ``HUGGINGFACEHUB_API_TOKEN`` set with your
    API token, or pass it as a named parameter to the constructor.

    Example:
        .. code-block:: python

            from langchain.embeddings import HuggingFaceHubEmbeddings
            repo_id = "sentence-transformers/all-mpnet-base-v2"
            hf = HuggingFaceHubEmbeddings(
                repo_id=repo_id,
                task="feature-extraction",
                huggingfacehub_api_token="my-api-key",
            )
    """

    client: Any  #: :meta private:
    repo_id: str = DEFAULT_REPO_ID
    """Model name to use."""

    task: Optional[str] = "feature-extraction"
    """Task to call the model with."""

    model_kwargs: Optional[dict] = None
    """Keyword arguments to pass to the model."""

    huggingfacehub_api_token: Optional[str] = None

    class Config:
        """Configuration for this pydantic object."""

        extra = Extra.forbid
    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        """Validate that api key and python package exist in environment."""
        huggingfacehub_api_token = get_from_dict_or_env(
            values, "huggingfacehub_api_token", "HUGGINGFACEHUB_API_TOKEN"
        )
        try:
            from huggingface_hub.inference_api import InferenceApi

            repo_id = values["repo_id"]
            if not repo_id.startswith("sentence-transformers"):
                raise ValueError(
                    "Currently only 'sentence-transformers' embedding models "
                    f"are supported. Got invalid 'repo_id' {repo_id}."
                )
            client = InferenceApi(
                repo_id=repo_id,
                token=huggingfacehub_api_token,
                task=values.get("task"),
            )
            if client.task not in VALID_TASKS:
                raise ValueError(
                    f"Got invalid task {client.task}, "
                    f"currently only {VALID_TASKS} are supported"
                )
            values["client"] = client
        except ImportError:
            raise ValueError(
                "Could not import huggingface_hub python package. "
                "Please install it with `pip install huggingface_hub`."
            )
        return values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.

        Returns:
            List of embeddings, one for each text.
        """
        # Replace newlines, which can negatively affect performance.
        texts = [text.replace("\n", " ") for text in texts]
        _model_kwargs = self.model_kwargs or {}
        responses = self.client(inputs=texts, params=_model_kwargs)
        return responses

    def embed_query(self, text: str) -> List[float]:
        """Call out to HuggingFaceHub's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embeddings for the text.
        """
        response = self.embed_documents([text])[0]
        return response
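A minimal usage sketch; the token is a placeholder, and the calls go out to the hosted Inference API:

.. code-block:: python

    from langchain.embeddings import HuggingFaceHubEmbeddings

    hf = HuggingFaceHubEmbeddings(
        repo_id="sentence-transformers/all-mpnet-base-v2",
        huggingfacehub_api_token="hf_...",  # placeholder token
    )

    vectors = hf.embed_documents(["first doc", "second doc"])
    query_vector = hf.embed_query("a question")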
Source code for langchain.embeddings.openai

"""Wrapper around OpenAI embedding models."""
from __future__ import annotations

import logging
from typing import Any, Callable, Dict, List, Optional

import numpy as np
from pydantic import BaseModel, Extra, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    # Wait 2^x * 1 second between each retry, starting at
    # 4 seconds and capped at 10 seconds thereafter.
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator