Returns:
Folder path.
"""
# Get the course name
course_name = soup.find("span", {"id": "crumb_1"})
if course_name is None:
raise ValueError("No course name found.")
course_name = course_name.text.strip()
# Prepare the folder path
course_name_clean = (
unquote(course_name)
.replace(" ", "_")
.replace("/", "_")
.replace(":", "_")
.replace(",", "_")
.replace("?", "_")
.replace("'", "_")
.replace("!", "_")
.replace('"', "_")
)
# Get the folder path
folder_path = Path(".") / course_name_clean
return str(folder_path)
def _get_documents(self, soup: Any) -> List[Document]:
"""Fetch content from page and return Documents.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of documents.
"""
attachments = self._get_attachments(soup)
self._download_attachments(attachments)
documents = self._load_documents()
return documents
def _get_attachments(self, soup: Any) -> List[str]:
"""Get all attachments from a page.
Args:
soup: BeautifulSoup4 soup object.
Returns:
List of attachments.
"""
from bs4 import BeautifulSoup, Tag
# Get content list
content_list = soup.find("ul", {"class": "contentList"})
if content_list is None:
raise ValueError("No content list found.")
content_list: BeautifulSoup # type: ignore
# Get all attachments
attachments = []
for attachment in content_list.find_all("ul", {"class": "attachments"}):
attachment: Tag # type: ignore
for link in attachment.find_all("a"):
link: Tag # type: ignore
href = link.get("href")
# Only add if href is not None and does not start with #
if href is not None and not href.startswith("#"):
attachments.append(href)
return attachments
def _download_attachments(self, attachments: List[str]) -> None:
"""Download all attachments.
Args:
attachments: List of attachments.
"""
# Make sure the folder exists
Path(self.folder_path).mkdir(parents=True, exist_ok=True)
# Download all attachments
for attachment in attachments:
self.download(attachment)
def _load_documents(self) -> List[Document]:
"""Load all documents in the folder.
Returns:
List of documents.
"""
# Create the document loader
loader = DirectoryLoader(
path=self.folder_path, glob="*.pdf", loader_cls=PyPDFLoader # type: ignore
)
# Load the documents
documents = loader.load()
# Return all documents
return documents
def _get_paths(self, soup: Any) -> List[str]:
"""Get all relative paths in the navbar.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/blackboard.html |
85a93ca3ea2a-5 | """Get all relative paths in the navbar."""
relative_paths = []
course_menu = soup.find("ul", {"class": "courseMenu"})
if course_menu is None:
raise ValueError("No course menu found.")
for link in course_menu.find_all("a"):
href = link.get("href")
if href is not None and href.startswith("/"):
relative_paths.append(href)
return relative_paths
[docs] def download(self, path: str) -> None:
"""Download a file from a url.
Args:
path: Path to the file.
"""
# Get the file content
response = self.session.get(self.base_url + path, allow_redirects=True)
# Get the filename
filename = self.parse_filename(response.url)
# Write the file to disk
with open(Path(self.folder_path) / filename, "wb") as f:
f.write(response.content)
[docs] def parse_filename(self, url: str) -> str:
"""Parse the filename from a url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
"""
if (url_path := Path(url)) and url_path.suffix == ".pdf":
return url_path.name
else:
return self._parse_filename_from_url(url)
def _parse_filename_from_url(self, url: str) -> str:
"""Parse the filename from a url.
Args:
url: Url to parse the filename from.
Returns:
The filename.
Raises:
ValueError: If the filename could not be parsed.
"""
filename_matches = re.search(r"filename%2A%3DUTF-8%27%27(.+)", url)
if filename_matches:
filename = filename_matches.group(1)
else:
raise ValueError(f"Could not parse filename from {url}")
if ".pdf" not in filename:
raise ValueError(f"Incorrect file type: {filename}")
filename = filename.split(".pdf")[0] + ".pdf"
filename = unquote(filename)
filename = filename.replace("%20", " ")
return filename
if __name__ == "__main__":
loader = BlackboardLoader(
"https://<YOUR BLACKBOARD URL"
" HERE>/webapps/blackboard/content/listContent.jsp?course_id=_<YOUR COURSE ID"
" HERE>_1&content_id=_<YOUR CONTENT ID HERE>_1&mode=reset",
"<YOUR BBROUTER COOKIE HERE>",
load_all_recursively=True,
)
documents = loader.load()
print(f"Loaded {len(documents)} pages of PDFs from {loader.web_path}")
Source code for langchain.document_loaders.git
import os
from typing import Callable, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class GitLoader(BaseLoader):
"""Loads files from a Git repository into a list of documents.
Repository can be local on disk available at `repo_path`,
or remote at `clone_url` that will be cloned to `repo_path`.
Currently supports only text files.
Each document represents one file in the repository. The `path` points to
the local Git repository, and the `branch` specifies the branch to load
files from. By default, it loads from the `main` branch.
"""
def __init__(
self,
repo_path: str,
clone_url: Optional[str] = None,
branch: Optional[str] = "main",
file_filter: Optional[Callable[[str], bool]] = None,
):
self.repo_path = repo_path
self.clone_url = clone_url
self.branch = branch
self.file_filter = file_filter
[docs] def load(self) -> List[Document]:
try:
from git import Blob, Repo # type: ignore
except ImportError as ex:
raise ImportError(
"Could not import git python package. "
"Please install it with `pip install GitPython`."
) from ex
if not os.path.exists(self.repo_path) and self.clone_url is None:
raise ValueError(f"Path {self.repo_path} does not exist")
elif self.clone_url:
repo = Repo.clone_from(self.clone_url, self.repo_path)
repo.git.checkout(self.branch)
else:
repo = Repo(self.repo_path)
repo.git.checkout(self.branch)
docs: List[Document] = []
for item in repo.tree().traverse():
if not isinstance(item, Blob):
continue
file_path = os.path.join(self.repo_path, item.path)
ignored_files = repo.ignored([file_path])
if len(ignored_files):
continue
# uses filter to skip files
if self.file_filter and not self.file_filter(file_path):
continue
rel_file_path = os.path.relpath(file_path, self.repo_path)
try:
with open(file_path, "rb") as f:
content = f.read()
file_type = os.path.splitext(item.name)[1]
# loads only text files
try:
text_content = content.decode("utf-8")
except UnicodeDecodeError:
continue
metadata = {
"file_path": rel_file_path,
"file_name": item.name,
"file_type": file_type,
}
doc = Document(page_content=text_content, metadata=metadata)
docs.append(doc)
except Exception as e:
print(f"Error reading file {file_path}: {e}")
return docs
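A minimal usage sketch (not part of the module above): the repository path, branch, and filter are hypothetical, and GitPython must be installed for load() to work; pass clone_url as well to clone a remote repository into repo_path first.

from langchain.document_loaders.git import GitLoader

# Hypothetical local repository; branch and filter are purely illustrative.
loader = GitLoader(
    repo_path="./example_repo",
    branch="main",
    file_filter=lambda file_path: file_path.endswith(".py"),  # keep only Python files
)
docs = loader.load()
print(f"Loaded {len(docs)} text files from the repository")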
Source code for langchain.document_loaders.blockchain
import os
import re
from enum import Enum
from typing import List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class BlockchainType(Enum):
ETH_MAINNET = "eth-mainnet"
ETH_GOERLI = "eth-goerli"
POLYGON_MAINNET = "polygon-mainnet"
POLYGON_MUMBAI = "polygon-mumbai"
[docs]class BlockchainDocumentLoader(BaseLoader):
"""Loads elements from a blockchain smart contract into Langchain documents.
The supported blockchains are: Ethereum mainnet, Ethereum Goerli testnet,
Polygon mainnet, and Polygon Mumbai testnet.
If no BlockchainType is specified, the default is Ethereum mainnet.
The Loader uses the Alchemy API to interact with the blockchain.
ALCHEMY_API_KEY environment variable must be set to use this loader.
Future versions of this loader can:
- Support additional Alchemy APIs (e.g. getTransactions, etc.)
"""
def __init__(
self,
contract_address: str,
blockchainType: BlockchainType = BlockchainType.ETH_MAINNET,
api_key: str = "docs-demo",
startToken: int = 0,
):
self.contract_address = contract_address
self.blockchainType = blockchainType.value
self.api_key = os.environ.get("ALCHEMY_API_KEY") or api_key
self.startToken = startToken
if not self.api_key:
raise ValueError("Alchemy API key not provided.") | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/blockchain.html |
6ed6fa4c085b-1 | raise ValueError("Alchemy API key not provided.")
if not re.match(r"^0x[a-fA-F0-9]{40}$", self.contract_address):
raise ValueError(f"Invalid contract address {self.contract_address}")
[docs] def load(self) -> List[Document]:
url = (
f"https://{self.blockchainType}.g.alchemy.com/nft/v2/"
f"{self.api_key}/getNFTsForCollection?withMetadata="
f"True&contractAddress={self.contract_address}"
f"&startToken={self.startToken}"
)
response = requests.get(url)
if response.status_code != 200:
raise ValueError(f"Request failed with status code {response.status_code}")
items = response.json()["nfts"]
if not (items):
raise ValueError(
f"No NFTs found for contract address {self.contract_address}"
)
result = []
for item in items:
content = str(item)
tokenId = item["id"]["tokenId"]
metadata = {"tokenId": tokenId}
result.append(Document(page_content=content, metadata=metadata))
return result
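A hedged usage sketch: the contract address below is a zero-valued placeholder that merely satisfies the format check above, and the API key is assumed to come from your Alchemy account.

from langchain.document_loaders.blockchain import BlockchainDocumentLoader, BlockchainType

loader = BlockchainDocumentLoader(
    contract_address="0x0000000000000000000000000000000000000000",  # placeholder, 40 hex chars
    blockchainType=BlockchainType.ETH_MAINNET,   # the default network
    api_key="<YOUR ALCHEMY API KEY HERE>",       # or set ALCHEMY_API_KEY in the environment
)
nft_docs = loader.load()  # one Document per NFT, with tokenId in metadata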
Source code for langchain.document_loaders.facebook_chat
"""Loader that loads Facebook chat json dump."""
import datetime
import json
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
sender = row["sender_name"]
text = row["content"]
date = datetime.datetime.fromtimestamp(row["timestamp_ms"] / 1000).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{sender} on {date}: {text}\n\n"
[docs]class FacebookChatLoader(BaseLoader):
"""Loader that loads Facebook messages json directory dump."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
import pandas as pd
except ImportError:
raise ValueError(
"pandas is needed for Facebook chat loader, "
"please install with `pip install pandas`"
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
normalized_messages = pd.json_normalize(d["messages"])
df_normalized_messages = pd.DataFrame(normalized_messages)
# Only keep plain text messages
# (no services, nor links, hashtags, code, bold ...)
df_filtered = df_normalized_messages[
(df_normalized_messages.content.apply(lambda x: type(x) == str))
]
df_filtered = df_filtered[["timestamp_ms", "content", "sender_name"]]
text = df_filtered.apply(concatenate_rows, axis=1).str.cat(sep="")
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
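A short usage sketch; the JSON path below is a hypothetical file from a Facebook data export, and pandas must be installed.

from langchain.document_loaders.facebook_chat import FacebookChatLoader

loader = FacebookChatLoader(path="./example_data/facebook_chat/messages_1.json")  # hypothetical path
docs = loader.load()  # a single Document with "<sender> on <date>: <text>" lines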
Source code for langchain.document_loaders.diffbot
"""Loader that uses Diffbot to load webpages in text format."""
import logging
from typing import Any, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class DiffbotLoader(BaseLoader):
"""Loader that loads Diffbot file json."""
def __init__(
self, api_token: str, urls: List[str], continue_on_failure: bool = True
):
"""Initialize with API token, ids, and key."""
self.api_token = api_token
self.urls = urls
self.continue_on_failure = continue_on_failure
def _diffbot_api_url(self, diffbot_api: str) -> str:
return f"https://api.diffbot.com/v3/{diffbot_api}"
def _get_diffbot_data(self, url: str) -> Any:
"""Get Diffbot file from Diffbot REST API."""
# TODO: Add support for other Diffbot APIs
diffbot_url = self._diffbot_api_url("article")
params = {
"token": self.api_token,
"url": url,
}
response = requests.get(diffbot_url, params=params, timeout=10)
# TODO: handle non-ok errors
return response.json() if response.ok else {}
[docs] def load(self) -> List[Document]:
"""Extract text from Diffbot on all the URLs and return Document instances"""
docs: List[Document] = list()
for url in self.urls:
try:
data = self._get_diffbot_data(url)
text = data["objects"][0]["text"] if "objects" in data else ""
metadata = {"source": url}
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
return docs
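A hedged usage sketch; the token and article URL are placeholders, and each URL is sent to the Diffbot Article API shown above.

from langchain.document_loaders.diffbot import DiffbotLoader

loader = DiffbotLoader(
    api_token="<YOUR DIFFBOT TOKEN HERE>",
    urls=["https://www.example.com/some-article"],  # illustrative URL
    continue_on_failure=True,  # log failures instead of raising
)
docs = loader.load()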
Source code for langchain.document_loaders.image
"""Loader that loads image files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredImageLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load image files, such as PNGs and JPGs."""
def _get_elements(self) -> List:
from unstructured.partition.image import partition_image
return partition_image(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.duckdb_loader
from typing import Dict, List, Optional, cast
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DuckDBLoader(BaseLoader):
"""Loads a query result from DuckDB into a list of documents.
Each document represents one row of the result. The `page_content_columns`
are written into the `page_content` of the document. The `metadata_columns`
are written into the `metadata` of the document. By default, all columns
are written into the `page_content` and none into the `metadata`.
"""
def __init__(
self,
query: str,
database: str = ":memory:",
read_only: bool = False,
config: Optional[Dict[str, str]] = None,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
):
self.query = query
self.database = database
self.read_only = read_only
self.config = config or {}
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
[docs] def load(self) -> List[Document]:
try:
import duckdb
except ImportError:
raise ValueError(
"Could not import duckdb python package. "
"Please install it with `pip install duckdb`."
)
docs = []
with duckdb.connect(
database=self.database, read_only=self.read_only, config=self.config
) as con:
query_result = con.execute(self.query)
results = query_result.fetchall()
description = cast(list, query_result.description)
field_names = [c[0] for c in description]
if self.page_content_columns is None:
page_content_columns = field_names
else:
page_content_columns = self.page_content_columns
if self.metadata_columns is None:
metadata_columns = []
else:
metadata_columns = self.metadata_columns
for result in results:
page_content = "\n".join(
f"{column}: {result[field_names.index(column)]}"
for column in page_content_columns
)
metadata = {
column: result[field_names.index(column)]
for column in metadata_columns
}
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
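A small, self-contained sketch of the column mapping described in the class docstring; the query fabricates a single row, so no database file is needed.

from langchain.document_loaders.duckdb_loader import DuckDBLoader

loader = DuckDBLoader(
    query="SELECT 'alice' AS name, 30 AS age",  # illustrative in-memory query
    page_content_columns=["name"],  # written into page_content
    metadata_columns=["age"],       # written into metadata
)
docs = loader.load()
# -> [Document(page_content="name: alice", metadata={"age": 30})]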
Source code for langchain.document_loaders.notion
"""Loader that loads Notion directory dump."""
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class NotionDirectoryLoader(BaseLoader):
"""Loader that loads Notion directory dump."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
Source code for langchain.document_loaders.obsidian
"""Loader that loads Obsidian directory dump."""
import re
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ObsidianLoader(BaseLoader):
"""Loader that loads Obsidian files from disk."""
FRONT_MATTER_REGEX = re.compile(r"^---\n(.*?)\n---\n", re.MULTILINE | re.DOTALL)
def __init__(
self, path: str, encoding: str = "UTF-8", collect_metadata: bool = True
):
"""Initialize with path."""
self.file_path = path
self.encoding = encoding
self.collect_metadata = collect_metadata
def _parse_front_matter(self, content: str) -> dict:
"""Parse front matter metadata from the content and return it as a dict."""
if not self.collect_metadata:
return {}
match = self.FRONT_MATTER_REGEX.search(content)
front_matter = {}
if match:
lines = match.group(1).split("\n")
for line in lines:
if ":" in line:
key, value = line.split(":", 1)
front_matter[key.strip()] = value.strip()
else:
# Skip lines without a colon
continue
return front_matter
def _remove_front_matter(self, content: str) -> str:
"""Remove front matter metadata from the given content."""
if not self.collect_metadata:
return content
return self.FRONT_MATTER_REGEX.sub("", content)
[docs] def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p, encoding=self.encoding) as f:
text = f.read()
front_matter = self._parse_front_matter(text)
text = self._remove_front_matter(text)
metadata = {"source": str(p.name), "path": str(p), **front_matter}
docs.append(Document(page_content=text, metadata=metadata))
return docs
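A brief usage sketch, assuming a hypothetical vault directory of Markdown notes; with collect_metadata=True, YAML front matter keys end up in each Document's metadata.

from langchain.document_loaders.obsidian import ObsidianLoader

loader = ObsidianLoader(path="./example_data/obsidian_vault", collect_metadata=True)  # hypothetical path
docs = loader.load()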
Source code for langchain.document_loaders.azure_blob_storage_file
"""Loading logic for loading documents from an Azure Blob Storage file."""
import os
import tempfile
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class AzureBlobStorageFileLoader(BaseLoader):
"""Loading logic for loading documents from Azure Blob Storage."""
def __init__(self, conn_str: str, container: str, blob_name: str):
"""Initialize with connection string, container and blob name."""
self.conn_str = conn_str
self.container = container
self.blob = blob_name
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import BlobClient
except ImportError as exc:
raise ValueError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
client = BlobClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container, blob_name=self.blob
)
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.container}/{self.blob}" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/azure_blob_storage_file.html |
af70b8e8d9f5-1 | os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(f"{file_path}", "wb") as file:
blob_data = client.download_blob()
blob_data.readinto(file)
loader = UnstructuredFileLoader(file_path)
return loader.load()
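A hedged usage sketch with placeholder connection details; azure-storage-blob and unstructured must be installed.

from langchain.document_loaders.azure_blob_storage_file import AzureBlobStorageFileLoader

loader = AzureBlobStorageFileLoader(
    conn_str="<YOUR AZURE STORAGE CONNECTION STRING HERE>",
    container="example-container",    # placeholder container name
    blob_name="example-report.pdf",   # placeholder blob name
)
docs = loader.load()  # downloads the blob to a temp dir and parses it with UnstructuredFileLoader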
Source code for langchain.document_loaders.twitter
"""Twitter document loader."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import tweepy
from tweepy import OAuth2BearerHandler, OAuthHandler
def _dependable_tweepy_import() -> tweepy:
try:
import tweepy
except ImportError:
raise ValueError(
"tweepy package not found, please install it with `pip install tweepy`"
)
return tweepy
[docs]class TwitterTweetLoader(BaseLoader):
"""Twitter tweets loader.
Read tweets of user twitter handle.
First you need to go to
`https://developer.twitter.com/en/docs/twitter-api
/getting-started/getting-access-to-the-twitter-api`
to get your token. And create a v2 version of the app.
"""
def __init__(
self,
auth_handler: Union[OAuthHandler, OAuth2BearerHandler],
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
):
self.auth = auth_handler
self.twitter_users = twitter_users
self.number_tweets = number_tweets
[docs] def load(self) -> List[Document]:
"""Load tweets.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/twitter.html |
3216c3e931e2-1 | """Load tweets."""
tweepy = _dependable_tweepy_import()
api = tweepy.API(self.auth, parser=tweepy.parsers.JSONParser())
results: List[Document] = []
for username in self.twitter_users:
tweets = api.user_timeline(screen_name=username, count=self.number_tweets)
user = api.get_user(screen_name=username)
docs = self._format_tweets(tweets, user)
results.extend(docs)
return results
def _format_tweets(
self, tweets: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format tweets into a string."""
for tweet in tweets:
metadata = {
"created_at": tweet["created_at"],
"user_info": user_info,
}
yield Document(
page_content=tweet["text"],
metadata=metadata,
)
[docs] @classmethod
def from_bearer_token(
cls,
oauth2_bearer_token: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from OAuth2 bearer token."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuth2BearerHandler(oauth2_bearer_token)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
[docs] @classmethod
def from_secrets(
cls,
access_token: str,
access_token_secret: str,
consumer_key: str,
consumer_secret: str,
twitter_users: Sequence[str],
number_tweets: Optional[int] = 100,
) -> TwitterTweetLoader:
"""Create a TwitterTweetLoader from access tokens and secrets."""
tweepy = _dependable_tweepy_import()
auth = tweepy.OAuthHandler(
access_token=access_token,
access_token_secret=access_token_secret,
consumer_key=consumer_key,
consumer_secret=consumer_secret,
)
return cls(
auth_handler=auth,
twitter_users=twitter_users,
number_tweets=number_tweets,
)
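A minimal sketch using the bearer-token constructor above; the token and handle are placeholders.

from langchain.document_loaders.twitter import TwitterTweetLoader

loader = TwitterTweetLoader.from_bearer_token(
    oauth2_bearer_token="<YOUR BEARER TOKEN HERE>",
    twitter_users=["example_handle"],  # placeholder handle
    number_tweets=50,
)
docs = loader.load()  # one Document per tweet, with created_at and user_info in metadata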
Source code for langchain.document_loaders.powerpoint
"""Loader that loads powerpoint files."""
import os
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredPowerPointLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load powerpoint files."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_ppt = detect_filetype(self.file_path) == FileType.PPT
except ImportError:
_, extension = os.path.splitext(self.file_path)
is_ppt = extension == ".ppt"
if is_ppt and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .ppt files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_ppt:
from unstructured.partition.ppt import partition_ppt
return partition_ppt(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.pptx import partition_pptx
return partition_pptx(filename=self.file_path, **self.unstructured_kwargs)
Source code for langchain.document_loaders.azlyrics
"""Loader that loads AZLyrics."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class AZLyricsLoader(WebBaseLoader):
"""Loader that loads AZLyrics webpages."""
[docs] def load(self) -> List[Document]:
"""Load webpage."""
soup = self.scrape()
title = soup.title.text
lyrics = soup.find_all("div", {"class": ""})[2].text
text = title + lyrics
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
Source code for langchain.document_loaders.gcs_directory
"""Loading logic for loading documents from an GCS directory."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.gcs_file import GCSFileLoader
[docs]class GCSDirectoryLoader(BaseLoader):
"""Loading logic for loading documents from GCS."""
def __init__(self, project_name: str, bucket: str, prefix: str = ""):
"""Initialize with bucket and key name."""
self.project_name = project_name
self.bucket = bucket
self.prefix = prefix
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from google.cloud import storage
except ImportError:
raise ValueError(
"Could not import google-cloud-storage python package. "
"Please install it with `pip install google-cloud-storage`."
)
client = storage.Client(project=self.project_name)
docs = []
for blob in client.list_blobs(self.bucket, prefix=self.prefix):
# we shall just skip directories since GCSFileLoader creates
# intermediate directories on the fly
if blob.name.endswith("/"):
continue
loader = GCSFileLoader(self.project_name, self.bucket, blob.name)
docs.extend(loader.load())
return docs
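A hedged usage sketch with placeholder project, bucket, and prefix; google-cloud-storage must be installed and application credentials configured.

from langchain.document_loaders.gcs_directory import GCSDirectoryLoader

loader = GCSDirectoryLoader(
    project_name="example-gcp-project",  # placeholder project
    bucket="example-bucket",             # placeholder bucket
    prefix="reports/",                   # optional key prefix
)
docs = loader.load()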
Source code for langchain.document_loaders.csv_loader
from csv import DictReader
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class CSVLoader(BaseLoader):
"""Loads a CSV file into a list of documents.
Each document represents one row of the CSV file. Every row is converted into a
key/value pair and outputted to a new line in the document's page_content.
The source for each document loaded from csv is set to the value of the
`file_path` argument for all documents by default.
You can override this by setting the `source_column` argument to the
name of a column in the CSV file.
The source of each document will then be set to the value of the column
with the name specified in `source_column`.
Output Example:
.. code-block:: txt
column1: value1
column2: value2
column3: value3
"""
def __init__(
self,
file_path: str,
source_column: Optional[str] = None,
csv_args: Optional[Dict] = None,
encoding: Optional[str] = None,
):
self.file_path = file_path
self.source_column = source_column
self.encoding = encoding
if csv_args is None:
self.csv_args = {
"delimiter": ",",
"quotechar": '"',
}
else:
self.csv_args = csv_args
[docs] def load(self) -> List[Document]:
docs = []
with open(self.file_path, newline="", encoding=self.encoding) as csvfile:
csv = DictReader(csvfile, **self.csv_args) # type: ignore
for i, row in enumerate(csv):
content = "\n".join(f"{k.strip()}: {v.strip()}" for k, v in row.items())
if self.source_column is not None:
source = row[self.source_column]
else:
source = self.file_path
metadata = {"source": source, "row": i}
doc = Document(page_content=content, metadata=metadata)
docs.append(doc)
return docs
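A short sketch of the behaviour described in the docstring, assuming a hypothetical articles.csv with title, body and url columns.

from langchain.document_loaders.csv_loader import CSVLoader

loader = CSVLoader(
    file_path="./example_data/articles.csv",        # hypothetical file
    source_column="url",                            # use the url column as each Document's source
    csv_args={"delimiter": ",", "quotechar": '"'},  # same values as the defaults above, shown explicitly
)
docs = loader.load()  # one Document per row, e.g. "title: ...\nbody: ...\nurl: ..."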
Source code for langchain.document_loaders.slack_directory
"""Loader for documents from a Slack export."""
import json
import zipfile
from pathlib import Path
from typing import Dict, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class SlackDirectoryLoader(BaseLoader):
"""Loader for loading documents from a Slack directory dump."""
def __init__(self, zip_path: str, workspace_url: Optional[str] = None):
"""Initialize the SlackDirectoryLoader.
Args:
zip_path (str): The path to the Slack directory dump zip file.
workspace_url (Optional[str]): The Slack workspace URL.
Including the URL will turn
sources into links. Defaults to None.
"""
self.zip_path = Path(zip_path)
self.workspace_url = workspace_url
self.channel_id_map = self._get_channel_id_map(self.zip_path)
@staticmethod
def _get_channel_id_map(zip_path: Path) -> Dict[str, str]:
"""Get a dictionary mapping channel names to their respective IDs."""
with zipfile.ZipFile(zip_path, "r") as zip_file:
try:
with zip_file.open("channels.json", "r") as f:
channels = json.load(f)
return {channel["name"]: channel["id"] for channel in channels} | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/slack_directory.html |
57da1f613fb0-1 | except KeyError:
return {}
[docs] def load(self) -> List[Document]:
"""Load and return documents from the Slack directory dump."""
docs = []
with zipfile.ZipFile(self.zip_path, "r") as zip_file:
for channel_path in zip_file.namelist():
channel_name = Path(channel_path).parent.name
if not channel_name:
continue
if channel_path.endswith(".json"):
messages = self._read_json(zip_file, channel_path)
for message in messages:
document = self._convert_message_to_document(
message, channel_name
)
docs.append(document)
return docs
def _read_json(self, zip_file: zipfile.ZipFile, file_path: str) -> List[dict]:
"""Read JSON data from a zip subfile."""
with zip_file.open(file_path, "r") as f:
data = json.load(f)
return data
def _convert_message_to_document(
self, message: dict, channel_name: str
) -> Document:
"""
Convert a message to a Document object.
Args:
message (dict): A message in the form of a dictionary.
channel_name (str): The name of the channel the message belongs to.
Returns:
Document: A Document object representing the message.
"""
text = message.get("text", "")
metadata = self._get_message_metadata(message, channel_name)
return Document(
page_content=text,
metadata=metadata,
)
def _get_message_metadata(self, message: dict, channel_name: str) -> dict:
"""Create and return metadata for a given message and channel."""
timestamp = message.get("ts", "")
user = message.get("user", "")
source = self._get_message_source(channel_name, user, timestamp)
return {
"source": source,
"channel": channel_name,
"timestamp": timestamp,
"user": user,
}
def _get_message_source(self, channel_name: str, user: str, timestamp: str) -> str:
"""
Get the message source as a string.
Args:
channel_name (str): The name of the channel the message belongs to.
user (str): The user ID who sent the message.
timestamp (str): The timestamp of the message.
Returns:
str: The message source.
"""
if self.workspace_url:
channel_id = self.channel_id_map.get(channel_name, "")
return (
f"{self.workspace_url}/archives/{channel_id}"
+ f"/p{timestamp.replace('.', '')}"
)
else:
return f"{channel_name} - {user} - {timestamp}"
Source code for langchain.document_loaders.notiondb
"""Notion DB loader for langchain"""
from typing import Any, Dict, List
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
NOTION_BASE_URL = "https://api.notion.com/v1"
DATABASE_URL = NOTION_BASE_URL + "/databases/{database_id}/query"
PAGE_URL = NOTION_BASE_URL + "/pages/{page_id}"
BLOCK_URL = NOTION_BASE_URL + "/blocks/{block_id}/children"
[docs]class NotionDBLoader(BaseLoader):
"""Notion DB Loader.
Reads content from pages within a Notion Database.
Args:
integration_token (str): Notion integration token.
database_id (str): Notion database id.
"""
def __init__(self, integration_token: str, database_id: str) -> None:
"""Initialize with parameters."""
if not integration_token:
raise ValueError("integration_token must be provided")
if not database_id:
raise ValueError("database_id must be provided")
self.token = integration_token
self.database_id = database_id
self.headers = {
"Authorization": "Bearer " + self.token,
"Content-Type": "application/json",
"Notion-Version": "2022-06-28",
}
[docs] def load(self) -> List[Document]:
"""Load documents from the Notion database.
Returns:
List[Document]: List of documents.
"""
page_ids = self._retrieve_page_ids()
return list(self.load_page(page_id) for page_id in page_ids)
def _retrieve_page_ids(
self, query_dict: Dict[str, Any] = {"page_size": 100}
) -> List[str]:
"""Get all the pages from a Notion database."""
pages: List[Dict[str, Any]] = []
while True:
data = self._request(
DATABASE_URL.format(database_id=self.database_id),
method="POST",
query_dict=query_dict,
)
pages.extend(data.get("results"))
if not data.get("has_more"):
break
query_dict["start_cursor"] = data.get("next_cursor")
page_ids = [page["id"] for page in pages]
return page_ids
[docs] def load_page(self, page_id: str) -> Document:
"""Read a page."""
data = self._request(PAGE_URL.format(page_id=page_id))
# load properties as metadata
metadata: Dict[str, Any] = {}
for prop_name, prop_data in data["properties"].items():
prop_type = prop_data["type"]
if prop_type == "rich_text":
value = (
prop_data["rich_text"][0]["plain_text"]
if prop_data["rich_text"]
else None
)
elif prop_type == "title":
value = (
prop_data["title"][0]["plain_text"] if prop_data["title"] else None
)
elif prop_type == "multi_select":
value = (
[item["name"] for item in prop_data["multi_select"]]
if prop_data["multi_select"]
else []
)
else:
value = None
metadata[prop_name.lower()] = value
metadata["id"] = page_id
return Document(page_content=self._load_blocks(page_id), metadata=metadata)
def _load_blocks(self, block_id: str, num_tabs: int = 0) -> str:
"""Read a block and its children."""
result_lines_arr: List[str] = []
cur_block_id: str = block_id
while cur_block_id:
data = self._request(BLOCK_URL.format(block_id=cur_block_id))
for result in data["results"]:
result_obj = result[result["type"]]
if "rich_text" not in result_obj:
continue
cur_result_text_arr: List[str] = []
for rich_text in result_obj["rich_text"]:
if "text" in rich_text:
cur_result_text_arr.append(
"\t" * num_tabs + rich_text["text"]["content"]
)
if result["has_children"]:
children_text = self._load_blocks(
result["id"], num_tabs=num_tabs + 1
)
cur_result_text_arr.append(children_text)
result_lines_arr.append("\n".join(cur_result_text_arr))
cur_block_id = data.get("next_cursor")
return "\n".join(result_lines_arr)
def _request(
self, url: str, method: str = "GET", query_dict: Dict[str, Any] = {}
) -> Any:
res = requests.request(
method,
url,
headers=self.headers,
json=query_dict,
timeout=10,
)
res.raise_for_status()
return res.json()
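A minimal sketch with placeholder credentials; the integration token and database id come from your Notion workspace, and the integration is assumed to have access to that database.

from langchain.document_loaders.notiondb import NotionDBLoader

loader = NotionDBLoader(
    integration_token="<YOUR NOTION INTEGRATION TOKEN HERE>",
    database_id="<YOUR NOTION DATABASE ID HERE>",
)
docs = loader.load()  # one Document per page, with its properties lower-cased into metadata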
Source code for langchain.document_loaders.readthedocs
"""Loader that loads ReadTheDocs documentation directory dump."""
from pathlib import Path
from typing import Any, List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class ReadTheDocsLoader(BaseLoader):
"""Loader that loads ReadTheDocs documentation directory dump."""
def __init__(
self,
path: str,
encoding: Optional[str] = None,
errors: Optional[str] = None,
**kwargs: Optional[Any]
):
"""Initialize path."""
try:
from bs4 import BeautifulSoup
except ImportError:
raise ValueError(
"Could not import python packages. "
"Please install it with `pip install beautifulsoup4`. "
)
try:
_ = BeautifulSoup(
"<html><body>Parser builder library test.</body></html>", **kwargs
)
except Exception as e:
raise ValueError("Parsing kwargs do not appear valid") from e
self.file_path = path
self.encoding = encoding
self.errors = errors
self.bs_kwargs = kwargs
[docs] def load(self) -> List[Document]:
"""Load documents."""
from bs4 import BeautifulSoup
def _clean_data(data: str) -> str:
soup = BeautifulSoup(data, **self.bs_kwargs)
text = soup.find_all("main", {"id": "main-content"}) | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/readthedocs.html |
d2d7b5c9eaea-1 | if len(text) == 0:
text = soup.find_all("div", {"role": "main"})
if len(text) != 0:
text = text[0].get_text()
else:
text = ""
return "\n".join([t for t in text.split("\n") if t])
docs = []
for p in Path(self.file_path).rglob("*"):
if p.is_dir():
continue
with open(p, encoding=self.encoding, errors=self.errors) as f:
text = _clean_data(f.read())
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
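A hedged sketch; rtdocs/ is a hypothetical directory of previously downloaded HTML pages, and the extra keyword argument is forwarded to BeautifulSoup as shown above.

from langchain.document_loaders.readthedocs import ReadTheDocsLoader

loader = ReadTheDocsLoader(path="rtdocs/", features="html.parser")  # hypothetical dump directory
docs = loader.load()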
Source code for langchain.document_loaders.gutenberg
"""Loader that loads .txt web files."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class GutenbergLoader(BaseLoader):
"""Loader that uses urllib to load .txt web files."""
def __init__(self, file_path: str):
"""Initialize with file path."""
if not file_path.startswith("https://www.gutenberg.org"):
raise ValueError("file path must start with 'https://www.gutenberg.org'")
if not file_path.endswith(".txt"):
raise ValueError("file path must end with '.txt'")
self.file_path = file_path
[docs] def load(self) -> List[Document]:
"""Load file."""
from urllib.request import urlopen
elements = urlopen(self.file_path)
text = "\n\n".join([str(el.decode("utf-8-sig")) for el in elements])
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
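A brief sketch; the URL below is illustrative only and must satisfy the two checks in the constructor (a gutenberg.org address ending in .txt).

from langchain.document_loaders.gutenberg import GutenbergLoader

loader = GutenbergLoader("https://www.gutenberg.org/files/2591/2591-0.txt")  # illustrative URL
docs = loader.load()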
Source code for langchain.document_loaders.googledrive
"""Loader that loads data from Google Drive."""
# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
# https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
# https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel, root_validator, validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
[docs]class GoogleDriveLoader(BaseLoader, BaseModel):
"""Loader that loads Google Docs from Google Drive."""
service_account_key: Path = Path.home() / ".credentials" / "keys.json"
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
folder_id: Optional[str] = None
document_ids: Optional[List[str]] = None
file_ids: Optional[List[str]] = None
recursive: bool = False
@root_validator
def validate_folder_id_or_document_ids(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if values.get("folder_id") and (
values.get("document_ids") or values.get("file_ids")
):
raise ValueError(
"Cannot specify both folder_id and document_ids nor "
"folder_id and file_ids"
)
if (
not values.get("folder_id")
and not values.get("document_ids")
and not values.get("file_ids")
):
raise ValueError("Must specify either folder_id, document_ids, or file_ids")
return values
@validator("credentials_path")
def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
"""Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f"credentials_path {v} does not exist")
return v
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib`"
"to use the Google Drive loader."
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_key), scopes=SCOPES
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
def _load_sheet_from_id(self, id: str) -> List[Document]:
"""Load a sheet and all tabs from an ID."""
from googleapiclient.discovery import build
creds = self._load_credentials()
sheets_service = build("sheets", "v4", credentials=creds)
spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
sheets = spreadsheet.get("sheets", [])
documents = []
for sheet in sheets:
sheet_name = sheet["properties"]["title"]
result = (
sheets_service.spreadsheets()
.values()
.get(spreadsheetId=id, range=sheet_name)
.execute()
)
values = result.get("values", [])
header = values[0]
for i, row in enumerate(values[1:], start=1):
metadata = {
"source": (
f"https://docs.google.com/spreadsheets/d/{id}/"
f"edit?gid={sheet['properties']['sheetId']}"
),
"title": f"{spreadsheet['properties']['title']} - {sheet_name}",
"row": i,
}
content = []
for j, v in enumerate(row):
title = header[j].strip() if len(header) > j else ""
content.append(f"{title}: {v.strip()}")
page_content = "\n".join(content)
documents.append(Document(page_content=page_content, metadata=metadata))
return documents
def _load_document_from_id(self, id: str) -> Document:
"""Load a document from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().export_media(fileId=id, mimeType="text/plain")
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
try:
while done is False:
status, done = downloader.next_chunk()
except HttpError as e:
if e.resp.status == 404:
print("File not found: {}".format(id))
else:
print("An error occurred: {}".format(e))
text = fh.getvalue().decode("utf-8")
metadata = {
"source": f"https://docs.google.com/document/d/{id}/edit",
"title": f"{file.get('name')}",
}
return Document(page_content=text, metadata=metadata)
def _load_documents_from_folder(self, folder_id: str) -> List[Document]:
"""Load documents from a folder."""
from googleapiclient.discovery import build
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
files = self._fetch_files_recursive(service, folder_id)
returns = []
for file in files:
if file["mimeType"] == "application/vnd.google-apps.document":
returns.append(self._load_document_from_id(file["id"])) # type: ignore
elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
returns.extend(self._load_sheet_from_id(file["id"])) # type: ignore
elif file["mimeType"] == "application/pdf":
returns.extend(self._load_file_from_id(file["id"])) # type: ignore
else:
pass
return returns
def _fetch_files_recursive(
self, service: Any, folder_id: str
) -> List[Dict[str, Union[str, List[str]]]]:
"""Fetch all files and subfolders recursively."""
results = (
service.files()
.list(
q=f"'{folder_id}' in parents",
pageSize=1000,
includeItemsFromAllDrives=True,
supportsAllDrives=True,
fields="nextPageToken, files(id, name, mimeType, parents)",
)
.execute()
)
files = results.get("files", [])
returns = []
for file in files:
if file["mimeType"] == "application/vnd.google-apps.folder":
if self.recursive:
returns.extend(self._fetch_files_recursive(service, file["id"]))
else:
returns.append(file)
return returns
def _load_documents_from_ids(self) -> List[Document]:
"""Load documents from a list of IDs."""
if not self.document_ids:
raise ValueError("document_ids must be set")
return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]
def _load_file_from_id(self, id: str) -> List[Document]:
"""Load a file from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id, supportsAllDrives=True).execute()
request = service.files().get_media(fileId=id)
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
content = fh.getvalue()
from PyPDF2 import PdfReader
pdf_reader = PdfReader(BytesIO(content))
return [
Document(
page_content=page.extract_text(),
metadata={
"source": f"https://drive.google.com/file/d/{id}/view",
"title": f"{file.get('name')}",
"page": i,
},
)
for i, page in enumerate(pdf_reader.pages)
]
def _load_file_from_ids(self) -> List[Document]:
"""Load files from a list of IDs."""
if not self.file_ids:
raise ValueError("file_ids must be set")
docs = []
for file_id in self.file_ids:
docs.extend(self._load_file_from_id(file_id))
return docs
[docs] def load(self) -> List[Document]:
"""Load documents."""
if self.folder_id:
return self._load_documents_from_folder(self.folder_id)
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids()
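A minimal sketch with a placeholder folder id; it assumes the Google client libraries are installed and that credentials.json (and later token.json) exist under ~/.credentials, per the defaults above.

from langchain.document_loaders.googledrive import GoogleDriveLoader

loader = GoogleDriveLoader(
    folder_id="<YOUR GOOGLE DRIVE FOLDER ID HERE>",  # exactly one of folder_id / document_ids / file_ids
    recursive=False,
)
docs = loader.load()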
Source code for langchain.document_loaders.gitbook
"""Loader that loads GitBook."""
from typing import Any, List, Optional
from urllib.parse import urljoin, urlparse
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class GitbookLoader(WebBaseLoader):
"""Load GitBook data.
1. load from either a single page, or
2. load all (relative) paths in the navbar.
"""
def __init__(
self,
web_page: str,
load_all_paths: bool = False,
base_url: Optional[str] = None,
content_selector: str = "main",
):
"""Initialize with web page and whether to load all paths.
Args:
web_page: The web page to load or the starting point from where
relative paths are discovered.
load_all_paths: If set to True, all relative paths in the navbar
are loaded instead of only `web_page`.
base_url: If `load_all_paths` is True, the relative paths are
appended to this base url. Defaults to `web_page` if not set.
"""
self.base_url = base_url or web_page
if self.base_url.endswith("/"):
self.base_url = self.base_url[:-1]
if load_all_paths:
# set web_path to the sitemap if we want to crawl all paths
web_paths = f"{self.base_url}/sitemap.xml"
else:
web_paths = web_page
super().__init__(web_paths)
self.load_all_paths = load_all_paths
self.content_selector = content_selector
[docs] def load(self) -> List[Document]:
"""Fetch text from one single GitBook page."""
if self.load_all_paths:
soup_info = self.scrape()
relative_paths = self._get_paths(soup_info)
documents = []
for path in relative_paths:
url = urljoin(self.base_url, path)
print(f"Fetching text from {url}")
soup_info = self._scrape(url)
documents.append(self._get_document(soup_info, url))
return [d for d in documents if d]
else:
soup_info = self.scrape()
documents = [self._get_document(soup_info, self.web_path)]
return [d for d in documents if d]
def _get_document(
self, soup: Any, custom_url: Optional[str] = None
) -> Optional[Document]:
"""Fetch content from page and return Document."""
page_content_raw = soup.find(self.content_selector)
if not page_content_raw:
return None
content = page_content_raw.get_text(separator="\n").strip()
title_if_exists = page_content_raw.find("h1")
title = title_if_exists.text if title_if_exists else ""
metadata = {"source": custom_url or self.web_path, "title": title}
return Document(page_content=content, metadata=metadata)
def _get_paths(self, soup: Any) -> List[str]:
"""Fetch all relative paths in the navbar."""
return [urlparse(loc.text).path for loc in soup.find_all("loc")]
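A short sketch of the two modes described in the class docstring; the site URL is illustrative.

from langchain.document_loaders.gitbook import GitbookLoader

# Single page:
page_loader = GitbookLoader("https://docs.gitbook.com")  # illustrative GitBook site
docs = page_loader.load()

# Every path listed in the site's sitemap.xml:
site_loader = GitbookLoader("https://docs.gitbook.com", load_all_paths=True)
all_docs = site_loader.load()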
Source code for langchain.document_loaders.dataframe
"""Load from Dataframe object"""
from typing import Any, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class DataFrameLoader(BaseLoader):
"""Load Pandas DataFrames."""
def __init__(self, data_frame: Any, page_content_column: str = "text"):
"""Initialize with dataframe object."""
import pandas as pd
if not isinstance(data_frame, pd.DataFrame):
raise ValueError(
f"Expected data_frame to be a pd.DataFrame, got {type(data_frame)}"
)
self.data_frame = data_frame
self.page_content_column = page_content_column
[docs] def load(self) -> List[Document]:
"""Load from the dataframe."""
result = []
# For very large dataframes, this needs to yield instead of building a list
# but that would require changing return type to a generator for BaseLoader
# and all its subclasses, which is a bigger refactor. Marking as future TODO.
# This change will allow us to extend this to Spark and Dask dataframes.
for _, row in self.data_frame.iterrows():
text = row[self.page_content_column]
metadata = row.to_dict()
metadata.pop(self.page_content_column)
result.append(Document(page_content=text, metadata=metadata))
return result
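A self-contained sketch: a toy DataFrame whose text column becomes page_content while the remaining columns become metadata.

import pandas as pd

from langchain.document_loaders.dataframe import DataFrameLoader

df = pd.DataFrame({"text": ["hello world", "goodbye world"], "author": ["alice", "bob"]})
loader = DataFrameLoader(df, page_content_column="text")
docs = loader.load()
# -> [Document(page_content="hello world", metadata={"author": "alice"}), ...]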
By Harrison Chase | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html |
db484fc02657-1 | return result
By Harrison Chase
© Copyright 2023, Harrison Chase.
Last updated on Apr 26, 2023. | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/dataframe.html |
Source code for langchain.document_loaders.text
from typing import List, Optional
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class TextLoader(BaseLoader):
"""Load text files."""
def __init__(self, file_path: str, encoding: Optional[str] = None):
"""Initialize with file path."""
self.file_path = file_path
self.encoding = encoding
[docs] def load(self) -> List[Document]:
"""Load from file path."""
with open(self.file_path, encoding=self.encoding) as f:
text = f.read()
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
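Example usage (illustrative; "notes.txt" is a placeholder path):
from langchain.document_loaders.text import TextLoader
loader = TextLoader("notes.txt", encoding="utf-8")
docs = loader.load()  # a single Document with {"source": "notes.txt"}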
Source code for langchain.document_loaders.ifixit
"""Loader that loads iFixit data."""
from typing import List, Optional
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.web_base import WebBaseLoader
IFIXIT_BASE_URL = "https://www.ifixit.com/api/2.0"
[docs]class IFixitLoader(BaseLoader):
"""Load iFixit repair guides, device wikis and answers.
iFixit is the largest, open repair community on the web. The site contains nearly
100k repair manuals, 200k Questions & Answers on 42k devices, and all the data is
licensed under CC-BY.
This loader will allow you to download the text of a repair guide, text of Q&A's
and wikis from devices on iFixit using their open APIs and web scraping.
"""
def __init__(self, web_path: str):
"""Initialize with web path."""
if not web_path.startswith("https://www.ifixit.com"):
raise ValueError("web path must start with 'https://www.ifixit.com'")
path = web_path.replace("https://www.ifixit.com", "")
allowed_paths = ["/Device", "/Guide", "/Answers", "/Teardown"]
""" TODO: Add /Wiki """
if not any(path.startswith(allowed_path) for allowed_path in allowed_paths):
raise ValueError(
"web path must start with /Device, /Guide, /Teardown or /Answers"
)
pieces = [x for x in path.split("/") if x]
"""Teardowns are just guides by a different name"""
self.page_type = pieces[0] if pieces[0] != "Teardown" else "Guide"
if self.page_type == "Guide" or self.page_type == "Answers":
self.id = pieces[2]
else:
self.id = pieces[1]
self.web_path = web_path
[docs] def load(self) -> List[Document]:
if self.page_type == "Device":
return self.load_device()
elif self.page_type == "Guide" or self.page_type == "Teardown":
return self.load_guide()
elif self.page_type == "Answers":
return self.load_questions_and_answers()
else:
raise ValueError("Unknown page type: " + self.page_type)
[docs] @staticmethod
def load_suggestions(query: str = "", doc_type: str = "all") -> List[Document]:
res = requests.get(
IFIXIT_BASE_URL + "/suggest/" + query + "?doctypes=" + doc_type
)
if res.status_code != 200:
raise ValueError(
'Could not load suggestions for "' + query + '"\n' + res.json()
)
data = res.json()
results = data["results"]
output = []
for result in results:
try:
loader = IFixitLoader(result["url"])
if loader.page_type == "Device":
output += loader.load_device(include_guides=False)
else:
output += loader.load()
except ValueError:
continue
return output
[docs] def load_questions_and_answers(
self, url_override: Optional[str] = None
) -> List[Document]:
loader = WebBaseLoader(self.web_path if url_override is None else url_override)
soup = loader.scrape()
output = []
title = soup.find("h1", "post-title").text
output.append("# " + title)
output.append(soup.select_one(".post-content .post-text").text.strip())
answersHeader = soup.find("div", "post-answers-header")
if answersHeader:
output.append("\n## " + answersHeader.text.strip())
for answer in soup.select(".js-answers-list .post.post-answer"):
if answer.has_attr("itemprop") and "acceptedAnswer" in answer["itemprop"]:
output.append("\n### Accepted Answer")
elif "post-helpful" in answer["class"]:
output.append("\n### Most Helpful Answer")
else:
output.append("\n### Other Answer")
output += [
a.text.strip() for a in answer.select(".post-content .post-text")
]
output.append("\n")
text = "\n".join(output).strip()
metadata = {"source": self.web_path, "title": title}
return [Document(page_content=text, metadata=metadata)]
[docs] def load_device(
self, url_override: Optional[str] = None, include_guides: bool = True
) -> List[Document]:
documents = []
if url_override is None:
url = IFIXIT_BASE_URL + "/wikis/CATEGORY/" + self.id
else:
url = url_override
res = requests.get(url)
data = res.json()
text = "\n".join(
[
data[key]
for key in ["title", "description", "contents_raw"]
if key in data
]
).strip()
metadata = {"source": self.web_path, "title": data["title"]}
documents.append(Document(page_content=text, metadata=metadata))
if include_guides:
"""Load and return documents for each guide linked to from the device"""
guide_urls = [guide["url"] for guide in data["guides"]]
for guide_url in guide_urls:
documents.append(IFixitLoader(guide_url).load()[0])
return documents
[docs] def load_guide(self, url_override: Optional[str] = None) -> List[Document]:
if url_override is None:
url = IFIXIT_BASE_URL + "/guides/" + self.id
else:
url = url_override
res = requests.get(url)
if res.status_code != 200:
raise ValueError(
"Could not load guide: " + self.web_path + "\n" + res.json()
)
data = res.json()
doc_parts = ["# " + data["title"], data["introduction_raw"]]
doc_parts.append("\n\n###Tools Required:")
if len(data["tools"]) == 0:
doc_parts.append("\n - None")
else:
for tool in data["tools"]:
doc_parts.append("\n - " + tool["text"])
doc_parts.append("\n\n###Parts Required:")
if len(data["parts"]) == 0:
doc_parts.append("\n - None")
else:
for part in data["parts"]:
doc_parts.append("\n - " + part["text"])
for row in data["steps"]:
doc_parts.append(
"\n\n## "
+ (
row["title"]
if row["title"] != ""
else "Step {}".format(row["orderby"])
)
)
for line in row["lines"]:
doc_parts.append(line["text_raw"])
doc_parts.append(data["conclusion_raw"])
text = "\n".join(doc_parts)
metadata = {"source": self.web_path, "title": data["title"]}
return [Document(page_content=text, metadata=metadata)]
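Example usage (illustrative; the guide URL is a placeholder that follows the /Guide path rules enforced in __init__):
from langchain.document_loaders.ifixit import IFixitLoader
loader = IFixitLoader("https://www.ifixit.com/Guide/iPhone+6+Battery+Replacement/29363")
docs = loader.load()  # fetches the guide via the iFixit API and returns one Document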
Source code for langchain.document_loaders.confluence
"""Load Data from a Confluence Space"""
import logging
from typing import Any, Callable, List, Optional, Union
from tenacity import (
before_sleep_log,
retry,
stop_after_attempt,
wait_exponential,
)
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class ConfluenceLoader(BaseLoader):
"""
Load Confluence pages. Port of https://llamahub.ai/l/confluence
This currently supports both username/api_key and Oauth2 login.
Specify a list page_ids and/or space_key to load in the corresponding pages into
Document objects, if both are specified the union of both sets will be returned.
You can also specify a boolean `include_attachments` to include attachments, this
is set to False by default, if set to True all attachments will be downloaded and
ConfluenceReader will extract the text from the attachments and add it to the
Document object. Currently supported attachment types are: PDF, PNG, JPEG/JPG,
SVG, Word and Excel.
Hint: space_key and page_id can both be found in the URL of a page in Confluence
- https://yoursite.atlassian.com/wiki/spaces/<space_key>/pages/<page_id>
Example:
.. code-block:: python
from langchain.document_loaders import ConfluenceLoader
loader = ConfluenceLoader(
url="https://yoursite.atlassian.com/wiki",
username="me",
api_key="12345"
)
documents = loader.load(space_key="SPACE",limit=50)
:param url: _description_
:type url: str
:param api_key: _description_, defaults to None
:type api_key: str, optional
:param username: _description_, defaults to None
:type username: str, optional
:param oauth2: _description_, defaults to {}
:type oauth2: dict, optional
:param cloud: _description_, defaults to True
:type cloud: bool, optional
:param number_of_retries: How many times to retry, defaults to 3
:type number_of_retries: Optional[int], optional
:param min_retry_seconds: defaults to 2
:type min_retry_seconds: Optional[int], optional
:param max_retry_seconds: defaults to 10
:type max_retry_seconds: Optional[int], optional
:param confluence_kwargs: additional kwargs to initialize confluence with
:type confluence_kwargs: dict, optional
:raises ValueError: Errors while validating input
:raises ImportError: Required dependencies not installed.
"""
def __init__(
self,
url: str,
api_key: Optional[str] = None,
username: Optional[str] = None,
oauth2: Optional[dict] = None,
cloud: Optional[bool] = True,
number_of_retries: Optional[int] = 3,
min_retry_seconds: Optional[int] = 2,
max_retry_seconds: Optional[int] = 10,
confluence_kwargs: Optional[dict] = None,
):
confluence_kwargs = confluence_kwargs or {}
errors = ConfluenceLoader.validate_init_args(url, api_key, username, oauth2)
if errors:
raise ValueError(f"Error(s) while validating input: {errors}")
self.base_url = url
self.number_of_retries = number_of_retries
self.min_retry_seconds = min_retry_seconds
self.max_retry_seconds = max_retry_seconds
try:
from atlassian import Confluence # noqa: F401
except ImportError:
raise ImportError(
"`atlassian` package not found, please run"
"`pip install atlassian-python-api`"
)
if oauth2:
self.confluence = Confluence(
url=url, oauth2=oauth2, cloud=cloud, **confluence_kwargs
)
else:
self.confluence = Confluence(
url=url,
username=username,
password=api_key,
cloud=cloud,
**confluence_kwargs,
)
[docs] @staticmethod
def validate_init_args(
url: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
oauth2: Optional[dict] = None,
) -> Union[List, None]:
"""Validates proper combinations of init arguments"""
errors = []
if url is None:
errors.append("Must provide `base_url`")
if (api_key and not username) or (username and not api_key):
errors.append(
"If one of `api_key` or `username` is provided,"
"the other must be as well."
)
if (api_key or username) and oauth2:
errors.append(
"Cannot provide a value for `api_key` and/or"
"`username` and provide a value for `oauth2`"
)
if oauth2 and oauth2.keys() != [
"access_token",
"access_token_secret",
"consumer_key",
"key_cert",
]:
errors.append(
"You have either ommited require keys or added extra"
"keys to the oauth2 dictionary. key values should be"
"`['access_token', 'access_token_secret', 'consumer_key', 'key_cert']`"
)
if errors:
return errors
return None
[docs] def load(
self,
space_key: Optional[str] = None,
page_ids: Optional[List[str]] = None,
label: Optional[str] = None,
cql: Optional[str] = None,
include_attachments: bool = False,
include_comments: bool = False,
limit: Optional[int] = 50,
max_pages: Optional[int] = 1000,
) -> List[Document]:
"""
:param space_key: Space key retrieved from a confluence URL, defaults to None
:type space_key: Optional[str], optional
:param page_ids: List of specific page IDs to load, defaults to None
:type page_ids: Optional[List[str]], optional
:param label: Get all pages with this label, defaults to None
:type label: Optional[str], optional
:param cql: CQL Expression, defaults to None
:type cql: Optional[str], optional
:param include_attachments: defaults to False
:type include_attachments: bool, optional
:param include_comments: defaults to False
:type include_comments: bool, optional
:param limit: Maximum number of pages to retrieve per request, defaults to 50
:type limit: int, optional
:param max_pages: Maximum number of pages to retrieve in total, defaults 1000
:type max_pages: int, optional
:raises ValueError: _description_
:raises ImportError: _description_
:return: _description_
:rtype: List[Document]
""" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html |
1c8ab4f7e063-5 | :return: _description_
:rtype: List[Document]
"""
if not space_key and not page_ids and not label and not cql:
raise ValueError(
"Must specify at least one among `space_key`, `page_ids`,"
"`label`, `cql` parameters."
)
docs = []
if space_key:
pages = self.paginate_request(
self.confluence.get_all_pages_from_space,
space=space_key,
limit=limit,
max_pages=max_pages,
expand="body.storage.value",
)
for page in pages:
doc = self.process_page(page, include_attachments, include_comments)
docs.append(doc)
if label:
pages = self.paginate_request(
self.confluence.get_all_pages_by_label,
label=label,
limit=limit,
max_pages=max_pages,
expand="body.storage.value",
)
for page in pages:
doc = self.process_page(page, include_attachments, include_comments)
docs.append(doc)
if cql:
pages = self.paginate_request(
self.confluence.cql,
cql=cql,
limit=limit,
max_pages=max_pages,
expand="body.storage.value",
)
for page in pages:
doc = self.process_page(page, include_attachments, include_comments)
docs.append(doc)
if page_ids:
for page_id in page_ids:
get_page = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1, # type: ignore[arg-type]
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(self.confluence.get_page_by_id)
page = get_page(page_id=page_id, expand="body.storage.value")
doc = self.process_page(page, include_attachments, include_comments)
docs.append(doc)
return docs
[docs] def paginate_request(self, retrieval_method: Callable, **kwargs: Any) -> List:
"""Paginate the various methods to retrieve groups of pages.
Unfortunately, due to page size, sometimes the Confluence API
doesn't match the limit value. If `limit` is >100 confluence
seems to cap the response to 100. Also, due to the Atlassian Python
package, we don't get the "next" values from the "_links" key because
they only return the value from the results key. So here, the pagination
starts from 0 and goes until the max_pages, getting the `limit` number
of pages with each request. We have to manually check if there
are more docs based on the length of the returned list of pages, rather than
just checking for the presence of a `next` key in the response like this page
would have you do:
https://developer.atlassian.com/server/confluence/pagination-in-the-rest-api/
:param retrieval_method: Function used to retrieve docs
:type retrieval_method: callable
:return: List of documents
:rtype: List
"""
max_pages = kwargs.pop("max_pages")
docs: List[dict] = []
while len(docs) < max_pages:
get_pages = retry(
reraise=True,
stop=stop_after_attempt(
self.number_of_retries # type: ignore[arg-type]
),
wait=wait_exponential(
multiplier=1,
min=self.min_retry_seconds, # type: ignore[arg-type]
max=self.max_retry_seconds, # type: ignore[arg-type]
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)(retrieval_method)
batch = get_pages(**kwargs, start=len(docs))
if not batch:
break
docs.extend(batch)
return docs[:max_pages]
[docs] def process_page(
self,
page: dict,
include_attachments: bool,
include_comments: bool,
) -> Document:
try:
from bs4 import BeautifulSoup # type: ignore
except ImportError:
raise ImportError(
"`beautifulsoup4` package not found, please run" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/confluence.html |
1c8ab4f7e063-8 | "`beautifulsoup4` package not found, please run"
" `pip install beautifulsoup4`"
)
if include_attachments:
attachment_texts = self.process_attachment(page["id"])
else:
attachment_texts = []
text = BeautifulSoup(
page["body"]["storage"]["value"], "lxml"
).get_text() + "".join(attachment_texts)
if include_comments:
comments = self.confluence.get_page_comments(
page["id"], expand="body.view.value", depth="all"
)["results"]
comment_texts = [
BeautifulSoup(comment["body"]["view"]["value"], "lxml").get_text()
for comment in comments
]
text = text + "".join(comment_texts)
return Document(
page_content=text,
metadata={
"title": page["title"],
"id": page["id"],
"source": self.base_url.strip("/") + page["_links"]["webui"],
},
)
[docs] def process_attachment(self, page_id: str) -> List[str]:
try:
import requests # noqa: F401
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` or `Pillow` package not found,"
"please run `pip install pytesseract pdf2image Pillow`"
)
# depending on setup you may also need to set the correct path for
# poppler and tesseract
attachments = self.confluence.get_attachments_from_content(page_id)["results"]
texts = []
for attachment in attachments:
media_type = attachment["metadata"]["mediaType"]
absolute_url = self.base_url + attachment["_links"]["download"]
title = attachment["title"]
if media_type == "application/pdf":
text = title + self.process_pdf(absolute_url)
elif (
media_type == "image/png"
or media_type == "image/jpg"
or media_type == "image/jpeg"
):
text = title + self.process_image(absolute_url)
elif (
media_type == "application/vnd.openxmlformats-officedocument"
".wordprocessingml.document"
):
text = title + self.process_doc(absolute_url)
elif media_type == "application/vnd.ms-excel":
text = title + self.process_xls(absolute_url)
elif media_type == "image/svg+xml":
text = title + self.process_svg(absolute_url)
else:
continue
texts.append(text)
return texts
[docs] def process_pdf(self, link: str) -> str:
try:
import pytesseract # noqa: F401
from pdf2image import convert_from_bytes # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `pdf2image` package not found,"
"please run `pip install pytesseract pdf2image`"
)
import pytesseract # noqa: F811
from pdf2image import convert_from_bytes # noqa: F811
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
images = convert_from_bytes(response.content)
except ValueError:
return text
for i, image in enumerate(images):
image_text = pytesseract.image_to_string(image)
text += f"Page {i + 1}:\n{image_text}\n\n"
return text
[docs] def process_image(self, link: str) -> str:
try:
from io import BytesIO # noqa: F401
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract` or `Pillow` package not found,"
"please run `pip install pytesseract Pillow`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
try:
image = Image.open(BytesIO(response.content))
except OSError:
return text
return pytesseract.image_to_string(image)
[docs] def process_doc(self, link: str) -> str:
try:
from io import BytesIO # noqa: F401
import docx2txt # noqa: F401
except ImportError:
raise ImportError(
"`docx2txt` package not found, please run `pip install docx2txt`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
file_data = BytesIO(response.content)
return docx2txt.process(file_data)
[docs] def process_xls(self, link: str) -> str:
try:
import xlrd # noqa: F401
except ImportError:
raise ImportError("`xlrd` package not found, please run `pip install xlrd`")
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
workbook = xlrd.open_workbook(file_contents=response.content)
for sheet in workbook.sheets():
text += f"{sheet.name}:\n"
for row in range(sheet.nrows):
for col in range(sheet.ncols):
text += f"{sheet.cell_value(row, col)}\t"
text += "\n"
text += "\n"
return text
[docs] def process_svg(self, link: str) -> str:
try:
from io import BytesIO # noqa: F401
import pytesseract # noqa: F401
from PIL import Image # noqa: F401
from reportlab.graphics import renderPM # noqa: F401
from reportlab.graphics.shapes import Drawing # noqa: F401
from svglib.svglib import svg2rlg # noqa: F401
except ImportError:
raise ImportError(
"`pytesseract`, `Pillow`, or `svglib` package not found,"
"please run `pip install pytesseract Pillow svglib`"
)
response = self.confluence.request(path=link, absolute=True)
text = ""
if (
response.status_code != 200
or response.content == b""
or response.content is None
):
return text
drawing = svg2rlg(BytesIO(response.content))
img_data = BytesIO()
renderPM.drawToFile(drawing, img_data, fmt="PNG")
img_data.seek(0)
image = Image.open(img_data)
return pytesseract.image_to_string(image)
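A second usage sketch complementing the docstring example above (illustrative; the URL, credentials and page ID are placeholders, and attachment parsing needs the optional OCR dependencies listed in process_attachment):
from langchain.document_loaders.confluence import ConfluenceLoader
loader = ConfluenceLoader(
    url="https://yoursite.atlassian.com/wiki", username="me", api_key="12345"
)
docs = loader.load(page_ids=["123456"], include_attachments=True, include_comments=True)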
Source code for langchain.document_loaders.word_document
"""Loader that loads word documents."""
import os
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredWordDocumentLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load word documents."""
def _get_elements(self) -> List:
from unstructured.__version__ import __version__ as __unstructured_version__
from unstructured.file_utils.filetype import FileType, detect_filetype
unstructured_version = tuple(
[int(x) for x in __unstructured_version__.split(".")]
)
# NOTE(MthwRobinson) - magic will raise an import error if the libmagic
# system dependency isn't installed. If it's not installed, we'll just
# check the file extension
try:
import magic # noqa: F401
is_doc = detect_filetype(self.file_path) == FileType.DOC
except ImportError:
_, extension = os.path.splitext(self.file_path)
is_doc = extension == ".doc"
if is_doc and unstructured_version < (0, 4, 11):
raise ValueError(
f"You are on unstructured version {__unstructured_version__}. "
"Partitioning .doc files is only supported in unstructured>=0.4.11. "
"Please upgrade the unstructured package and try again."
)
if is_doc:
from unstructured.partition.doc import partition_doc
return partition_doc(filename=self.file_path, **self.unstructured_kwargs)
else:
from unstructured.partition.docx import partition_docx
return partition_docx(filename=self.file_path, **self.unstructured_kwargs)
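Example usage (illustrative; "report.docx" is a placeholder and the constructor comes from the UnstructuredFileLoader base class):
from langchain.document_loaders.word_document import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader("report.docx")
docs = loader.load()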
Source code for langchain.document_loaders.chatgpt
"""Load conversations from ChatGPT data export"""
import datetime
import json
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def concatenate_rows(message: dict, title: str) -> str:
if not message:
return ""
sender = message["author"]["role"] if message["author"] else "unknown"
text = message["content"]["parts"][0]
date = datetime.datetime.fromtimestamp(message["create_time"]).strftime(
"%Y-%m-%d %H:%M:%S"
)
return f"{title} - {sender} on {date}: {text}\n\n"
[docs]class ChatGPTLoader(BaseLoader):
"""Loader that loads conversations from exported ChatGPT data."""
def __init__(self, log_file: str, num_logs: int = -1):
self.log_file = log_file
self.num_logs = num_logs
[docs] def load(self) -> List[Document]:
with open(self.log_file, encoding="utf8") as f:
data = json.load(f)[: self.num_logs] if self.num_logs else json.load(f)
documents = []
for d in data:
title = d["title"]
messages = d["mapping"]
text = "".join(
[
concatenate_rows(messages[key]["message"], title)
for idx, key in enumerate(messages)
if not (
idx == 0
and messages[key]["message"]["author"]["role"] == "system"
)
]
)
metadata = {"source": str(self.log_file)}
documents.append(Document(page_content=text, metadata=metadata))
return documents
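Example usage (illustrative; "conversations.json" is a placeholder for the export file):
from langchain.document_loaders.chatgpt import ChatGPTLoader
loader = ChatGPTLoader(log_file="conversations.json", num_logs=1)
docs = loader.load()  # one Document per exported conversation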
Source code for langchain.document_loaders.html
"""Loader that uses unstructured to load HTML files."""
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class UnstructuredHTMLLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load HTML files."""
def _get_elements(self) -> List:
from unstructured.partition.html import partition_html
return partition_html(filename=self.file_path, **self.unstructured_kwargs)
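Example usage (illustrative; "page.html" is a placeholder path):
from langchain.document_loaders.html import UnstructuredHTMLLoader
loader = UnstructuredHTMLLoader("page.html")
docs = loader.load()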
Source code for langchain.document_loaders.roam
"""Loader that loads Roam directory dump."""
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
[docs]class RoamLoader(BaseLoader):
"""Loader that loads Roam files from disk."""
def __init__(self, path: str):
"""Initialize with path."""
self.file_path = path
[docs] def load(self) -> List[Document]:
"""Load documents."""
ps = list(Path(self.file_path).glob("**/*.md"))
docs = []
for p in ps:
with open(p) as f:
text = f.read()
metadata = {"source": str(p)}
docs.append(Document(page_content=text, metadata=metadata))
return docs
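Example usage (illustrative; "Roam_DB" is a placeholder directory containing an exported set of .md files):
from langchain.document_loaders.roam import RoamLoader
loader = RoamLoader("Roam_DB")
docs = loader.load()  # one Document per markdown file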
Source code for langchain.document_loaders.evernote
"""Load documents from Evernote.
https://gist.github.com/foxmask/7b29c43a161e001ff04afdb2f181e31c
"""
import hashlib
from base64 import b64decode
from time import strptime
from typing import Any, Dict, List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
def _parse_content(content: str) -> str:
from pypandoc import convert_text
text = convert_text(content, "org", format="html")
return text
def _parse_resource(resource: list) -> dict:
rsc_dict: Dict[str, Any] = {}
for elem in resource:
if elem.tag == "data":
# Some times elem.text is None
rsc_dict[elem.tag] = b64decode(elem.text) if elem.text else b""
rsc_dict["hash"] = hashlib.md5(rsc_dict[elem.tag]).hexdigest()
else:
rsc_dict[elem.tag] = elem.text
return rsc_dict
def _parse_note(note: List) -> dict:
note_dict: Dict[str, Any] = {}
resources = []
for elem in note:
if elem.tag == "content":
note_dict[elem.tag] = _parse_content(elem.text)
# A copy of original content
note_dict["content-raw"] = elem.text
elif elem.tag == "resource": | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html |
f144930656d9-1 | elif elem.tag == "resource":
resources.append(_parse_resource(elem))
elif elem.tag == "created" or elem.tag == "updated":
note_dict[elem.tag] = strptime(elem.text, "%Y%m%dT%H%M%SZ")
else:
note_dict[elem.tag] = elem.text
note_dict["resource"] = resources
return note_dict
def _parse_note_xml(xml_file: str) -> str:
"""Parse Evernote xml."""
# Without huge_tree set to True, parser may complain about huge text node
# Try to recover, because there may be " ", which will cause
# "XMLSyntaxError: Entity 'nbsp' not defined"
from lxml import etree
context = etree.iterparse(
xml_file, encoding="utf-8", strip_cdata=False, huge_tree=True, recover=True
)
result_string = ""
for action, elem in context:
if elem.tag == "note":
result_string += _parse_note(elem)["content"]
return result_string
[docs]class EverNoteLoader(BaseLoader):
"""Loader to load in EverNote files.."""
def __init__(self, file_path: str):
"""Initialize with file path."""
self.file_path = file_path
[docs] def load(self) -> List[Document]:
"""Load document from EverNote file.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/evernote.html |
f144930656d9-2 | """Load document from EverNote file."""
text = _parse_note_xml(self.file_path)
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
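Example usage (illustrative; "my_notebook.enex" is a placeholder export file, and pypandoc and lxml must be installed):
from langchain.document_loaders.evernote import EverNoteLoader
loader = EverNoteLoader("my_notebook.enex")
docs = loader.load()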
Source code for langchain.document_loaders.web_base
"""Web base loader class."""
import asyncio
import logging
import warnings
from typing import Any, List, Optional, Union
import aiohttp
import requests
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
default_header_template = {
"User-Agent": "",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*"
";q=0.8",
"Accept-Language": "en-US,en;q=0.5",
"Referer": "https://www.google.com/",
"DNT": "1",
"Connection": "keep-alive",
"Upgrade-Insecure-Requests": "1",
}
def _build_metadata(soup: Any, url: str) -> dict:
"""Build metadata from BeautifulSoup output."""
metadata = {"source": url}
if title := soup.find("title"):
metadata["title"] = title.get_text()
if description := soup.find("meta", attrs={"name": "description"}):
metadata["description"] = description.get("content", None)
if html := soup.find("html"):
metadata["language"] = html.get("lang", None)
return metadata
[docs]class WebBaseLoader(BaseLoader):
"""Loader that uses urllib and beautiful soup to load webpages."""
web_paths: List[str]
requests_per_second: int = 2
"""Max number of concurrent requests to make."""
default_parser: str = "html.parser"
"""Default parser to use for BeautifulSoup."""
def __init__(
self, web_path: Union[str, List[str]], header_template: Optional[dict] = None
):
"""Initialize with webpage path."""
# TODO: Deprecate web_path in favor of web_paths, and remove this
# left like this because there are a number of loaders that expect single
# urls
if isinstance(web_path, str):
self.web_paths = [web_path]
elif isinstance(web_path, List):
self.web_paths = web_path
self.session = requests.Session()
try:
import bs4 # noqa:F401
except ImportError:
raise ValueError(
"bs4 package not found, please install it with " "`pip install bs4`"
)
try:
from fake_useragent import UserAgent
headers = header_template or default_header_template
headers["User-Agent"] = UserAgent().random
self.session.headers = dict(headers)
except ImportError:
logger.info(
"fake_useragent not found, using default user agent." | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/web_base.html |
6921466f9163-2 | logger.info(
"fake_useragent not found, using default user agent."
"To get a realistic header for requests, `pip install fake_useragent`."
)
@property
def web_path(self) -> str:
if len(self.web_paths) > 1:
raise ValueError("Multiple webpaths found.")
return self.web_paths[0]
async def _fetch(
self, url: str, retries: int = 3, cooldown: int = 2, backoff: float = 1.5
) -> str:
async with aiohttp.ClientSession() as session:
for i in range(retries):
try:
async with session.get(
url, headers=self.session.headers
) as response:
return await response.text()
except aiohttp.ClientConnectionError as e:
if i == retries - 1:
raise
else:
logger.warning(
f"Error fetching {url} with attempt "
f"{i + 1}/{retries}: {e}. Retrying..."
)
await asyncio.sleep(cooldown * backoff**i)
raise ValueError("retry count exceeded")
async def _fetch_with_rate_limit(
self, url: str, semaphore: asyncio.Semaphore
) -> str:
async with semaphore:
return await self._fetch(url)
[docs] async def fetch_all(self, urls: List[str]) -> Any:
"""Fetch all urls concurrently with rate limiting.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/document_loaders/web_base.html |
6921466f9163-3 | """Fetch all urls concurrently with rate limiting."""
semaphore = asyncio.Semaphore(self.requests_per_second)
tasks = []
for url in urls:
task = asyncio.ensure_future(self._fetch_with_rate_limit(url, semaphore))
tasks.append(task)
try:
from tqdm.asyncio import tqdm_asyncio
return await tqdm_asyncio.gather(
*tasks, desc="Fetching pages", ascii=True, mininterval=1
)
except ImportError:
warnings.warn("For better logging of progress, `pip install tqdm`")
return await asyncio.gather(*tasks)
@staticmethod
def _check_parser(parser: str) -> None:
"""Check that parser is valid for bs4."""
valid_parsers = ["html.parser", "lxml", "xml", "lxml-xml", "html5lib"]
if parser not in valid_parsers:
raise ValueError(
"`parser` must be one of " + ", ".join(valid_parsers) + "."
)
[docs] def scrape_all(self, urls: List[str], parser: Union[str, None] = None) -> List[Any]:
"""Fetch all urls, then return soups for all results."""
from bs4 import BeautifulSoup
results = asyncio.run(self.fetch_all(urls))
final_results = []
for i, result in enumerate(results):
url = urls[i]
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
final_results.append(BeautifulSoup(result, parser))
return final_results
def _scrape(self, url: str, parser: Union[str, None] = None) -> Any:
from bs4 import BeautifulSoup
if parser is None:
if url.endswith(".xml"):
parser = "xml"
else:
parser = self.default_parser
self._check_parser(parser)
html_doc = self.session.get(url)
return BeautifulSoup(html_doc.text, parser)
[docs] def scrape(self, parser: Union[str, None] = None) -> Any:
"""Scrape data from webpage and return it in BeautifulSoup format."""
if parser is None:
parser = self.default_parser
return self._scrape(self.web_path, parser)
[docs] def load(self) -> List[Document]:
"""Load text from the url(s) in web_path."""
docs = []
for path in self.web_paths:
soup = self._scrape(path)
text = soup.get_text()
metadata = _build_metadata(soup, path)
docs.append(Document(page_content=text, metadata=metadata))
return docs
[docs] def aload(self) -> List[Document]:
"""Load text from the urls in web_path async into Documents."""
results = self.scrape_all(self.web_paths)
docs = []
for i in range(len(results)):
soup = results[i]
text = soup.get_text()
metadata = _build_metadata(soup, self.web_paths[i])
docs.append(Document(page_content=text, metadata=metadata))
return docs
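Example usage (illustrative; the URLs are placeholders):
from langchain.document_loaders.web_base import WebBaseLoader
loader = WebBaseLoader(["https://example.com", "https://example.org"])
loader.requests_per_second = 1
docs = loader.aload()  # concurrent fetch; loader.load() is the sequential path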
Source code for langchain.document_loaders.s3_file
"""Loading logic for loading documents from an s3 file."""
import os
import tempfile
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders.unstructured import UnstructuredFileLoader
[docs]class S3FileLoader(BaseLoader):
"""Loading logic for loading documents from s3."""
def __init__(self, bucket: str, key: str):
"""Initialize with bucket and key name."""
self.bucket = bucket
self.key = key
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ValueError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
s3 = boto3.client("s3")
with tempfile.TemporaryDirectory() as temp_dir:
file_path = f"{temp_dir}/{self.key}"
os.makedirs(os.path.dirname(file_path), exist_ok=True)
s3.download_file(self.bucket, self.key, file_path)
loader = UnstructuredFileLoader(file_path)
return loader.load()
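Example usage (illustrative; the bucket and key are placeholders, and boto3 resolves credentials from the environment):
from langchain.document_loaders.s3_file import S3FileLoader
loader = S3FileLoader(bucket="my-bucket", key="docs/handbook.pdf")
docs = loader.load()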
Source code for langchain.document_loaders.html_bs
"""Loader that uses bs4 to load HTML files, enriching metadata with page title."""
import logging
from typing import Dict, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
[docs]class BSHTMLLoader(BaseLoader):
"""Loader that uses beautiful soup to parse HTML files."""
def __init__(
self,
file_path: str,
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
) -> None:
"""Initialise with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object."""
try:
import bs4 # noqa:F401
except ImportError:
raise ValueError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
[docs] def load(self) -> List[Document]:
from bs4 import BeautifulSoup
"""Load HTML document into document objects."""
with open(self.file_path, "r", encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
text = soup.get_text()
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
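Example usage (illustrative; "page.html" is a placeholder path):
from langchain.document_loaders.html_bs import BSHTMLLoader
loader = BSHTMLLoader("page.html", open_encoding="utf-8")
docs = loader.load()  # metadata carries the page <title> and the source path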
Source code for langchain.document_loaders.imsdb
"""Loader that loads IMSDb."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.web_base import WebBaseLoader
[docs]class IMSDbLoader(WebBaseLoader):
"""Loader that loads IMSDb webpages."""
[docs] def load(self) -> List[Document]:
"""Load webpage."""
soup = self.scrape()
text = soup.select_one("td[class='scrtext']").text
metadata = {"source": self.web_path}
return [Document(page_content=text, metadata=metadata)]
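Example usage (illustrative; the script URL is a placeholder following IMSDb's usual layout):
from langchain.document_loaders.imsdb import IMSDbLoader
loader = IMSDbLoader("https://imsdb.com/scripts/BlacKkKlansman.html")
docs = loader.load()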
Source code for langchain.document_loaders.azure_blob_storage_container
"""Loading logic for loading documents from an Azure Blob Storage container."""
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.azure_blob_storage_file import (
AzureBlobStorageFileLoader,
)
from langchain.document_loaders.base import BaseLoader
[docs]class AzureBlobStorageContainerLoader(BaseLoader):
"""Loading logic for loading documents from Azure Blob Storage."""
def __init__(self, conn_str: str, container: str, prefix: str = ""):
"""Initialize with connection string, container and blob prefix."""
self.conn_str = conn_str
self.container = container
self.prefix = prefix
[docs] def load(self) -> List[Document]:
"""Load documents."""
try:
from azure.storage.blob import ContainerClient
except ImportError as exc:
raise ValueError(
"Could not import azure storage blob python package. "
"Please install it with `pip install azure-storage-blob`."
) from exc
container = ContainerClient.from_connection_string(
conn_str=self.conn_str, container_name=self.container
)
docs = []
blob_list = container.list_blobs(name_starts_with=self.prefix)
for blob in blob_list:
loader = AzureBlobStorageFileLoader(
self.conn_str, self.container, blob.name # type: ignore
)
docs.extend(loader.load())
return docs
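Example usage (illustrative; the connection string, container and prefix are placeholders):
from langchain.document_loaders.azure_blob_storage_container import AzureBlobStorageContainerLoader
loader = AzureBlobStorageContainerLoader(
    conn_str="<connection_string>", container="docs", prefix="reports/"
)
docs = loader.load()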
Source code for langchain.document_loaders.sitemap
"""Loader that fetches a sitemap and loads those URLs."""
import re
from typing import Any, Callable, List, Optional
from langchain.document_loaders.web_base import WebBaseLoader
from langchain.schema import Document
def _default_parsing_function(content: Any) -> str:
return str(content.get_text())
[docs]class SitemapLoader(WebBaseLoader):
"""Loader that fetches a sitemap and loads those URLs."""
def __init__(
self,
web_path: str,
filter_urls: Optional[List[str]] = None,
parsing_function: Optional[Callable] = None,
):
"""Initialize with webpage path and optional filter URLs.
Args:
web_path: url of the sitemap
filter_urls: list of strings or regexes that will be applied to filter the
urls that are parsed and loaded
parsing_function: Function to parse bs4.Soup output
"""
try:
import lxml # noqa:F401
except ImportError:
raise ValueError(
"lxml package not found, please install it with " "`pip install lxml`"
)
super().__init__(web_path)
self.filter_urls = filter_urls
self.parsing_function = parsing_function or _default_parsing_function
[docs] def parse_sitemap(self, soup: Any) -> List[dict]:
"""Parse sitemap xml and load into a list of dicts."""
els = []
for url in soup.find_all("url"):
loc = url.find("loc")
if not loc:
continue
if self.filter_urls and not any(
re.match(r, loc.text) for r in self.filter_urls
):
continue
els.append(
{
tag: prop.text
for tag in ["loc", "lastmod", "changefreq", "priority"]
if (prop := url.find(tag))
}
)
for sitemap in soup.find_all("sitemap"):
loc = sitemap.find("loc")
if not loc:
continue
soup_child = self.scrape_all([loc.text], "xml")[0]
els.extend(self.parse_sitemap(soup_child))
return els
[docs] def load(self) -> List[Document]:
"""Load sitemap."""
soup = self.scrape("xml")
els = self.parse_sitemap(soup)
results = self.scrape_all([el["loc"].strip() for el in els if "loc" in el])
return [
Document(
page_content=self.parsing_function(results[i]),
metadata={**{"source": els[i]["loc"]}, **els[i]},
)
for i in range(len(results))
]
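Example usage (illustrative; the sitemap URL and filter regex are placeholders):
from langchain.document_loaders.sitemap import SitemapLoader
loader = SitemapLoader(
    "https://docs.example.com/sitemap.xml",
    filter_urls=["https://docs.example.com/en/latest/"],
)
docs = loader.load()  # fetches every matching URL listed in the sitemap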
Source code for langchain.llms.llamacpp
"""Wrapper around llama.cpp."""
import logging
from typing import Any, Dict, Generator, List, Optional
from pydantic import Field, root_validator
from langchain.llms.base import LLM
logger = logging.getLogger(__name__)
[docs]class LlamaCpp(LLM):
"""Wrapper around the llama.cpp model.
To use, you should have the llama-cpp-python library installed, and provide the
path to the Llama model as a named parameter to the constructor.
Check out: https://github.com/abetlen/llama-cpp-python
Example:
.. code-block:: python
from langchain.llms import LlamaCpp
llm = LlamaCpp(model_path="/path/to/llama/model")
"""
client: Any #: :meta private:
model_path: str
"""The path to the Llama model file."""
lora_base: Optional[str] = None
"""The path to the Llama LoRA base model."""
lora_path: Optional[str] = None
"""The path to the Llama LoRA. If None, no LoRa is loaded."""
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(-1, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(True, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
n_threads: Optional[int] = Field(None, alias="n_threads")
"""Number of threads to use.
If None, the number of threads is automatically determined."""
n_batch: Optional[int] = Field(8, alias="n_batch")
"""Number of tokens to process in parallel.
Should be a number between 1 and n_ctx."""
suffix: Optional[str] = Field(None)
"""A suffix to append to the generated text. If None, no suffix is appended."""
max_tokens: Optional[int] = 256
"""The maximum number of tokens to generate."""
temperature: Optional[float] = 0.8
"""The temperature to use for sampling.""" | /content/https://python.langchain.com/en/latest/_modules/langchain/llms/llamacpp.html |
8287855dad66-2 | """The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
logprobs: Optional[int] = Field(None)
"""The number of logprobs to return. If None, no logprobs are returned."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_penalty: Optional[float] = 1.1
"""The penalty to apply to repeated tokens."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
last_n_tokens_size: Optional[int] = 64
"""The number of tokens to look back when applying the repeat_penalty."""
use_mmap: Optional[bool] = True
"""Whether to keep the model loaded in RAM"""
streaming: bool = True
"""Whether to stream the results, token by token."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
lora_path = values["lora_path"]
lora_base = values["lora_base"]
n_ctx = values["n_ctx"]