| text | id | metadata |
|---|---|---|
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_jobs(workflow_run_id, token=None):
"""Extract jobs in a GitHub Actions workflow run"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
result = requests.get(url, headers=headers).json()
jobs = []
try:
jobs.extend(result["jobs"])
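# The jobs endpoint returns at most 100 results per page, so compute how many extra pages remain and fetch them too.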
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
jobs.extend(result["jobs"])
return jobs
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return []
def get_job_links(workflow_run_id, token=None):
"""Extract job names and their job links in a GitHub Actions workflow run"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
result = requests.get(url, headers=headers).json()
job_links = {}
try:
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return {}
def get_artifacts_links(workflow_run_id, token=None):
"""Get all artifact links from a workflow run"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
result = requests.get(url, headers=headers).json()
artifacts = {}
try:
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
for i in range(pages_to_iterate_over):
result = requests.get(url + f"&page={i + 2}", headers=headers).json()
artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
"""Download a GitHub Action artifact from a URL.
The URL is of the form `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`,
but it can't be used to download directly. We need to get a redirect URL first.
See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact
"""
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
result = requests.get(artifact_url, headers=headers, allow_redirects=False)
download_url = result.headers["Location"]
response = requests.get(download_url, allow_redirects=True)
file_path = os.path.join(output_dir, f"{artifact_name}.zip")
with open(file_path, "wb") as fp:
fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
"""Extract errors from a downloaded artifact (in .zip format)"""
errors = []
failed_tests = []
job_name = None
with zipfile.ZipFile(artifact_zip_path) as z:
for filename in z.namelist():
if not os.path.isdir(filename):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(filename) as f:
for line in f:
line = line.decode("UTF-8").strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
error_line = line[: line.index(": ")]
error = line[line.index(": ") + len(": ") :]
errors.append([error_line, error])
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED "):
# `test` is the test method that failed
test = line[len("FAILED ") :]
failed_tests.append(test)
elif filename == "job_name.txt":
job_name = line
if len(errors) != len(failed_tests):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
" problem."
)
job_link = None
if job_name and job_links:
job_link = job_links.get(job_name, None)
# A list with elements of the form (line of error, error, failed test, job link)
result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
return result
def get_all_errors(artifact_dir, job_links=None):
"""Extract errors from all artifact files"""
errors = []
paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
for p in paths:
errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
return errors
def reduce_by_error(logs, error_filter=None):
"""count each error"""
counter = Counter()
counter.update([x[1] for x in logs])
counts = counter.most_common()
r = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
return r
def get_model(test):
"""Get the model name from a test method"""
test = test.split("::")[0]
if test.startswith("tests/models/"):
test = test.split("/")[2]
else:
test = None
return test
def reduce_by_model(logs, error_filter=None):
"""count each error per model"""
logs = [(x[0], x[1], get_model(x[2])) for x in logs]
logs = [x for x in logs if x[2] is not None]
tests = {x[2] for x in logs}
r = {}
for test in tests:
counter = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test])
counts = counter.most_common()
error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
n_errors = sum(error_counts.values())
if n_errors > 0:
r[test] = {"count": n_errors, "errors": error_counts}
r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
return r
def make_github_table(reduced_by_error):
header = "| no. | error | status |"
sep = "|-:|:-|:-|"
lines = [header, sep]
for error in reduced_by_error:
count = reduced_by_error[error]["count"]
line = f"| {count} | {error[:100]} | |"
lines.append(line)
return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
header = "| model | no. of errors | major error | count |"
sep = "|-:|-:|-:|-:|"
lines = [header, sep]
for model in reduced_by_model:
count = reduced_by_model[model]["count"]
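# `errors` is already sorted by count in `reduce_by_model`, so the first entry is the most frequent error.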
error, _count = list(reduced_by_model[model]["errors"].items())[0]
line = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(line)
return "\n".join(lines)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_job_links = get_job_links(args.workflow_run_id, token=args.token)
job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
index = k.find(" / ")
k = k[index + len(" / ") :]
job_links[k] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
reduced_by_error = reduce_by_error(errors)
reduced_by_model = reduce_by_model(errors)
s1 = make_github_table(reduced_by_error)
s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(s1)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(s2)
| transformers/utils/get_ci_error_statistics.py/0 | {
"file_path": "transformers/utils/get_ci_error_statistics.py",
"repo_id": "transformers",
"token_count": 4815
} |
"""An internal script to process `new_model_failures_with_bad_commit.json` produced by `utils/check_bad_commit.py`.
This is used by `.github/workflows/check_failed_model_tests.yml` to produce a slack report of the following form
```
<{url}|New failed tests>
{
"GH_ydshieh": {
"vit": 1
}
}
```
"""
import datetime
import json
import os
from collections import Counter
from copy import deepcopy
from huggingface_hub import HfApi
if __name__ == "__main__":
api = HfApi()
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)
# TODO: extend
team_members = [
"ydshieh",
"zucchini-nlp",
"ArthurZucker",
"gante",
"LysandreJik",
"molbap",
"qubvel",
"Rocketknight1",
"muellerzr",
"SunMarc",
]
# Counting the number of failures grouped by authors
new_data = {}
for model, model_result in data.items():
for device, failed_tests in model_result.items():
for failed_test in failed_tests:
author = failed_test["author"]
if author not in team_members:
author = failed_test["merged_by"]
if author not in new_data:
new_data[author] = Counter()
new_data[author].update([model])
for author in new_data:
new_data[author] = dict(new_data[author])
# Group by author
new_data_full = {author: deepcopy(data) for author in new_data}
for author, _data in new_data_full.items():
for model, model_result in _data.items():
for device, failed_tests in model_result.items():
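# Keep only the failures attributed to this author, either as the commit author or as the person who merged it.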
failed_tests = [x for x in failed_tests if x["author"] == author or x["merged_by"] == author]
model_result[device] = failed_tests
_data[model] = {k: v for k, v in model_result.items() if len(v) > 0}
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}
# Upload to Hub and get the url
with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"
# Add `GH_` prefix as keyword mention
output = {}
for author, item in new_data.items():
author = f"GH_{author}"
output[author] = item
report = f"<{url}|New failed tests>\\n\\n"
report += json.dumps(output, indent=4).replace('"', '\\"').replace("\n", "\\n")
print(report)
| transformers/utils/process_bad_commit_report.py/0 | {
"file_path": "transformers/utils/process_bad_commit_report.py",
"repo_id": "transformers",
"token_count": 1414
} |
repos:
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.6.3
hooks:
- id: ruff
types_or: [ python, pyi ]
args: [ --fix ]
- id: ruff-format
types_or: [ python, pyi ]
# - repo: https://github.com/codespell-project/codespell
# rev: v2.1.0
# hooks:
# - id: codespell
# args:
# - --ignore-words-list=nd,reacher,thist,ths,magent,ba
# - --skip=docs/css/termynal.css,docs/js/termynal.js
| trl/.pre-commit-config.yaml/0 | {
"file_path": "trl/.pre-commit-config.yaml",
"repo_id": "trl",
"token_count": 248
} |
# Callbacks
## SyncRefModelCallback
[[autodoc]] SyncRefModelCallback
## RichProgressCallback
[[autodoc]] RichProgressCallback
## WinRateCallback
[[autodoc]] WinRateCallback
## LogCompletionsCallback
[[autodoc]] LogCompletionsCallback
## MergeModelCallback
[[autodoc]] MergeModelCallback | trl/docs/source/callbacks.md/0 | {
"file_path": "trl/docs/source/callbacks.md",
"repo_id": "trl",
"token_count": 89
} |
# Installation
You can install TRL either from PyPI or from source:
## PyPI
Install the library with pip or [uv](https://docs.astral.sh/uv/):
<hfoptions id="install">
<hfoption id="uv">
uv is a fast Rust-based Python package and project manager. Refer to [Installation](https://docs.astral.sh/uv/getting-started/installation/) for installation instructions.
```bash
uv pip install trl
```
</hfoption>
<hfoption id="pip">
```bash
pip install trl
```
</hfoption>
</hfoptions>
## Source
You can also install the latest version from source. First clone the repo and then run the installation with `pip`:
```bash
git clone https://github.com/huggingface/trl.git
cd trl/
pip install -e .
```
If you want the development install you can replace the pip install with the following:
```bash
pip install -e ".[dev]"
```
| trl/docs/source/installation.md/0 | {
"file_path": "trl/docs/source/installation.md",
"repo_id": "trl",
"token_count": 267
} |
# Reducing Memory Usage
<Tip warning={true}>
Section under construction. Feel free to contribute!
</Tip>
## Truncation
Sequence lengths in the dataset can vary widely. When data is batched, sequences are padded to match the longest one in the batch, which can cause high memory usage, even if most sequences are relatively short.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/why_you_should_truncate.png" alt="Truncation prompt completion" width="600"/>
</div>
To reduce memory usage, it's important to truncate sequences to a reasonable length. While TRL trainers truncate sequences by default, you may want to adjust the default truncation length to better align with your specific use case.
<hfoptions id="dpo">
<hfoption id="DPO">
DPO truncation is applied first to the prompt and to the completion via the `max_prompt_length` and `max_completion_length` parameters. The `max_length` parameter is then used to truncate the resulting sequence.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/truncation_prompt_completion.png" alt="Truncation prompt completion" width="600"/>
</div>
To set the truncation parameters, use the following code snippet:
```python
from trl import DPOConfig
training_args = DPOConfig(..., max_prompt_length=..., max_length=...)
```
You can also use the `max_completion_length` parameter to truncate the completion, though this is less common since the goal is typically to preserve the completion's full length whenever possible.
```python
from trl import DPOConfig
training_args = DPOConfig(..., max_completion_length=...)
```
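For intuition, here is a minimal sketch of the order of operations described above. The helper name `truncate_pair` is hypothetical, and whether the start or the end of each segment is kept is an illustrative assumption, not a statement about the trainer's exact behavior.

```python
# Hypothetical sketch of the truncation order (not TRL internals).
def truncate_pair(prompt_ids, completion_ids, max_prompt_length, max_completion_length, max_length):
    prompt_ids = prompt_ids[-max_prompt_length:]  # 1. truncate the prompt (end kept here as an assumption)
    completion_ids = completion_ids[:max_completion_length]  # 2. truncate the completion
    return (prompt_ids + completion_ids)[:max_length]  # 3. cap the combined sequence


print(truncate_pair(list(range(10)), list(range(10, 18)), 6, 5, 8))
# [4, 5, 6, 7, 8, 9, 10, 11]
```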
</hfoption>
<hfoption id="SFT">
SFT truncation is applied to the input sequence via the `max_seq_length` parameter.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/truncation_input_ids.png" alt="Truncation input ids" width="600"/>
</div>
To set the truncation parameter, use the following code snippet:
```python
from trl import SFTConfig
training_args = SFTConfig(..., max_seq_length=...)
```
</hfoption>
</hfoptions>
## Packing
<Tip>
This technique applies only to SFT.
</Tip>
[Truncation](#truncation) has several drawbacks:
1. **Loss of information**: Key data at the end of a sequence may be discarded.
2. **Choosing truncation length**: Too short loses data; too long undermines efficiency.
Packing, introduced in [Raffel et al., 2020](https://huggingface.co/papers/1910.10683), addresses these issues by grouping sequences instead of truncating. It concatenates and splits dataset sequences into the desired lengths.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/packing.png" alt="Packing" width="600"/>
</div>
Packing eliminates padding, preserves all sequence information, and allows for flexible sequence lengths, making it a more efficient alternative to truncation. To enable packing, use `packing=True` in the [`SFTConfig`]:
```python
from trl import SFTConfig
training_args = SFTConfig(..., packing=True, max_seq_length=512)
```
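To build intuition for what packing does, here is a minimal standalone sketch of the idea (illustrative only, not TRL's internal implementation): all tokenized examples are concatenated into one stream and then split into fixed-size chunks, so no padding is required.

```python
# Minimal sketch of the packing idea (illustrative, not TRL internals).
def pack(tokenized_sequences, max_seq_length):
    stream = [tok for seq in tokenized_sequences for tok in seq]  # concatenate everything
    return [stream[i : i + max_seq_length] for i in range(0, len(stream), max_seq_length)]


print(pack([[1, 2, 3], [4, 5], [6, 7, 8, 9]], max_seq_length=4))
# [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
```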
<Tip warning={true}>
Packing may cause batch contamination, where adjacent sequences influence one another. This can be problematic for some applications. For more details, see [#1230](https://github.com/huggingface/trl/issues/1230).
</Tip>
## Disabling model gathering for generation in online methods
When using DeepSpeed ZeRO-3, model weights are sharded across multiple GPUs. Online methods involve generating completions from the model as part of the training process. During this step, the model weights are temporarily gathered on a single GPU for generation. For very large models, this gathering can lead to out-of-memory (OOM) errors, as described in this issue: [#2250](https://github.com/huggingface/trl/issues/2250#issue-2598304204).
If you encounter this issue, you can disable the gathering of model weights for generation by setting the following parameter:
<hfoptions id="ds3_gather_for_generation">
<hfoption id="Online DPO">
```python
from trl import OnlineDPOConfig
training_args = OnlineDPOConfig(..., ds3_gather_for_generation=False)
```
</hfoption>
<hfoption id="PPO">
```python
from trl import PPOConfig
training_args = PPOConfig(..., ds3_gather_for_generation=False)
```
</hfoption>
<hfoption id="RLOO">
```python
from trl import RLOOConfig
training_args = RLOOConfig(..., ds3_gather_for_generation=False)
```
</hfoption>
</hfoptions>
This adjustment prevents model weights from being gathered, avoiding OOM errors, but it may result in slower generation speeds.
| trl/docs/source/reducing_memory_usage.md/0 | {
"file_path": "trl/docs/source/reducing_memory_usage.md",
"repo_id": "trl",
"token_count": 1396
} |
compute_environment: LOCAL_MACHINE
debug: false
distributed_type: FSDP
downcast_bf16: 'no'
fsdp_config:
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_backward_prefetch: BACKWARD_PRE
fsdp_cpu_ram_efficient_loading: true
fsdp_forward_prefetch: false
fsdp_offload_params: true
fsdp_sharding_strategy: FULL_SHARD
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sync_module_states: true
fsdp_use_orig_params: false
machine_rank: 0
main_training_function: main
mixed_precision: 'bf16'
num_machines: 1
num_processes: 8
rdzv_backend: static
same_network: true
tpu_env: []
tpu_use_cluster: false
tpu_use_sudo: false
use_cpu: false | trl/examples/accelerate_configs/fsdp_qlora.yaml/0 | {
"file_path": "trl/examples/accelerate_configs/fsdp_qlora.yaml",
"repo_id": "trl",
"token_count": 566
} |
<jupyter_start><jupyter_text>Tune GPT2 to generate controlled sentiment reviews> Optimise GPT2 to produce IMDB movie reviews with controlled sentiment using a BERT sentiment classifier for rewards.**WARNING:** We often experienced loss spikes in this example, which caused model training to fail or slow down. There is a [GitHub issue](https://github.com/lvwerra/trl/issues/101) to track the issue. Figure: Experiment setup to tune GPT2. The yellow arrows are outside the scope of this notebook, but the trained models are available through Hugging Face. The experiment setup is very similar to the positive sentiment notebook. However, in this notebook we fine-tune GPT2 (small) to generate **controlled** movie reviews based on the IMDB dataset. The model gets the target sentiment and 5 tokens from a real review and is tasked to produce continuations with the targeted sentiment. The reward for the continuations is calculated with the logits of a BERT sentiment classifier. That reward is then used for PPO training. Setup experiment Import dependencies<jupyter_code>%load_ext autoreload
%autoreload 2
import random
import torch
import wandb
import time
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
from random import choices
import matplotlib.pyplot as plt
tqdm.pandas()
from datasets import load_dataset
from transformers import AutoTokenizer, pipeline
from trl import (
PPOTrainer,
PPOConfig,
AutoModelForCausalLMWithValueHead,
create_reference_model,
)<jupyter_output>/home/leandro_huggingface_co/miniconda3/envs/trl/lib/python3.9/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm<jupyter_text>Configuration<jupyter_code>sentiment_pipe_kwargs = {"top_k": None, "function_to_apply": "none"}
config = PPOConfig(
model_name="lvwerra/gpt2-imdb",
steps=51200,
learning_rate=1.41e-5,
remove_unused_columns=False,
log_with="wandb",
)
txt_in_len = 5
txt_out_len = 20
seed = 1
np.random.seed(seed)<jupyter_output><empty_output><jupyter_text>You can see that we load a GPT2 model called `gpt2_imdb`. This model was additionally fine-tuned on the IMDB dataset for 1 epoch with the huggingface [script](https://github.com/huggingface/transformers/blob/master/examples/run_language_modeling.py) (no special settings). The other parameters are mostly taken from the original paper ["Fine-Tuning Language Models from Human Preferences"](https://huggingface.co/papers/1909.08593). This model as well as the BERT model is available in the Huggingface model zoo [here](https://huggingface.co/models). The following code should automatically download the models. Load data and models Load pre-trained GPT2 language models We load the GPT2 model with a value head and the tokenizer. We load the model twice; the first model is optimized while the second model serves as a reference to calculate the KL-divergence from the starting point. This serves as an additional reward signal in the PPO training to make sure the optimized model does not deviate too much from the original language model.<jupyter_code>gpt2_model = AutoModelForCausalLMWithValueHead.from_pretrained(config.model_name)
gpt2_ref_model = create_reference_model(gpt2_model)
gpt2_tokenizer = AutoTokenizer.from_pretrained(config.model_name)
gpt2_tokenizer.pad_token = gpt2_tokenizer.eos_token<jupyter_output><empty_output><jupyter_text>Load IMDB datasetThe IMDB dataset contains 50k movie reviews annotated with "positive"/"negative" feedback indicating the sentiment. We load the IMDB dataset into a DataFrame and filter for comments that are at least 500 characters long and take the first 1000 characters of each comment. We apply the first filter to avoid comments that are shorter than `txt_in_len` tokens and the second to avoid tokenizing way more text than we actually need.<jupyter_code># create the dataset
#
dataset = load_dataset("stanfordnlp/imdb", split="train")
dataset = dataset.rename_columns({"text": "review", "label": "sentiment"})
# make sure the comments are at least 500 characters long and trim to 1000
dataset = dataset.filter(lambda x: len(x["review"]) > 500, batched=False)
dataset = dataset.map(lambda x: {"review": x["review"][:1000]}, batched=False)
dataset<jupyter_output>Found cached dataset imdb (/home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1)
Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-d314b4c14499bf03.arrow
Loading cached processed dataset at /home/leandro_huggingface_co/.cache/huggingface/datasets/imdb/plain_text/1.0.0/2fdd8b9bcadd6e7055e742a706876ba43f19faee861df134affd7a3f60fc38a1/cache-0d5fcb05c95b1186.arrow<jupyter_text>Tokenize IMDB reviews We tokenize all IMDB in advance to avoid tokenizing twice. In the first step we encode the queries and slice the first `txt_in_len` tokens. In a second step we decode these tokens back to text for later display.<jupyter_code>dataset = dataset.map(
lambda x: {
"input_ids": gpt2_tokenizer.encode(" " + x["review"], return_tensors="pt")[
0, :txt_in_len
]
},
batched=False,
)
dataset = dataset.map(
lambda x: {"query": gpt2_tokenizer.decode(x["input_ids"])}, batched=False
)
dataset = dataset[:20480]
from datasets import Dataset
dataset = Dataset.from_dict(dataset)
dataset.set_format("pytorch")
dataset[3]["input_ids"]
def collator(data):
return dict((key, [d[key] for d in data]) for key in data[0])
ppo_trainer = PPOTrainer(
config, gpt2_model, gpt2_ref_model, gpt2_tokenizer, dataset, data_collator=collator
)<jupyter_output>Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.
[34m[1mwandb[0m: Currently logged in as: [33mlvwerra[0m. Use [1m`wandb login --relogin`[0m to force relogin<jupyter_text>Load BERT classifierWe load a BERT classifier fine-tuned on the IMDB dataset.<jupyter_code>if ppo_trainer.accelerator.num_processes == 1:
device = 0 if torch.cuda.is_available() else "cpu" # to avoid a `pipeline` bug
else:
device = ppo_trainer.accelerator.device
sentiment_pipe = pipeline(
"sentiment-analysis", "lvwerra/distilbert-imdb", device=device
)<jupyter_output><empty_output><jupyter_text>The model outputs are the logits for the negative and positive class. We will use the logits for positive class as a reward signal for the language model.<jupyter_code>text = "this movie was really bad!!"
output = sentiment_pipe(text, **sentiment_pipe_kwargs)
output
text = "this movie was really good!!"
output = sentiment_pipe(text, **sentiment_pipe_kwargs)
output
text = "this movie was a documentary"
output = sentiment_pipe(text, **sentiment_pipe_kwargs)
output<jupyter_output><empty_output><jupyter_text>The resulting reward signal:<jupyter_code>def extract_pipe_output(outputs):
positive_logits = []
for out in outputs:
for element in out:
if element["label"] == "POSITIVE":
positive_logits.append(torch.tensor(element["score"]))
return positive_logits
output[1]["score"]<jupyter_output><empty_output><jupyter_text>Control token dictWe will append the control token at the beginning of each query to signal the model what the target sentiment is. Each control sequence consists of three tokens:<jupyter_code>ctrl_str = ["[negative]", "[neutral]", "[positive]"]
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
) # this should be handled by accelerate
ctrl_tokens = dict(
(s, gpt2_tokenizer.encode(s, return_tensors="pt").squeeze().to(device))
for s in ctrl_str
)
ctrl_tokens<jupyter_output><empty_output><jupyter_text>Reward function<jupyter_code>def pos_logit_to_reward(logit, task):
"""
Take the positive sentiment logit and scale it for the task.
task [negative]: reward = -logit
task [neutral]: reward = -2*abs(logit)+4
task [positive]: reward = logit
"""
for i in range(len(logit)):
if task[i] == "[negative]":
logit[i] = -logit[i]
elif task[i] == "[neutral]":
logit[i] = -2 * torch.abs(logit[i]) + 4
elif task[i] == "[positive]":
pass
else:
raise ValueError("task has to be in [0, 1, 2]!")
return logit<jupyter_output><empty_output><jupyter_text>The following examples show the rewards for the cases where the classifier logit is 4, -4 and 0 for the three targets `[negative]`, `[neutral]` and `[positive]`. The scaling is not perfect as it differs between neutral and the other two classes. This is something to further investigate in the future. Ideally, one would use the logit output for each class individually, but since there is no dedicated class for neutral this is a workaround.<jupyter_code>print(ctrl_str)
pos_logit_to_reward(torch.Tensor([4, 4, 4]), ctrl_str)
pos_logit_to_reward(torch.Tensor([-4, -4, -4]), ctrl_str)
pos_logit_to_reward(torch.Tensor([0, 0, 0]), ctrl_str)<jupyter_output><empty_output><jupyter_text>Generation settings<jupyter_code>generation_kwargs = {
"min_length": -1,
"top_k": 0.0,
"top_p": 1.0,
"do_sample": True,
"pad_token_id": gpt2_tokenizer.eos_token_id,
"max_new_tokens": txt_out_len,
"eos_token_id": -1,
}<jupyter_output><empty_output><jupyter_text>Optimize model **Steps**The training loop consists of the following steps:1. Get a batch of queries and create random controls2. Get the query responses from the policy3. Join query and responses and tokenize for BERT analysis4. Get sentiments for query/responses from BERT5. Optimize policy with PPO using the (query, response, reward) triplet6. Log all the training statistics**Training time**This step takes **~2h** on a P6000 GPU with the above specified settings.<jupyter_code>for epoch in range(2):
for batch in tqdm(ppo_trainer.dataloader):
(
logs,
game_data,
) = (
dict(),
dict(),
)
#### prepend a random control token
task_list = choices(ctrl_str, k=config.batch_size)
game_data["query"] = [t + q for t, q in zip(task_list, batch["query"])]
query_tensors = [
torch.cat((ctrl_tokens[t], input_ids))
for t, input_ids in zip(task_list, batch["input_ids"])
]
#### get response from gpt2
response_tensors = []
for query in query_tensors:
response = ppo_trainer.generate(query, **generation_kwargs)
response_tensors.append(response.squeeze()[-txt_out_len:])
game_data["response"] = [
gpt2_tokenizer.decode(r.squeeze()) for r in response_tensors
]
#### sentiment analysis
texts = [q + r for q, r in zip(batch["query"], game_data["response"])]
logits = extract_pipe_output(sentiment_pipe(texts, **sentiment_pipe_kwargs))
rewards = pos_logit_to_reward(logits, task_list)
#### Run PPO training
t = time.time()
stats = ppo_trainer.step(query_tensors, response_tensors, rewards)
for cs in ctrl_str:
key = "env/reward_" + cs.strip("[]")
stats[key] = np.mean(
[r.cpu().numpy() for r, t in zip(rewards, task_list) if t == cs]
)
ppo_trainer.log_stats(stats, game_data, rewards)<jupyter_output>8%|β | 6/80 [12:44<2:37:54, 128.03s/it]/home/leandro_huggingface_co/miniconda3/envs/trl/lib/python3.9/site-packages/transformers/pipelines/base.py:1045: UserWarning: You seem to be using the pipelines sequentially on GPU. In order to maximize efficiency please use a dataset
warnings.warn(
100%|ββββββββββ| 80/80 [2:46:39<00:00, 124.99s/it]
91%|ββββββββββ| 73/80 [2:30:39<14:35, 125.03s/it]<jupyter_text>Training progressIf you are tracking the training progress with Weights&Biases you should see a plot similar to the following: Figure: Reward mean and distribution evolution during training. One can observe how the model starts to generate more positive outputs after a few optimisation steps.> Note: Investigating the KL-divergence will probably show that at this point the model has not converged to the target KL-divergence, yet. To get there would require longer training or starting with a higher initial coefficient. Model inspection Reward distributionFirst, we can have a look at the reward distribution. Both the negative and positive rewards are clearly shifted to high rewards. The neutral rewards, however, are still centered around zero. There are a few possible explanations for this. There could be a bug in the code and the way the neutral rewards are calculated. Another problem could be that sentences sometimes start with a strong sentiment and it is hard for the model to shift the sentiment towards neutral.<jupyter_code>for ctrl_s in ctrl_str:
plt.hist(
[r for r, t in zip(logs["env/reward_dist"], task_list) if t == ctrl_s],
density=True,
alpha=0.5,
label=ctrl_s,
)
plt.legend(loc="best")
plt.title("reward distribution")
plt.grid(True)
plt.show()<jupyter_output><empty_output><jupyter_text>Save modelFinally, we save the model to disk for later usage.<jupyter_code>gpt2_model.save_pretrained("gpt2-imdb-ctrl")
gpt2_tokenizer.save_pretrained("gpt2-imdb-ctrl")<jupyter_output><empty_output> | trl/examples/notebooks/gpt2-sentiment-control.ipynb/0 | {
"file_path": "trl/examples/notebooks/gpt2-sentiment-control.ipynb",
"repo_id": "trl",
"token_count": 4851
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoModelForCausalLM, AutoTokenizer, is_torch_npu_available, is_torch_xpu_available
toxicity = evaluate.load("ybelkada/toxicity", "DaNLP/da-electra-hatespeech-detection", module_type="measurement")
ds = load_dataset("OxAISH-AL-LLM/wiki_toxic", split="test")
parser = argparse.ArgumentParser(description="Evaluate de-toxified models")
parser.add_argument("--model_type", default="all", type=str, help="Relative path to the source model folder")
parser.add_argument("--output_file", default="toxicity.csv", type=str, help="Relative path to the source model folder")
parser.add_argument("--batch_size", default=64, type=int, help="Batch size")
parser.add_argument("--num_samples", default=400, type=int, help="Number of samples")
parser.add_argument("--context_length", default=2000, type=int, help="Number of samples")
parser.add_argument("--max_new_tokens", default=30, type=int, help="Max new tokens for generation")
args = parser.parse_args()
if args.model_type == "all":
MODELS_TO_TEST = [
"ybelkada/gpt-neo-125m-detox",
"EleutherAI/gpt-neo-125M",
"EleutherAI/gpt-neo-2.7B",
"ybelkada/gpt-neo-2.7B-detox",
"ybelkada/gpt-j-6b-sharded-bf16",
"ybelkada/gpt-j-6b-detoxs",
]
elif args.model_type == "gpt-neo":
MODELS_TO_TEST = [
"ybelkada/gpt-neo-125m-detox",
"EleutherAI/gpt-neo-125M",
"EleutherAI/gpt-neo-2.7B",
"ybelkada/gpt-neo-2.7B-detox",
]
elif args.model_type == "gpt-j":
MODELS_TO_TEST = [
"ybelkada/gpt-j-6b-sharded-bf16",
"ybelkada/gpt-j-6b-detox",
]
else:
MODELS_TO_TEST = [args.model_type]
NUM_SAMPLES = args.num_samples
BATCH_SIZE = args.batch_size
output_file = args.output_file
max_new_tokens = args.max_new_tokens
context_length = args.context_length
if is_torch_xpu_available():
device = torch.xpu.current_device()
elif is_torch_npu_available():
device = torch.npu.current_device()
else:
device = torch.cuda.current_device() if torch.cuda.is_available() else "cpu"
# consider only toxic prompts
ds = ds.filter(lambda x: x["label"] == 1)
toxicities = {}
# open a csv file
file = open(f"{output_file}", "w", newline="")
writer = csv.writer(file)
# add first rows
writer.writerow(["model_id", "mean_toxicity", "std_toxicity"])
for model_id in tqdm(MODELS_TO_TEST):
model = AutoModelForCausalLM.from_pretrained(model_id, device_map={"": device}, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"
input_texts = []
for i, example in enumerate(ds):
# set seed
torch.manual_seed(42)
input_text = example["comment_text"]
input_texts.append(input_text[:2000])
if i > NUM_SAMPLES:
break
if (i + 1) % BATCH_SIZE == 0:
inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(device)
inputs.input_ids = inputs.input_ids[:context_length]
inputs.attention_mask = inputs.attention_mask[:context_length]
outputs = model.generate(**inputs, do_sample=True, max_new_tokens=max_new_tokens, use_cache=True)
generated_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True)
generated_texts = [
generated_text.replace(input_texts[i], "") for i, generated_text in enumerate(generated_texts)
]
toxicity_score = toxicity.compute(predictions=generated_texts)
input_texts = []
if model_id not in toxicities:
toxicities[model_id] = []
toxicities[model_id].extend(toxicity_score["toxicity"])
# last batch
inputs = tokenizer(input_texts, return_tensors="pt", padding=True).to(device)
outputs = model.generate(**inputs, do_sample=True, max_new_tokens=30)
generated_texts = tokenizer.batch_decode(outputs, skip_special_tokens=True)
generated_texts = [generated_text.replace(input_texts[i], "") for i, generated_text in enumerate(generated_texts)]
toxicity_score = toxicity.compute(predictions=generated_texts)
toxicities[model_id].extend(toxicity_score["toxicity"])
# compute mean & std using np
mean = np.mean(toxicities[model_id])
std = np.std(toxicities[model_id])
# save to file
writer.writerow([model_id, mean, std])
# print
print(f"Model: {model_id} - Mean: {mean} - Std: {std}")
model = None
if is_torch_xpu_available():
torch.xpu.empty_cache()
elif is_torch_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
# close file
file.close()
| trl/examples/research_projects/toxicity/scripts/evaluate-toxicity.py/0 | {
"file_path": "trl/examples/research_projects/toxicity/scripts/evaluate-toxicity.py",
"repo_id": "trl",
"token_count": 2222
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import torch
from accelerate import PartialState
from datasets import load_dataset
from transformers import (
AutoModelForCausalLM,
AutoModelForSequenceClassification,
AutoTokenizer,
HfArgumentParser,
)
from trl import (
ModelConfig,
PPOConfig,
PPOTrainer,
ScriptArguments,
get_kbit_device_map,
get_peft_config,
get_quantization_config,
)
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
"""
python examples/scripts/ppo/ppo_tldr.py \
--dataset_name trl-internal-testing/tldr-preference-sft-trl-style \
--dataset_test_split validation \
--learning_rate 3e-6 \
--output_dir models/minimal/ppo_tldr \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 64 \
--total_episodes 30000 \
--model_name_or_path EleutherAI/pythia-1b-deduped \
--sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \
--reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \
--missing_eos_penalty 1.0 \
--stop_token eos \
--response_length 53 \
--eval_strategy steps \
--eval_steps 100
accelerate launch --config_file examples/accelerate_configs/deepspeed_zero2.yaml \
examples/scripts/ppo/ppo_tldr.py \
--dataset_name trl-internal-testing/tldr-preference-sft-trl-style \
--dataset_test_split validation \
--output_dir models/minimal/ppo_tldr \
--learning_rate 3e-6 \
--per_device_train_batch_size 16 \
--gradient_accumulation_steps 4 \
--total_episodes 1000000 \
--model_name_or_path EleutherAI/pythia-1b-deduped \
--sft_model_path cleanrl/EleutherAI_pythia-1b-deduped__sft__tldr \
--reward_model_path cleanrl/EleutherAI_pythia-1b-deduped__reward__tldr \
--local_rollout_forward_batch_size 16 \
--missing_eos_penalty 1.0 \
--stop_token eos \
--eval_strategy steps \
--eval_steps 100
"""
if __name__ == "__main__":
parser = HfArgumentParser((ScriptArguments, PPOConfig, ModelConfig))
script_args, training_args, model_args = parser.parse_args_into_dataclasses()
# remove output_dir if exists
shutil.rmtree(training_args.output_dir, ignore_errors=True)
################
# Model & Tokenizer
################
torch_dtype = (
model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype)
)
quantization_config = get_quantization_config(model_args)
model_kwargs = dict(
revision=model_args.model_revision,
attn_implementation=model_args.attn_implementation,
torch_dtype=torch_dtype,
device_map=get_kbit_device_map() if quantization_config is not None else None,
quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, padding_side="left", trust_remote_code=model_args.trust_remote_code
)
tokenizer.add_special_tokens({"pad_token": "[PAD]"})
if tokenizer.chat_template is None:
tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
value_model = AutoModelForSequenceClassification.from_pretrained(
training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1
)
reward_model = AutoModelForSequenceClassification.from_pretrained(
training_args.reward_model_path, trust_remote_code=model_args.trust_remote_code, num_labels=1
)
policy = AutoModelForCausalLM.from_pretrained(
training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code
)
peft_config = get_peft_config(model_args)
if peft_config is None:
ref_policy = AutoModelForCausalLM.from_pretrained(
training_args.sft_model_path, trust_remote_code=model_args.trust_remote_code
)
else:
ref_policy = None
################
# Dataset
################
dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config)
train_dataset = dataset[script_args.dataset_train_split]
eval_dataset = dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None
def prepare_dataset(dataset, tokenizer):
"""pre-tokenize the dataset before training; only collate during training"""
def tokenize(element):
input_ids = tokenizer.apply_chat_template(
element["messages"][:1],
padding=False,
add_generation_prompt=True,
)
return {"input_ids": input_ids, "lengths": len(input_ids)}
return dataset.map(
tokenize,
remove_columns=dataset.column_names,
num_proc=training_args.dataset_num_proc,
)
# Compute that only on the main process for faster data processing.
# see: https://github.com/huggingface/trl/pull/1255
with PartialState().local_main_process_first():
train_dataset = prepare_dataset(train_dataset, tokenizer)
if eval_dataset is not None:
eval_dataset = prepare_dataset(eval_dataset, tokenizer)
# filtering
train_dataset = train_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc)
if eval_dataset is not None:
eval_dataset = eval_dataset.filter(lambda x: x["lengths"] <= 512, num_proc=training_args.dataset_num_proc)
assert train_dataset[0]["input_ids"][-1] != tokenizer.eos_token_id, "The last token should not be an EOS token"
################
# Training
################
trainer = PPOTrainer(
args=training_args,
processing_class=tokenizer,
model=policy,
ref_model=ref_policy,
reward_model=reward_model,
value_model=value_model,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
peft_config=peft_config,
)
trainer.train()
# Save and push to hub
trainer.save_model(training_args.output_dir)
if training_args.push_to_hub:
trainer.push_to_hub(dataset_name=script_args.dataset_name)
trainer.generate_completions()
| trl/examples/scripts/ppo/ppo_tldr.py/0 | {
"file_path": "trl/examples/scripts/ppo/ppo_tldr.py",
"repo_id": "trl",
"token_count": 2726
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
from datetime import date
from pathlib import Path
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # Slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument("--slack_channel_name", default="trl-push-ci")
# Set up logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
def process_log_file(log):
failed_tests = []
passed_tests = []
section_num_failed = 0
try:
with open(log) as f:
for line in f:
try:
data = json.loads(line)
test_name = data.get("nodeid", "")
duration = f'{data["duration"]:.4f}' if "duration" in data else "N/A"
outcome = data.get("outcome", "")
if test_name:
if outcome == "failed":
section_num_failed += 1
failed_tests.append([test_name, duration, log.stem.split("_")[0]])
else:
passed_tests.append([test_name, duration, log.stem.split("_")[0]])
except json.JSONDecodeError as e:
logging.warning(f"Could not decode line in {log}: {e}")
except FileNotFoundError as e:
logging.error(f"Log file {log} not found: {e}")
except Exception as e:
logging.error(f"Error processing log file {log}: {e}")
return failed_tests, passed_tests, section_num_failed
def main(slack_channel_name):
group_info = []
total_num_failed = 0
total_empty_files = []
log_files = list(Path().glob("*.log"))
if not log_files:
logging.info("No log files found.")
return
for log in log_files:
failed, passed, section_num_failed = process_log_file(log)
empty_file = not failed and not passed
total_num_failed += section_num_failed
total_empty_files.append(empty_file)
group_info.append([str(log), section_num_failed, failed])
# Clean up log file
try:
os.remove(log)
except OSError as e:
logging.warning(f"Could not remove log file {log}: {e}")
# Prepare Slack message payload
payload = [
{
"type": "header",
"text": {"type": "plain_text", "text": f"π€ Results of the {os.environ.get('TEST_TYPE', '')} TRL tests."},
},
]
if total_num_failed > 0:
message = ""
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
message += f"*{name}: {num_failed} failed test(s)*\n"
failed_table = [
test[0].split("::")[:2] + [test[0].split("::")[-1][:30] + ".."] for test in failed_tests
]
message += (
"\n```\n"
+ tabulate(failed_table, headers=["Test Location", "Test Name"], tablefmt="grid")
+ "\n```\n"
)
if any(total_empty_files):
message += f"\n*{name}: Warning! Empty file - check GitHub action job*\n"
# Logging
logging.info(f"Total failed tests: {total_num_failed}")
print(f"### {message}")
if len(message) > MAX_LEN_MESSAGE:
message = (
f"β There are {total_num_failed} failed tests in total! Please check the action results directly."
)
payload.append({"type": "section", "text": {"type": "mrkdwn", "text": message}})
payload.append(
{
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results"},
"url": f"https://github.com/huggingface/trl/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
)
payload.append(
{
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"On Push main {os.environ.get('TEST_TYPE')} results for {date.today()}",
}
],
}
)
# Send to Slack
from slack_sdk import WebClient
slack_client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
slack_client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
else:
payload.append(
{
"type": "section",
"text": {
"type": "plain_text",
"text": "β
No failures! All tests passed successfully.",
"emoji": True,
},
}
)
logging.info("All tests passed. No errors detected.")
if __name__ == "__main__":
args = parser.parse_args()
main(args.slack_channel_name)
| trl/scripts/log_reports.py/0 | {
"file_path": "trl/scripts/log_reports.py",
"repo_id": "trl",
"token_count": 2761
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import require_peft
from trl import CPOConfig, CPOTrainer
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
class CPOTrainerTester(unittest.TestCase):
def setUp(self):
self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.tokenizer.pad_token = self.tokenizer.eos_token
# get t5 as seq2seq example:
model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration"
self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)
self.t5_tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
@parameterized.expand(
[
("qwen", "sigmoid", "standard_preference"),
("t5", "hinge", "standard_implicit_prompt_preference"),
("qwen", "ipo", "conversational_preference"),
("t5", "ipo", "conversational_implicit_prompt_preference"),
("qwen", "simpo", "standard_preference"),
("t5", "simpo", "standard_implicit_prompt_preference"),
("qwen", "hinge", "conversational_preference"),
]
)
def test_cpo_trainer(self, name, loss_type, config_name):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = CPOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
loss_type=loss_type,
cpo_alpha=1.0,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
if name == "qwen":
model = self.model
tokenizer = self.tokenizer
elif name == "t5":
model = self.t5_model
tokenizer = self.t5_tokenizer
training_args.is_encoder_decoder = True
trainer = CPOTrainer(
model=model,
args=training_args,
processing_class=tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
@parameterized.expand(
[
("standard_preference",),
("standard_implicit_prompt_preference",),
("conversational_preference",),
("conversational_implicit_prompt_preference",),
]
)
@require_peft
def test_cpo_trainer_with_lora(self, config_name):
from peft import LoraConfig
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = CPOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
cpo_alpha=1.0,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
trainer = CPOTrainer(
model=self.model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_config,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
if "lora" in n:
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
| trl/tests/test_cpo_trainer.py/0 | {
"file_path": "trl/tests/test_cpo_trainer.py",
"repo_id": "trl",
"token_count": 2811
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
import torch
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import require_peft
from trl import ORPOConfig, ORPOTrainer
from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE
class ORPOTrainerTester(unittest.TestCase):
def setUp(self):
self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5"
self.model = AutoModelForCausalLM.from_pretrained(self.model_id)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.tokenizer.pad_token = self.tokenizer.eos_token
# get t5 as seq2seq example:
model_id = "trl-internal-testing/tiny-T5ForConditionalGeneration"
self.t5_model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
self.t5_tokenizer = AutoTokenizer.from_pretrained(model_id)
self.t5_tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE
@parameterized.expand(
[
("qwen", "standard_preference"),
("t5", "standard_implicit_prompt_preference"),
("qwen", "conversational_preference"),
("t5", "conversational_implicit_prompt_preference"),
]
)
def test_orpo_trainer(self, name, config_name):
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = ORPOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=1,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
if name == "qwen":
model = self.model
tokenizer = self.tokenizer
elif name == "t5":
model = self.t5_model
tokenizer = self.t5_tokenizer
training_args.is_encoder_decoder = True
trainer = ORPOTrainer(
model=model,
args=training_args,
processing_class=tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
@parameterized.expand(
[
("standard_preference",),
("standard_implicit_prompt_preference",),
("conversational_preference",),
("conversational_implicit_prompt_preference",),
]
)
@require_peft
def test_orpo_trainer_with_lora(self, config_name):
from peft import LoraConfig
lora_config = LoraConfig(
r=16,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
with tempfile.TemporaryDirectory() as tmp_dir:
training_args = ORPOConfig(
output_dir=tmp_dir,
per_device_train_batch_size=2,
max_steps=3,
remove_unused_columns=False,
gradient_accumulation_steps=4,
learning_rate=9e-1,
eval_strategy="steps",
beta=0.1,
report_to="none",
)
dummy_dataset = load_dataset("trl-internal-testing/zen", config_name)
trainer = ORPOTrainer(
model=self.model,
args=training_args,
processing_class=self.tokenizer,
train_dataset=dummy_dataset["train"],
eval_dataset=dummy_dataset["test"],
peft_config=lora_config,
)
previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()}
trainer.train()
self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
# Check that the parameters have changed
for n, param in previous_trainable_params.items():
if "lora" in n:
new_param = trainer.model.get_parameter(n)
if param.sum() != 0: # ignore 0 biases
self.assertFalse(torch.equal(param, new_param))
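# Illustrative note (added comment, not part of the original test file): these tests are
# typically run with pytest from the repository root, e.g.
#   pytest tests/test_orpo_trainer.py -k orpo
# The exact path depends on the local checkout layout.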
| trl/tests/test_orpo_trainer.py/0 | {
"file_path": "trl/tests/test_orpo_trainer.py",
"repo_id": "trl",
"token_count": 2634
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional, Sequence, TypeVar, Union
from datasets import Dataset, DatasetDict
from transformers import PreTrainedTokenizer
DatasetType = TypeVar("DatasetType", Dataset, DatasetDict)
def is_conversational(example: dict[str, Any]) -> bool:
r"""
Check if the example is in a conversational format.
Args:
example (`dict[str, Any]`):
A single data entry of a dataset. The example can have different keys depending on the
dataset type.
Returns:
`bool`: `True` if the data is in a conversational format, `False` otherwise.
Examples:
```python
>>> example = {"prompt": [{"role": "user", "content": "What color is the sky?"}]}
>>> is_conversational(example)
True
    >>> example = {"prompt": "The sky is"}
>>> is_conversational(example)
False
```
"""
supported_keys = ["prompt", "chosen", "rejected", "completion", "messages"]
example_keys = {key for key in example.keys() if key in supported_keys}
# It must have one of the supported keys
if example_keys:
key = example_keys.pop() # take the first supported key
maybe_messages = example[key]
# It must be a list of messages,
if isinstance(maybe_messages, list):
maybe_message = maybe_messages[0]
            # Each message must be a dictionary with keys "role" and "content"
if isinstance(maybe_message, dict) and "role" in maybe_message and "content" in maybe_message:
return True
return False
def apply_chat_template(
example: dict[str, list[dict[str, str]]],
tokenizer: PreTrainedTokenizer,
tools: Optional[list[Union[dict, Callable]]] = None,
) -> dict[str, str]:
r"""
Apply a chat template to a conversational example along with the schema for a list of functions in `tools`.
For more details, see [`maybe_apply_chat_template`].
"""
# Check that the example has the correct keys
supported_keys = ["prompt", "chosen", "rejected", "completion", "messages", "label"]
example_keys = {key for key in example.keys() if key in supported_keys}
if example_keys not in [
{"messages"}, # language modeling
{"prompt"}, # prompt-only
{"prompt", "completion"}, # prompt-completion
{"prompt", "chosen", "rejected"}, # preference
{"chosen", "rejected"}, # preference with implicit prompt
{"prompt", "completion", "label"}, # unpaired preference
]:
raise KeyError(f"Invalid keys in the example: {example_keys}")
# Apply the chat template to the whole conversation
if "messages" in example:
messages = tokenizer.apply_chat_template(example["messages"], tools=tools, tokenize=False)
# Apply the chat template to the prompt, adding the generation prompt
if "prompt" in example:
prompt = tokenizer.apply_chat_template(
example["prompt"], tools=tools, tokenize=False, add_generation_prompt=True
)
# Apply the chat template to the entire prompt + completion
if "prompt" in example: # explicit prompt and prompt-completion case
if "chosen" in example:
prompt_chosen = tokenizer.apply_chat_template(
example["prompt"] + example["chosen"], tools=tools, tokenize=False
)
chosen = prompt_chosen[len(prompt) :]
if "rejected" in example and "prompt" in example: # explicit prompt
prompt_rejected = tokenizer.apply_chat_template(
example["prompt"] + example["rejected"], tools=tools, tokenize=False
)
rejected = prompt_rejected[len(prompt) :]
if "completion" in example:
prompt_completion = tokenizer.apply_chat_template(
example["prompt"] + example["completion"], tools=tools, tokenize=False
)
completion = prompt_completion[len(prompt) :]
else: # implicit prompt case
if "chosen" in example:
chosen = tokenizer.apply_chat_template(example["chosen"], tools=tools, tokenize=False)
if "rejected" in example:
rejected = tokenizer.apply_chat_template(example["rejected"], tools=tools, tokenize=False)
# Ensure that the prompt is the initial part of the prompt-completion string
if "prompt" in example:
error_message = (
"The chat template applied to the prompt + completion does not start with the chat template applied to "
"the prompt alone. This can indicate that the chat template is not supported by TRL."
"\n**Prompt**:\n{}\n\n**Prompt + Completion**:\n{}"
)
if "chosen" in example and not prompt_chosen.startswith(prompt):
raise ValueError(error_message.format(prompt, prompt_chosen))
if "rejected" in example and not prompt_rejected.startswith(prompt):
raise ValueError(error_message.format(prompt, prompt_rejected))
if "completion" in example and not prompt_completion.startswith(prompt):
raise ValueError(error_message.format(prompt, prompt_completion))
# Extract the completion by removing the prompt part from the prompt-completion string
output = {}
if "messages" in example:
output["text"] = messages
if "prompt" in example:
output["prompt"] = prompt
if "chosen" in example:
output["chosen"] = chosen
if "rejected" in example:
output["rejected"] = rejected
if "completion" in example:
output["completion"] = completion
if "label" in example:
output["label"] = example["label"]
return output
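# Illustrative sketch (added comment, not part of the original module): applying the template to a
# preference example with an explicit prompt. The exact strings depend on the tokenizer's chat
# template, so only the structure of the result is asserted; the model name below is an arbitrary
# chat model used for illustration.
# >>> from transformers import AutoTokenizer
# >>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
# >>> example = {
# ...     "prompt": [{"role": "user", "content": "What color is the sky?"}],
# ...     "chosen": [{"role": "assistant", "content": "It is blue."}],
# ...     "rejected": [{"role": "assistant", "content": "It is green."}],
# ... }
# >>> sorted(apply_chat_template(example, tokenizer).keys())
# ['chosen', 'prompt', 'rejected']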
def maybe_apply_chat_template(
example: dict[str, list[dict[str, str]]],
tokenizer: PreTrainedTokenizer,
tools: Optional[list[Union[dict, Callable]]] = None,
) -> dict[str, str]:
r"""
If the example is in a conversational format, apply a chat template to it.
Args:
example (`dict[str, list[dict[str, str]]`):
Dictionary representing a single data entry of a conversational dataset. Each data entry can have different
keys depending on the dataset type. The supported dataset types are:
- Language modeling dataset: `"messages"`.
- Prompt-only dataset: `"prompt"`.
- Prompt-completion dataset: `"prompt"` and `"completion"`.
- Preference dataset: `"prompt"`, `"chosen"`, and `"rejected"`.
- Preference dataset with implicit prompt: `"chosen"` and `"rejected"`.
- Unpaired preference dataset: `"prompt"`, `"completion"`, and `"label"`.
For keys `"messages"`, `"prompt"`, `"chosen"`, `"rejected"`, and `"completion"`, the values are lists of
messages, where each message is a dictionary with keys `"role"` and `"content"`.
tokenizer (`PreTrainedTokenizer`):
The tokenizer to apply the chat template with.
tools (`list[Union[dict, Callable]]` or `None`, *optional*, defaults to `None`):
A list of tools (callable functions) that will be accessible to the model.
If the template does not support function calling, this argument will have no effect
Returns:
`dict[str, str]`: The formatted example with the chat template applied.
Note:
        This function does not alter the keys, except for the language modeling dataset, where `"messages"` is replaced by
`"text"`.
Example:
```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct")
>>> example = {
... "prompt": [{"role": "user", "content": "What color is the sky?"}],
... "completion": [{"role": "assistant", "content": "It is blue."}]
... }
    >>> maybe_apply_chat_template(example, tokenizer)
{'prompt': '<|user|>\nWhat color is the sky?<|end|>\n<|assistant|>\n', 'completion': 'It is blue.<|end|>\n<|endoftext|>'}
```
"""
if is_conversational(example):
return apply_chat_template(example, tokenizer, tools)
else:
return example
def _unpair_row(examples: list[dict[str, list[dict[str, str]]]]) -> list[dict[str, list[dict[str, str]]]]:
batch_size = len(examples["chosen"])
new_rows = {
"completion": examples["chosen"] + examples["rejected"],
"label": [True] * batch_size + [False] * batch_size,
}
if "prompt" in examples:
new_rows["prompt"] = examples["prompt"] + examples["prompt"]
return new_rows
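# Illustrative sketch (added comment, not part of the original module): `_unpair_row` receives a
# *batched* dict of columns, which is how `datasets.Dataset.map(..., batched=True)` calls it.
# >>> batch = {
# ...     "prompt": [[{"role": "user", "content": "What color is the sky?"}]],
# ...     "chosen": [[{"role": "assistant", "content": "It is blue."}]],
# ...     "rejected": [[{"role": "assistant", "content": "It is green."}]],
# ... }
# >>> unpaired = _unpair_row(batch)
# >>> unpaired["label"]
# [True, False]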
def unpair_preference_dataset(
dataset: DatasetType, num_proc: Optional[int] = None, desc: Optional[str] = None
) -> DatasetType:
r"""
Unpair a preference dataset.
Args:
dataset (`Dataset` or `DatasetDict`):
Preference dataset to unpair. The dataset must have columns `"chosen"`, `"rejected"` and optionally
`"prompt"`.
num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
desc (`str` or `None`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
Returns:
`Dataset`: The unpaired preference dataset.
Example:
```python
>>> from datasets import Dataset
>>> dataset_dict = {
... "prompt": ["The sky is", "The sun is"]
... "chosen": [" blue.", "in the sky."],
... "rejected": [" green.", " in the sea."]
... }
>>> dataset = Dataset.from_dict(dataset_dict)
>>> dataset = unpair_preference_dataset(dataset)
>>> dataset
Dataset({
features: ['prompt', 'completion', 'label'],
num_rows: 4
})
>>> dataset[0]
{'prompt': 'The sky is', 'completion': ' blue.', 'label': True}
```
"""
return dataset.map(_unpair_row, batched=True, remove_columns=["chosen", "rejected"], num_proc=num_proc, desc=desc)
def maybe_unpair_preference_dataset(
dataset: DatasetType, num_proc: Optional[int] = None, desc: Optional[str] = None
) -> DatasetType:
r"""
Unpair a preference dataset if it is paired.
Args:
dataset (`Dataset` or `DatasetDict`):
Preference dataset to unpair. The dataset must have columns `"chosen"`, `"rejected"` and optionally
`"prompt"`.
num_proc (`int` or `None`, *optional*, defaults to `None`):
Number of processes to use for processing the dataset.
desc (`str` or `None`, *optional*, defaults to `None`):
Meaningful description to be displayed alongside with the progress bar while mapping examples.
Returns:
`Dataset` or `DatasetDict`: The unpaired preference dataset if it was paired, otherwise the original dataset.
Example:
```python
>>> from datasets import Dataset
>>> dataset_dict = {
... "prompt": ["The sky is", "The sun is"]
... "chosen": [" blue.", "in the sky."],
... "rejected": [" green.", " in the sea."]
... }
>>> dataset = Dataset.from_dict(dataset_dict)
    >>> dataset = maybe_unpair_preference_dataset(dataset)
>>> dataset
Dataset({
features: ['prompt', 'completion', 'label'],
num_rows: 4
})
>>> dataset[0]
{'prompt': 'The sky is', 'completion': ' blue.', 'label': True}
```
"""
if isinstance(dataset, DatasetDict):
column_names = dataset[list(dataset.keys())[0]].column_names
else:
column_names = dataset.column_names
if "chosen" in column_names and "rejected" in column_names:
return unpair_preference_dataset(dataset, num_proc=num_proc, desc=desc)
else:
return dataset
def extract_prompt(example: dict[str, Sequence]) -> dict[str, Sequence]:
r"""
Extracts the shared prompt from a preference data example, where the prompt is implicit within both
the chosen and rejected completions.
For more details, see [`maybe_extract_prompt`].
"""
for idx in range(min(len(example["chosen"]), len(example["rejected"]))):
if example["chosen"][idx] != example["rejected"][idx]:
if example["chosen"][idx - 1] == " ": # remove space before the prompt
idx -= 1
break
return {
"prompt": example["chosen"][:idx],
"chosen": example["chosen"][idx:],
"rejected": example["rejected"][idx:],
}
def maybe_extract_prompt(example: dict[str, list]) -> dict[str, list]:
r"""
Extracts the shared prompt from a preference data example, where the prompt is implicit within both
the chosen and rejected completions.
If the example already contains a `"prompt"` key, the function returns the example as is. Else, the function
identifies the longest common sequence (prefix) of conversation turns between the "chosen" and "rejected"
completions and extracts this as the prompt. It then removes this prompt from the respective "chosen" and
"rejected" completions.
Args:
example (`dict[str, list]`):
A dictionary representing a single data entry in the preference dataset. It must contain the keys
`"chosen"` and `"rejected"`, where each value is either conversational or standard (`str`).
Returns:
`dict[str, list]`: A dictionary containing:
- `"prompt"`: The longest common prefix between the "chosen" and "rejected" completions.
- `"chosen"`: The remainder of the "chosen" completion, with the prompt removed.
- `"rejected"`: The remainder of the "rejected" completion, with the prompt removed.
Examples:
```python
>>> example = {
... "chosen": [
... {"role": "user", "content": "What color is the sky?"},
... {"role": "assistant", "content": "It is blue."}
... ],
... "rejected": [
... {"role": "user", "content": "What color is the sky?"},
... {"role": "assistant", "content": "It is green."}
... ]
... }
>>> extract_prompt(example)
{'prompt': [{'role': 'user', 'content': 'What color is the sky?'}],
'chosen': [{'role': 'assistant', 'content': 'It is blue.'}],
'rejected': [{'role': 'assistant', 'content': 'It is green.'}]}
```
Or, with the `map` method of `datasets.Dataset`:
```python
>>> from trl import extract_prompt
>>> from datasets import Dataset
>>> dataset_dict = {
... "chosen": [
... [
... {"role": "user", "content": "What color is the sky?"},
... {"role": "assistant", "content": "It is blue."},
... ],
... [
... {"role": "user", "content": "Where is the sun?"},
... {"role": "assistant", "content": "In the sky."},
... ],
... ],
... "rejected": [
... [
... {"role": "user", "content": "What color is the sky?"},
... {"role": "assistant", "content": "It is green."},
... ],
... [
... {"role": "user", "content": "Where is the sun?"},
... {"role": "assistant", "content": "In the sea."},
... ],
... ],
... }
>>> dataset = Dataset.from_dict(dataset_dict)
>>> dataset = dataset.map(extract_prompt)
>>> dataset[0]
{'prompt': [{'role': 'user', 'content': 'What color is the sky?'}],
'chosen': [{'role': 'assistant', 'content': 'It is blue.'}],
'rejected': [{'role': 'assistant', 'content': 'It is green.'}]}
```
"""
    # Some datasets add a `"prompt"` column, even though the prompt is implicit and included in the "chosen" and
# "rejected" completions. E.g.:
# {"prompt": "What color is the sky?",
# "chosen": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}],
# "rejected": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is green."}]}
# That's why we check if the prompt is also conversational before deciding not to extract it.
if "chosen" not in example or "rejected" not in example: # not a preference example
return example
if "prompt" in example:
# Both conversational or both non-conversational
chosen_conv = is_conversational({"chosen": example["chosen"]})
prompt_conv = is_conversational({"prompt": example["prompt"]})
if (chosen_conv and prompt_conv) or (not chosen_conv and not prompt_conv):
return example
return extract_prompt({"chosen": example["chosen"], "rejected": example["rejected"]})
def pack_examples(examples: dict[str, list[list]], seq_length: int) -> dict[str, list[list]]:
"""
Pack examples into chunks of size `seq_length`.
Args:
examples (`dict[str, list[list]]`):
Dictionary of examples with keys as strings and values as lists of lists.
seq_length (`int`):
Maximum sequence length.
Returns:
`dict[str, list[list]]`: Dictionary of examples with keys as strings and values as lists of lists.
Example:
```python
>>> from trl import pack_examples
>>> examples = {
... "input_ids": [[1, 2, 3], [4, 5, 6, 7], [8]],
... "attention_mask": [[0, 1, 1], [0, 0, 1, 1], [1]],
... }
>>> pack_examples(examples, seq_length=5)
{'input_ids': [[1, 2, 3, 4, 5], [6, 7, 8]], 'attention_mask': [[0, 1, 1, 0, 0], [1, 1, 1]]}
>>> pack_examples(examples, seq_length=2)
{'input_ids': [[1, 2], [3, 4], [5, 6], [7, 8]], 'attention_mask': [[0, 1], [1, 0], [0, 1], [1, 1]]}
```
"""
# Join all the values into a single list
examples = {k: sum(v, []) for k, v in examples.items()}
# Split the values into chunks of size seq_length
examples = {k: [v[i : i + seq_length] for i in range(0, len(v), seq_length)] for k, v in examples.items()}
return examples
| trl/trl/data_utils.py/0 | {
"file_path": "trl/trl/data_utils.py",
"repo_id": "trl",
"token_count": 7248
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import json
import os
import platform
import re
import time
from dataclasses import dataclass, field
from threading import Thread
from typing import Optional
import torch
import yaml
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from trl import TrlParser, init_zero_verbose
from trl.trainer.utils import get_quantization_config
if platform.system() != "Windows":
import pwd
init_zero_verbose()
HELP_STRING = """\
**TRL CHAT INTERFACE**
The chat interface is a simple tool to try out a chat model.
Besides talking to the model there are several commands:
- **clear**: clears the current conversation and starts a new one
- **example {NAME}**: load example named `{NAME}` from the config and use it as the user input
- **set {SETTING_NAME}={SETTING_VALUE};**: change the system prompt or generation settings (multiple settings are separated by a ';').
- **reset**: same as clear but also resets the generation configs to defaults if they have been changed by **set**
- **save {SAVE_NAME} (optional)**: save the current chat and settings to a file, by default `./chat_history/{MODEL_NAME}/chat_{DATETIME}.json`, or `{SAVE_NAME}` if provided
- **exit**: closes the interface
"""
SUPPORTED_GENERATION_KWARGS = [
"max_new_tokens",
"do_sample",
"num_beams",
"temperature",
"top_p",
"top_k",
"repetition_penalty",
]
SETTING_RE = r"^set\s+[A-Za-z\s_]+=[A-Za-z\d\s.!\"#$%&'()*+,-/:<=>?@\[\]^_`{|}~]+(?:;\s*[A-Za-z\s_]+=[A-Za-z\d\s.!\"#$%&'()*+,-/:<=>?@\[\]^_`{|}~]+)*$"
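# Illustrative check (added comment, not part of the original script): the pattern above is meant
# to accept inputs such as "set temperature=0.7" or "set max_new_tokens=64; do_sample=True".
# >>> import re
# >>> bool(re.match(SETTING_RE, "set temperature=0.7; top_k=20"))
# True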
DEFAULT_EXAMPLES = {
"llama": {"text": "There is a Llama in my lawn, how can I get rid of it?"},
"code": {
"text": "Write a Python function that integrates any Python function f(x) numerically over an arbitrary interval [x_start, x_end]."
},
"helicopter": {"text": "How many helicopters can a human eat in one sitting?"},
"numbers": {"text": "Count to 10 but skip every number ending with an 'e'"},
"birds": {"text": "Why aren't birds real?"},
"socks": {"text": "Why is it important to eat socks after meditating?"},
}
@dataclass
class ChatArguments:
r"""
Arguments for the chat script.
Args:
model_name_or_path (`str`):
Name of the pre-trained model.
user (`str` or `None`, *optional*, defaults to `None`):
Username to display in chat interface.
system_prompt (`str` or `None`, *optional*, defaults to `None`):
System prompt.
save_folder (`str`, *optional*, defaults to `"./chat_history/"`):
Folder to save chat history.
device (`str`, *optional*, defaults to `"cpu"`):
Device to use for inference.
examples_path (`str` or `None`, *optional*, defaults to `None`):
Path to a yaml file with examples.
max_new_tokens (`int`, *optional*, defaults to `256`):
Maximum number of tokens to generate.
do_sample (`bool`, *optional*, defaults to `True`):
Whether to sample outputs during generation.
num_beams (`int`, *optional*, defaults to `1`):
Number of beams for beam search.
temperature (`float`, *optional*, defaults to `1.0`):
Temperature parameter for generation.
top_k (`int`, *optional*, defaults to `50`):
Value of k for top-k sampling.
top_p (`float`, *optional*, defaults to `1.0`):
Value of p for nucleus sampling.
repetition_penalty (`float`, *optional*, defaults to `1.0`):
Repetition penalty.
eos_tokens (`str` or `None`, *optional*, defaults to `None`):
EOS tokens to stop the generation. If multiple they should be comma separated.
eos_token_ids (`str` or `None`, *optional*, defaults to `None`):
EOS token IDs to stop the generation. If multiple they should be comma separated.
model_revision (`str`, *optional*, defaults to `"main"`):
Specific model version to use (can be a branch name, tag name or commit id).
torch_dtype (`str` or `None`, *optional*, defaults to `None`):
Override the default `torch.dtype` and load the model under this dtype. If `'auto'` is passed, the dtype
will be automatically derived from the model's weights.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether to trust remote code when loading a model.
attn_implementation (`str` or `None`, *optional*, defaults to `None`):
Which attention implementation to use; you can run --attn_implementation=flash_attention_2, in which case
you must install this manually by running `pip install flash-attn --no-build-isolation`.
load_in_8bit (`bool`, *optional*, defaults to `False`):
Whether to use 8 bit precision for the base model - works only with LoRA.
load_in_4bit (`bool`, *optional*, defaults to `False`):
Whether to use 4 bit precision for the base model - works only with LoRA.
bnb_4bit_quant_type (`str`, *optional*, defaults to `"nf4"`):
Quantization type.
use_bnb_nested_quant (`bool`, *optional*, defaults to `False`):
Whether to use nested quantization.
"""
# General settings
model_name_or_path: str = field(metadata={"help": "Name of the pre-trained model."})
user: Optional[str] = field(default=None, metadata={"help": "Username to display in chat interface."})
system_prompt: Optional[str] = field(default=None, metadata={"help": "System prompt."})
save_folder: str = field(default="./chat_history/", metadata={"help": "Folder to save chat history."})
device: str = field(default="cpu", metadata={"help": "Device to use for inference."})
examples_path: Optional[str] = field(default=None, metadata={"help": "Path to a yaml file with examples."})
# Generation settings
max_new_tokens: int = field(default=256, metadata={"help": "Maximum number of tokens to generate."})
do_sample: bool = field(default=True, metadata={"help": "Whether to sample outputs during generation."})
num_beams: int = field(default=1, metadata={"help": "Number of beams for beam search."})
temperature: float = field(default=1.0, metadata={"help": "Temperature parameter for generation."})
top_k: int = field(default=50, metadata={"help": "Value of k for top-k sampling."})
top_p: float = field(default=1.0, metadata={"help": "Value of p for nucleus sampling."})
repetition_penalty: float = field(default=1.0, metadata={"help": "Repetition penalty."})
eos_tokens: Optional[str] = field(
default=None,
metadata={"help": "EOS tokens to stop the generation. If multiple they should be comma separated."},
)
eos_token_ids: Optional[str] = field(
default=None,
metadata={"help": "EOS token IDs to stop the generation. If multiple they should be comma separated."},
)
# Model loading
model_revision: str = field(
default="main",
metadata={"help": "Specific model version to use (can be a branch name, tag name or commit id)."},
)
torch_dtype: Optional[str] = field(
default=None,
metadata={
"help": "Override the default `torch.dtype` and load the model under this dtype. If `'auto'` is passed, "
"the dtype will be automatically derived from the model's weights.",
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
trust_remote_code: bool = field(
default=False, metadata={"help": "Whether to trust remote code when loading a model."}
)
attn_implementation: Optional[str] = field(
default=None,
metadata={
"help": "Which attention implementation to use; you can run --attn_implementation=flash_attention_2, in "
"which case you must install this manually by running `pip install flash-attn --no-build-isolation`."
},
)
load_in_8bit: bool = field(
default=False,
metadata={"help": "Whether to use 8 bit precision for the base model - works only with LoRA."},
)
load_in_4bit: bool = field(
default=False,
metadata={"help": "Whether to use 4 bit precision for the base model - works only with LoRA."},
)
bnb_4bit_quant_type: str = field(default="nf4", metadata={"help": "Quantization type.", "choices": ["fp4", "nf4"]})
use_bnb_nested_quant: bool = field(default=False, metadata={"help": "Whether to use nested quantization."})
class RichInterface:
def __init__(self, model_name=None, user_name=None):
self._console = Console()
if model_name is None:
self.model_name = "assistant"
else:
self.model_name = model_name
if user_name is None:
self.user_name = "user"
else:
self.user_name = user_name
def stream_output(self, output_stream):
"""Stream output from a role."""
# This method is originally from the FastChat CLI: https://github.com/lm-sys/FastChat/blob/main/fastchat/serve/cli.py
# Create a Live context for updating the console output
text = ""
self._console.print(f"[bold blue]<{self.model_name}>:")
with Live(console=self._console, refresh_per_second=4) as live:
# Read lines from the stream
for i, outputs in enumerate(output_stream):
if not outputs or i == 0:
continue
text += outputs
# Render the accumulated text as Markdown
                # NOTE: this is a workaround for rendering non-standard Markdown in rich. The
                # chatbot's output treats "\n" as a new line for better compatibility with
                # real-world text, but rendering it directly as Markdown would break the format,
                # because standard Markdown treats a single "\n" in normal text as a space.
                # Our workaround is to add two spaces at the end of each line. This is not a
                # perfect solution, as it introduces trailing spaces (only) in code blocks, but
                # it works well for console output, because the console generally does not care
                # about trailing spaces.
lines = []
for line in text.splitlines():
lines.append(line)
if line.startswith("```"):
# Code block marker - do not add trailing spaces, as it would
# break the syntax highlighting
lines.append("\n")
else:
lines.append(" \n")
markdown = Markdown("".join(lines).strip(), code_theme="github-dark")
# Update the Live console output
live.update(markdown)
self._console.print()
return text
def input(self):
input = self._console.input(f"[bold red]<{self.user_name}>:\n")
self._console.print()
return input
def clear(self):
self._console.clear()
def print_user_message(self, text):
self._console.print(f"[bold red]<{self.user_name}>:[/ bold red]\n{text}")
self._console.print()
def print_green(self, text):
self._console.print(f"[bold green]{text}")
self._console.print()
def print_red(self, text):
self._console.print(f"[bold red]{text}")
self._console.print()
def print_help(self):
self._console.print(Markdown(HELP_STRING))
self._console.print()
def get_username():
if platform.system() == "Windows":
return os.getlogin()
else:
return pwd.getpwuid(os.getuid()).pw_name
def create_default_filename(model_name):
time_str = time.strftime("%Y-%m-%d_%H-%M-%S")
return f"{model_name}/chat_{time_str}.json"
def save_chat(chat, args, filename):
output_dict = {}
output_dict["settings"] = vars(args)
output_dict["chat_history"] = chat
folder = args.save_folder
if filename is None:
filename = create_default_filename(args.model_name_or_path)
filename = os.path.join(folder, filename)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
json.dump(output_dict, f, indent=4)
return os.path.abspath(filename)
def clear_chat_history(system_prompt):
if system_prompt is None:
chat = []
else:
chat = [{"role": "system", "content": system_prompt}]
return chat
def parse_settings(user_input, current_args, interface):
settings = user_input[4:].strip().split(";")
settings = [(setting.split("=")[0], setting[len(setting.split("=")[0]) + 1 :]) for setting in settings]
settings = dict(settings)
error = False
for name in settings:
if hasattr(current_args, name):
try:
if isinstance(getattr(current_args, name), bool):
if settings[name] == "True":
settings[name] = True
elif settings[name] == "False":
settings[name] = False
else:
raise ValueError
else:
settings[name] = type(getattr(current_args, name))(settings[name])
            except ValueError:
                error = True
                interface.print_red(
                    f"Cannot cast setting {name} (={settings[name]}) to {type(getattr(current_args, name))}."
                )
        else:
            error = True
            interface.print_red(f"There is no '{name}' setting.")
if error:
interface.print_red("There was an issue parsing the settings. No settings have been changed.")
return current_args, False
else:
for name in settings:
setattr(current_args, name, settings[name])
interface.print_green(f"Set {name} to {settings[name]}.")
time.sleep(1.5) # so the user has time to read the changes
return current_args, True
def load_model_and_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path,
revision=args.model_revision,
trust_remote_code=args.trust_remote_code,
)
torch_dtype = args.torch_dtype if args.torch_dtype in ["auto", None] else getattr(torch, args.torch_dtype)
quantization_config = get_quantization_config(args)
model_kwargs = dict(
revision=args.model_revision,
attn_implementation=args.attn_implementation,
torch_dtype=torch_dtype,
device_map="auto",
quantization_config=quantization_config,
)
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path, trust_remote_code=args.trust_remote_code, **model_kwargs
)
if getattr(model, "hf_device_map", None) is None:
model = model.to(args.device)
return model, tokenizer
def parse_eos_tokens(tokenizer, eos_tokens, eos_token_ids):
if tokenizer.pad_token_id is None:
pad_token_id = tokenizer.eos_token_id
else:
pad_token_id = tokenizer.pad_token_id
all_eos_token_ids = []
if eos_tokens is not None:
all_eos_token_ids.extend(tokenizer.convert_tokens_to_ids(eos_tokens.split(",")))
if eos_token_ids is not None:
all_eos_token_ids.extend([int(token_id) for token_id in eos_token_ids.split(",")])
if len(all_eos_token_ids) == 0:
all_eos_token_ids.append(tokenizer.eos_token_id)
return pad_token_id, all_eos_token_ids
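# Illustrative sketch (added comment, not part of the original script; assumes `tokenizer` is an
# already-loaded `AutoTokenizer`): with comma-separated ids, the second return value is the parsed
# id list; if both arguments are None, it falls back to [tokenizer.eos_token_id], and the pad token
# id defaults to the eos token id when the tokenizer defines no pad token.
# >>> pad_id, eos_ids = parse_eos_tokens(tokenizer, eos_tokens=None, eos_token_ids="0,1")
# >>> eos_ids
# [0, 1]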
def main(args: ChatArguments):
if args.examples_path is None:
examples = DEFAULT_EXAMPLES
else:
with open(args.examples_path) as f:
examples = yaml.safe_load(f)
current_args = copy.deepcopy(args)
if args.user is None:
user = get_username()
else:
user = args.user
model, tokenizer = load_model_and_tokenizer(args)
generation_streamer = TextIteratorStreamer(tokenizer, skip_special_tokens=True, skip_prompt=True)
pad_token_id, eos_token_ids = parse_eos_tokens(tokenizer, args.eos_tokens, args.eos_token_ids)
interface = RichInterface(model_name=args.model_name_or_path, user_name=user)
interface.clear()
chat = clear_chat_history(current_args.system_prompt)
while True:
try:
user_input = interface.input()
if user_input == "clear":
chat = clear_chat_history(current_args.system_prompt)
interface.clear()
continue
if user_input == "help":
interface.print_help()
continue
if user_input == "exit":
break
if user_input == "reset":
interface.clear()
current_args = copy.deepcopy(args)
chat = clear_chat_history(current_args.system_prompt)
continue
if user_input.startswith("save") and len(user_input.split()) < 2:
split_input = user_input.split()
if len(split_input) == 2:
filename = split_input[1]
else:
filename = None
filename = save_chat(chat, current_args, filename)
interface.print_green(f"Chat saved in {filename}!")
continue
if re.match(SETTING_RE, user_input):
current_args, success = parse_settings(user_input, current_args, interface)
if success:
chat = []
interface.clear()
continue
if user_input.startswith("example") and len(user_input.split()) == 2:
example_name = user_input.split()[1]
if example_name in examples:
interface.clear()
chat = []
interface.print_user_message(examples[example_name]["text"])
user_input = examples[example_name]["text"]
else:
interface.print_red(
f"Example {example_name} not found in list of available examples: {list(examples.keys())}."
)
continue
chat.append({"role": "user", "content": user_input})
inputs = tokenizer.apply_chat_template(chat, return_tensors="pt", add_generation_prompt=True).to(
model.device
)
attention_mask = torch.ones_like(inputs)
generation_kwargs = dict(
inputs=inputs,
attention_mask=attention_mask,
streamer=generation_streamer,
max_new_tokens=current_args.max_new_tokens,
do_sample=current_args.do_sample,
num_beams=current_args.num_beams,
temperature=current_args.temperature,
top_k=current_args.top_k,
top_p=current_args.top_p,
repetition_penalty=current_args.repetition_penalty,
pad_token_id=pad_token_id,
eos_token_id=eos_token_ids,
)
thread = Thread(target=model.generate, kwargs=generation_kwargs)
thread.start()
model_output = interface.stream_output(generation_streamer)
thread.join()
chat.append({"role": "assistant", "content": model_output})
except KeyboardInterrupt:
break
def make_parser(subparsers: argparse._SubParsersAction = None):
dataclass_types = (ChatArguments,)
if subparsers is not None:
parser = subparsers.add_parser("chat", help=HELP_STRING, dataclass_types=dataclass_types)
else:
parser = TrlParser(dataclass_types)
return parser
if __name__ == "__main__":
parser = make_parser()
(chat_args,) = parser.parse_args_and_config()
main(chat_args)
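# Illustrative invocation (added comment; the exact entry point depends on how TRL is installed):
# this script is typically reached through the TRL CLI, e.g.
#   trl chat --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct --device cuda
# where the flags map to the `ChatArguments` fields defined above.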
| trl/trl/scripts/chat.py/0 | {
"file_path": "trl/trl/scripts/chat.py",
"repo_id": "trl",
"token_count": 8753
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from transformers import is_bitsandbytes_available
from ..core import flatten_dict
@dataclass
class DDPOConfig:
r"""
Configuration class for the [`DDPOTrainer`].
Using [`~transformers.HfArgumentParser`] we can turn this class into
[argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
command line.
Parameters:
exp_name (`str`, *optional*, defaults to `os.path.basename(sys.argv[0])[: -len(".py")]`):
            Name of this experiment (defaults to the file name without the extension).
run_name (`str`, *optional*, defaults to `""`):
Name of this run.
seed (`int`, *optional*, defaults to `0`):
Random seed.
        log_with (`Literal["wandb", "tensorboard"]` or `None`, *optional*, defaults to `None`):
Log with either 'wandb' or 'tensorboard', check
https://huggingface.co/docs/accelerate/usage_guides/tracking for more details.
tracker_kwargs (`Dict`, *optional*, defaults to `{}`):
Keyword arguments for the tracker (e.g. wandb_project).
accelerator_kwargs (`Dict`, *optional*, defaults to `{}`):
Keyword arguments for the accelerator.
project_kwargs (`Dict`, *optional*, defaults to `{}`):
Keyword arguments for the accelerator project config (e.g. `logging_dir`).
tracker_project_name (`str`, *optional*, defaults to `"trl"`):
Name of project to use for tracking.
logdir (`str`, *optional*, defaults to `"logs"`):
Top-level logging directory for checkpoint saving.
num_epochs (`int`, *optional*, defaults to `100`):
Number of epochs to train.
save_freq (`int`, *optional*, defaults to `1`):
Number of epochs between saving model checkpoints.
num_checkpoint_limit (`int`, *optional*, defaults to `5`):
Number of checkpoints to keep before overwriting old ones.
mixed_precision (`str`, *optional*, defaults to `"fp16"`):
Mixed precision training.
allow_tf32 (`bool`, *optional*, defaults to `True`):
Allow `tf32` on Ampere GPUs.
resume_from (`str`, *optional*, defaults to `""`):
Resume training from a checkpoint.
sample_num_steps (`int`, *optional*, defaults to `50`):
Number of sampler inference steps.
sample_eta (`float`, *optional*, defaults to `1.0`):
Eta parameter for the DDIM sampler.
sample_guidance_scale (`float`, *optional*, defaults to `5.0`):
Classifier-free guidance weight.
sample_batch_size (`int`, *optional*, defaults to `1`):
Batch size (per GPU) to use for sampling.
sample_num_batches_per_epoch (`int`, *optional*, defaults to `2`):
Number of batches to sample per epoch.
train_batch_size (`int`, *optional*, defaults to `1`):
Batch size (per GPU) to use for training.
train_use_8bit_adam (`bool`, *optional*, defaults to `False`):
Use 8bit Adam optimizer from bitsandbytes.
train_learning_rate (`float`, *optional*, defaults to `3e-4`):
Learning rate.
train_adam_beta1 (`float`, *optional*, defaults to `0.9`):
Adam beta1.
train_adam_beta2 (`float`, *optional*, defaults to `0.999`):
Adam beta2.
train_adam_weight_decay (`float`, *optional*, defaults to `1e-4`):
Adam weight decay.
train_adam_epsilon (`float`, *optional*, defaults to `1e-8`):
Adam epsilon.
train_gradient_accumulation_steps (`int`, *optional*, defaults to `1`):
Number of gradient accumulation steps.
train_max_grad_norm (`float`, *optional*, defaults to `1.0`):
Maximum gradient norm for gradient clipping.
train_num_inner_epochs (`int`, *optional*, defaults to `1`):
Number of inner epochs per outer epoch.
train_cfg (`bool`, *optional*, defaults to `True`):
Whether to use classifier-free guidance during training.
train_adv_clip_max (`float`, *optional*, defaults to `5.0`):
            Clip advantages to the range `[-train_adv_clip_max, train_adv_clip_max]`.
train_clip_range (`float`, *optional*, defaults to `1e-4`):
PPO clip range.
train_timestep_fraction (`float`, *optional*, defaults to `1.0`):
Fraction of timesteps to train on.
per_prompt_stat_tracking (`bool`, *optional*, defaults to `False`):
Whether to track statistics for each prompt separately.
per_prompt_stat_tracking_buffer_size (`int`, *optional*, defaults to `16`):
Number of reward values to store in the buffer for each prompt.
per_prompt_stat_tracking_min_count (`int`, *optional*, defaults to `16`):
Minimum number of reward values to store in the buffer.
async_reward_computation (`bool`, *optional*, defaults to `False`):
Whether to compute rewards asynchronously.
max_workers (`int`, *optional*, defaults to `2`):
Maximum number of workers to use for async reward computation.
negative_prompts (`str`, *optional*, defaults to `""`):
Comma-separated list of prompts to use as negative examples.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether to push the final model checkpoint to the Hub.
"""
exp_name: str = field(
default=os.path.basename(sys.argv[0])[: -len(".py")],
metadata={"help": "Name of this experiment (by default is the file name without the extension name)."},
)
run_name: str = field(
default="",
metadata={"help": "Name of this run."},
)
seed: int = field(
default=0,
metadata={"help": "Random seed."},
)
log_with: Optional[str] = field(
default=None,
metadata={
"help": "Log with either 'wandb' or 'tensorboard'.",
"choices": ["wandb", "tensorboard"],
},
)
tracker_kwargs: dict = field(
default_factory=dict,
metadata={"help": "Keyword arguments for the tracker (e.g. wandb_project)."},
)
accelerator_kwargs: dict = field(
default_factory=dict,
metadata={"help": "Keyword arguments for the accelerator."},
)
project_kwargs: dict = field(
default_factory=dict,
metadata={"help": "Keyword arguments for the accelerator project config (e.g. `logging_dir`)."},
)
tracker_project_name: str = field(
default="trl",
metadata={"help": "Name of project to use for tracking."},
)
logdir: str = field(
default="logs",
metadata={"help": "Top-level logging directory for checkpoint saving."},
)
num_epochs: int = field(
default=100,
metadata={"help": "Number of epochs to train."},
)
save_freq: int = field(
default=1,
metadata={"help": "Number of epochs between saving model checkpoints."},
)
num_checkpoint_limit: int = field(
default=5,
metadata={"help": "Number of checkpoints to keep before overwriting old ones."},
)
mixed_precision: str = field(
default="fp16",
metadata={"help": "Mixed precision training."},
)
allow_tf32: bool = field(
default=True,
metadata={"help": "Allow `tf32` on Ampere GPUs."},
)
resume_from: str = field(
default="",
metadata={"help": "Resume training from a checkpoint."},
)
sample_num_steps: int = field(
default=50,
metadata={"help": "Number of sampler inference steps."},
)
sample_eta: float = field(
default=1.0,
metadata={"help": "Eta parameter for the DDIM sampler."},
)
sample_guidance_scale: float = field(
default=5.0,
metadata={"help": "Classifier-free guidance weight."},
)
sample_batch_size: int = field(
default=1,
metadata={"help": "Batch size (per GPU) to use for sampling."},
)
sample_num_batches_per_epoch: int = field(
default=2,
metadata={"help": "Number of batches to sample per epoch."},
)
train_batch_size: int = field(
default=1,
metadata={"help": "Batch size (per GPU) to use for training."},
)
train_use_8bit_adam: bool = field(
default=False,
metadata={"help": "Use 8bit Adam optimizer from bitsandbytes."},
)
train_learning_rate: float = field(
default=3e-4,
metadata={"help": "Learning rate."},
)
train_adam_beta1: float = field(
default=0.9,
metadata={"help": "Adam beta1."},
)
train_adam_beta2: float = field(
default=0.999,
metadata={"help": "Adam beta2."},
)
train_adam_weight_decay: float = field(
default=1e-4,
metadata={"help": "Adam weight decay."},
)
train_adam_epsilon: float = field(
default=1e-8,
metadata={"help": "Adam epsilon."},
)
train_gradient_accumulation_steps: int = field(
default=1,
metadata={"help": "Number of gradient accumulation steps."},
)
train_max_grad_norm: float = field(
default=1.0,
metadata={"help": "Maximum gradient norm for gradient clipping."},
)
train_num_inner_epochs: int = field(
default=1,
metadata={"help": "Number of inner epochs per outer epoch."},
)
train_cfg: bool = field(
default=True,
metadata={"help": "Whether to use classifier-free guidance during training."},
)
train_adv_clip_max: float = field(
default=5.0,
metadata={"help": "Clip advantages to the range."},
)
train_clip_range: float = field(
default=1e-4,
metadata={"help": "PPO clip range."},
)
train_timestep_fraction: float = field(
default=1.0,
metadata={"help": "Fraction of timesteps to train on."},
)
per_prompt_stat_tracking: bool = field(
default=False,
metadata={"help": "Whether to track statistics for each prompt separately."},
)
per_prompt_stat_tracking_buffer_size: int = field(
default=16,
metadata={"help": "Number of reward values to store in the buffer for each prompt."},
)
per_prompt_stat_tracking_min_count: int = field(
default=16,
metadata={"help": "Minimum number of reward values to store in the buffer."},
)
async_reward_computation: bool = field(
default=False,
metadata={"help": "Whether to compute rewards asynchronously."},
)
max_workers: int = field(
default=2,
metadata={"help": "Maximum number of workers to use for async reward computation."},
)
negative_prompts: str = field(
default="",
metadata={"help": "Comma-separated list of prompts to use as negative examples."},
)
push_to_hub: bool = field(
default=False,
metadata={"help": "Whether to push the final model checkpoint to the Hub."},
)
def to_dict(self):
output_dict = {}
for key, value in self.__dict__.items():
output_dict[key] = value
return flatten_dict(output_dict)
def __post_init__(self):
if self.train_use_8bit_adam and not is_bitsandbytes_available():
raise ImportError(
"You need to install bitsandbytes to use 8bit Adam. "
"You can install it with `pip install bitsandbytes`."
)
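# Illustrative usage sketch (added comment, not part of the original module): building the config
# and flattening it for a tracker, assuming the `trl` package is installed.
# >>> from trl import DDPOConfig
# >>> config = DDPOConfig(num_epochs=10, mixed_precision="bf16", train_learning_rate=1e-4)
# >>> config.to_dict()["train_learning_rate"]
# 0.0001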
| trl/trl/trainer/ddpo_config.py/0 | {
"file_path": "trl/trl/trainer/ddpo_config.py",
"repo_id": "trl",
"token_count": 4996
} |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import textwrap
import warnings
from functools import wraps
from typing import Any, Callable, Optional, Union
import datasets
import jinja2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import transformers
from datasets import Dataset
from packaging import version
from torch.utils.data import DataLoader, IterableDataset
from transformers import (
BaseImageProcessor,
DataCollator,
FeatureExtractionMixin,
GenerationConfig,
PreTrainedModel,
PreTrainedTokenizerBase,
ProcessorMixin,
Trainer,
TrainerCallback,
is_apex_available,
is_wandb_available,
)
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, seed_worker
from transformers.training_args import OptimizerNames
from transformers.utils import is_peft_available, is_sagemaker_mp_enabled, logging
from ..data_utils import apply_chat_template, is_conversational, maybe_apply_chat_template
from ..import_utils import is_vllm_available
from ..models import create_reference_model
from ..models.utils import unwrap_model_for_generation
from .judges import BasePairwiseJudge
from .online_dpo_config import OnlineDPOConfig
from .utils import (
SIMPLE_CHAT_TEMPLATE,
DPODataCollatorWithPadding,
disable_dropout_in_model,
empty_cache,
generate_model_card,
get_comet_experiment_url,
get_reward,
prepare_deepspeed,
truncate_right,
)
if is_peft_available():
from peft import PeftModel, get_peft_model
if is_apex_available():
from apex import amp
if is_sagemaker_mp_enabled():
from smdistributed.modelparallel import __version__ as SMP_VERSION
IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
else:
IS_SAGEMAKER_MP_POST_1_10 = False
if is_vllm_available():
from vllm import LLM, SamplingParams
if is_wandb_available():
import wandb
logger = logging.get_logger(__name__)
class OnlineDPOTrainer(Trainer):
r"""
Initialize OnlineDPOTrainer.
Args:
model (`transformers.PreTrainedModel` or `torch.nn.Module`):
The model to train, preferably an `AutoModelForCausalLM`.
ref_model (`transformers.PreTrainedModel` or `torch.nn.Module` or `None`):
The reference model to use for training. If None is specified, the reference model will be created from
the model.
reward_model (`transformers.PreTrainedModel` or `torch.nn.Module` or `None`):
The reward model to score completions with, preferably an `AutoModelForSequenceClassification`.
judge (`BasePairwiseJudge`):
The judge to use for pairwise comparison of model completions.
args (`OnlineDPOConfig`):
The online DPO config arguments to use for training.
data_collator (`transformers.DataCollator`):
The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used
which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
train_dataset (`datasets.Dataset`):
The dataset to use for training.
eval_dataset (`datasets.Dataset`):
The dataset to use for evaluation.
processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
Processing class used to process the data. If provided, will be used to automatically process the inputs
for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
reuse the fine-tuned model.
peft_config (`dict`):
The peft config to use for training.
compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
The function to use to compute the metrics. Must take a `EvalPrediction` and return
a dictionary string to metric values.
callbacks (`list[transformers.TrainerCallback]`):
The callbacks to use for training.
optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
The optimizer and scheduler to use for training.
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
The function to use to preprocess the logits before computing the metrics.
"""
_tag_names = ["trl", "online-dpo"]
def __init__(
self,
model: Union[PreTrainedModel, nn.Module],
ref_model: Union[PreTrainedModel, nn.Module, None] = None,
reward_model: Union[PreTrainedModel, nn.Module, None] = None,
judge: Optional[BasePairwiseJudge] = None,
args: Optional[OnlineDPOConfig] = None,
data_collator: Optional[DataCollator] = None,
train_dataset: Optional[Union[Dataset, IterableDataset, "datasets.Dataset"]] = None,
eval_dataset: Optional[Union[Dataset, dict[str, Dataset], "datasets.Dataset"]] = None,
processing_class: Optional[
Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
] = None,
reward_processing_class: Optional[PreTrainedTokenizerBase] = None,
peft_config: Optional[dict] = None,
compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None,
callbacks: Optional[list[TrainerCallback]] = None,
optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
) -> None:
if ref_model is model:
raise ValueError(
"`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the "
"same as `model`, either omit the `ref_model` argument or pass `None`."
)
self.ref_model = ref_model
if reward_model is not None and judge is not None:
warnings.warn(
"Both `reward_model` and `judge` are provided. Please choose provide only one of them. "
"Ignoring `judge` and using `reward_model`.",
UserWarning,
)
judge = None
elif reward_model is None and judge is None:
raise ValueError("Either `reward_model` or `judge` must be provided.")
self.reward_model = reward_model
self.reward_processing_class = reward_processing_class
self.judge = judge
        if args is None:
            raise ValueError("`args` must be provided.")
        if args.missing_eos_penalty is not None and judge is not None:
            raise ValueError("`missing_eos_penalty` is not supported when `judge` is provided.")
# Check that the processing_class is provided
if processing_class is None:
raise ValueError("`processing_class` must be provided.")
# Convert to PEFT model if peft_config is provided
if peft_config is not None:
# Check if PEFT is available
if not is_peft_available():
raise ImportError(
"PEFT is not available and passed `peft_config`. Please install PEFT with "
"`pip install peft` to use it."
)
# If the model is already a PeftModel, we need to merge and unload it.
# Further information here: https://huggingface.co/docs/trl/dpo_trainer#reference-model-considerations-with-peft
if isinstance(model, PeftModel):
model = model.merge_and_unload()
# Get peft model with the given config
model = get_peft_model(model, peft_config)
# Disable dropout in the model and reference model
if args.disable_dropout:
disable_dropout_in_model(model)
if self.ref_model is not None:
disable_dropout_in_model(self.ref_model)
# Handle the ref_model
# Usually, the user wants the ref model to be the initial version of the model. When using PEFT, it's easy to
# get the ref model, as it's just the model with a disabled adapter. When not using PEFT, we need to create
        # the ref model from the model by copying it, disabling its gradients, and setting it in evaluation mode.
if ref_model is None: # No ref model provided, the most common case
if peft_config is None:
self.ref_model = create_reference_model(model) # copy, disable gradients, set eval mode
else:
self.ref_model = None # we don't need a ref model here, we can just disable the adapter.
else: # rare case, the user provided a ref model
self.ref_model = ref_model
self.ref_model.eval()
# Disable the gradient and set the reward model in eval mode
if self.reward_model is not None:
self.reward_model.eval()
        # Define the collator if it is not provided
if data_collator is None:
data_collator = DPODataCollatorWithPadding(pad_token_id=processing_class.pad_token_id)
self.max_length = args.max_length
self.stats = {
"objective/kl": [],
"objective/entropy": [],
"objective/non_score_reward": [],
"rewards/chosen": [],
"rewards/rejected": [],
"rewards/accuracies": [],
"rewards/margins": [],
"logps/chosen": [],
"logps/rejected": [],
"val/contain_eos_token": [],
"beta": [],
}
if self.reward_model is not None:
self.stats["objective/rlhf_reward"] = []
self.stats["objective/scores_margin"] = []
self.stats["objective/scores"] = []
if args.use_vllm:
if not is_vllm_available():
raise ImportError(
"vLLM is not available and `use_vllm` is set to True. Please install vLLM with "
"`pip install vllm` to use it."
)
self.generation_config = SamplingParams(
n=2, # 2 generations per prompt
max_tokens=args.max_new_tokens,
temperature=args.temperature,
top_k=50,
top_p=1.0,
                detokenize=False,  # to avoid vLLM decoding the outputs (we don't need it)
)
# vLLM dynamically adjusts the size of the key-value cache based on available GPU memory at instantiation.
# A larger cache size improves speed, so we would expect gpu_memory_utilization=1.
# However, at this stage, the optimizer's weights are not yet loaded onto the GPU; they will be loaded
# after the first optimizer step and remain in GPU memory throughout training. So we must reserve enough
# space for them. Setting gpu_memory_utilization to 0.55 seems to work well in practice.
self.llm = LLM(
model=model.name_or_path,
gpu_memory_utilization=0.55,
dtype=torch.float32,
                # When released by vLLM, we will be able to distribute the model on multiple GPUs
# See https://github.com/vllm-project/vllm/pull/12071
# tensor_parallel_size=torch.cuda.device_count(),
# distributed_executor_backend="external_launcher",
)
else:
self.generation_config = GenerationConfig(
max_new_tokens=args.max_new_tokens,
temperature=args.temperature,
top_k=50,
top_p=1.0,
do_sample=True,
use_cache=False if args.gradient_checkpointing else True,
)
# The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
# input tensor associated with the key "input_ids". However, in Online DPO, the sampled data does not include
# the "input_ids" key. As a result, the trainer issues the warning: "Could not estimate the number of tokens
# of the input, floating-point operations will not be computed." To suppress this warning, we set the
# "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
# that the warning has already been issued.
model.warnings_issued["estimate_tokens"] = True
super().__init__(
model=model,
args=args,
data_collator=data_collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
processing_class=processing_class,
compute_metrics=compute_metrics,
callbacks=callbacks,
optimizers=optimizers,
preprocess_logits_for_metrics=preprocess_logits_for_metrics,
)
# Add tags for models that have been loaded with the correct transformers version
if hasattr(self.model, "add_model_tags"):
self.model.add_model_tags(self._tag_names)
self._beta = args.beta
# Placed after the super().__init__ because we need self.is_deepspeed_enabled and self.accelerator
if self.is_deepspeed_enabled:
if self.reward_model is not None:
self.reward_model = prepare_deepspeed(
self.reward_model, args.per_device_train_batch_size, args.fp16, args.bf16
)
if self.ref_model is not None:
self.ref_model = prepare_deepspeed(
self.ref_model, args.per_device_train_batch_size, args.fp16, args.bf16
)
else:
if self.ref_model is not None:
self.ref_model = self.ref_model.to(self.accelerator.device)
if self.reward_model is not None:
self.reward_model = self.reward_model.to(self.accelerator.device)
@property
def beta(self):
if isinstance(self._beta, list):
epoch = self.state.epoch
return self._beta[epoch] if epoch < len(self._beta) else self._beta[-1]
else:
return self._beta
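    # Illustrative note (added comment, not part of the original class): `beta` may be a single
    # float or a list used as a per-epoch schedule, e.g. `beta=[0.1, 0.05]` is intended to apply
    # 0.1 during the first epoch and 0.05 afterwards, with the last value reused once the
    # schedule is exhausted.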
@staticmethod
def tokenize_row(feature, is_encoder_decoder: bool, tokenizer: PreTrainedTokenizerBase) -> dict[str, Any]:
"""Tokenize a single row from a DPO specific dataset."""
if not is_encoder_decoder:
batch = tokenizer(feature["prompt"], add_special_tokens=False)
# Add BOS token to head of prompt. Avoid adding if it's already there
if tokenizer.bos_token_id is not None:
prompt_len_input_ids = len(batch["input_ids"])
if prompt_len_input_ids == 0 or tokenizer.bos_token_id != batch["input_ids"][0]:
batch["input_ids"] = [tokenizer.bos_token_id] + batch["input_ids"]
batch["attention_mask"] = [1] + batch["attention_mask"]
else:
batch = tokenizer(feature["prompt"], add_special_tokens=True)
batch = {f"prompt_{key}": value for key, value in batch.items()}
return batch
# Same as Trainer.get_train_dataloader but skip the "remove_unused_columns".
@wraps(Trainer.get_train_dataloader)
def get_train_dataloader(self) -> DataLoader:
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
train_dataset = self.train_dataset
data_collator = self.data_collator
dataloader_params = {
"batch_size": self._train_batch_size,
"collate_fn": data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"persistent_workers": self.args.dataloader_persistent_workers,
}
if not isinstance(train_dataset, torch.utils.data.IterableDataset):
dataloader_params["sampler"] = self._get_train_sampler()
dataloader_params["drop_last"] = self.args.dataloader_drop_last
dataloader_params["worker_init_fn"] = seed_worker
dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params))
# Same as Trainer.get_eval_dataloader but skip the "remove_unused_columns".
@wraps(Trainer.get_eval_dataloader)
def get_eval_dataloader(self, eval_dataset: Optional[Union[str, Dataset]] = None) -> DataLoader:
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
# If we have persistent workers, don't do a fork bomb especially as eval datasets
# don't change during training
dataloader_key = eval_dataset if isinstance(eval_dataset, str) else "eval"
if (
hasattr(self, "_eval_dataloaders")
and dataloader_key in self._eval_dataloaders
and self.args.dataloader_persistent_workers
):
return self.accelerator.prepare(self._eval_dataloaders[dataloader_key])
eval_dataset = (
self.eval_dataset[eval_dataset]
if isinstance(eval_dataset, str)
else eval_dataset
if eval_dataset is not None
else self.eval_dataset
)
data_collator = self.data_collator
dataloader_params = {
"batch_size": self.args.eval_batch_size,
"collate_fn": data_collator,
"num_workers": self.args.dataloader_num_workers,
"pin_memory": self.args.dataloader_pin_memory,
"persistent_workers": self.args.dataloader_persistent_workers,
}
if not isinstance(eval_dataset, torch.utils.data.IterableDataset):
dataloader_params["sampler"] = self._get_eval_sampler(eval_dataset)
dataloader_params["drop_last"] = self.args.dataloader_drop_last
dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor
# accelerator.free_memory() will destroy the references, so
# we need to store the non-prepared version
eval_dataloader = DataLoader(eval_dataset, **dataloader_params)
if self.args.dataloader_persistent_workers:
if hasattr(self, "_eval_dataloaders"):
self._eval_dataloaders[dataloader_key] = eval_dataloader
else:
self._eval_dataloaders = {dataloader_key: eval_dataloader}
return self.accelerator.prepare(eval_dataloader)
def _generate_vllm(self, model, prompts):
eos_token_id = self.processing_class.eos_token_id
pad_token_id = self.processing_class.pad_token_id
# Load the latest weights
llm_model = self.llm.llm_engine.model_executor.driver_worker.model_runner.model
llm_model.load_weights(model.state_dict().items())
if is_conversational({"prompt": prompts[0]}):
outputs = self.llm.chat(prompts, self.generation_config, use_tqdm=False)
else:
outputs = self.llm.generate(prompts, self.generation_config, use_tqdm=False)
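        # Flatten so that the first half of each list holds every prompt's first completion and the second half its
        # second completion; the prompt ids are duplicated in the same order just below.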
completion_ids = [list(output.outputs[i].token_ids) for i in range(2) for output in outputs]
prompt_ids = [list(output.prompt_token_ids) for _ in range(2) for output in outputs]
# Create mask and pad the prompt and completion
max_prompt_length = max(len(ids) for ids in prompt_ids)
prompt_mask = [[0] * (max_prompt_length - len(ids)) + [1] * len(ids) for ids in prompt_ids]
prompt_ids = [[pad_token_id] * (max_prompt_length - len(ids)) + ids for ids in prompt_ids]
max_tokens = self.generation_config.max_tokens
completion_mask = [[1] * len(ids) + [0] * (max_tokens - len(ids)) for ids in completion_ids]
completion_ids = [
ids + [eos_token_id] if ids[-1] != eos_token_id and len(ids) < max_tokens else ids
for ids in completion_ids
]
completion_ids = [ids + [pad_token_id] * (max_tokens - len(ids)) for ids in completion_ids]
# Convert to tensors
prompt_ids = torch.tensor(prompt_ids, device=self.accelerator.device)
prompt_mask = torch.tensor(prompt_mask, device=self.accelerator.device)
completion_ids = torch.tensor(completion_ids, device=self.accelerator.device)
completion_mask = torch.tensor(completion_mask, device=self.accelerator.device)
return prompt_ids, prompt_mask, completion_ids, completion_mask
def _generate(self, model, prompts):
eos_token_id = self.processing_class.eos_token_id
pad_token_id = self.processing_class.pad_token_id
# Apply chat template and tokenize the input. We do this on-the-fly to enable the use of reward models and
# policies with different tokenizers / chat templates.
inputs = [{"prompt": prompt} for prompt in prompts]
inputs = [maybe_apply_chat_template(x, self.processing_class) for x in inputs]
inputs = [self.tokenize_row(x, model.config.is_encoder_decoder, self.processing_class) for x in inputs]
inputs = self.data_collator(inputs)
# Sample 2 completions per prompt of size `max_new_tokens` from the model
inputs = self._prepare_inputs(inputs)
prompt_ids = inputs["prompt_input_ids"].repeat(2, 1)
prompt_mask = inputs["prompt_attention_mask"].repeat(2, 1)
with unwrap_model_for_generation(
model, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
) as unwrapped_model:
output = unwrapped_model.generate(
input_ids=prompt_ids,
attention_mask=prompt_mask,
generation_config=self.generation_config,
)
completion_ids = output[:, prompt_ids.size(1) :]
completion_ids, completion_mask = truncate_right(completion_ids, eos_token_id, pad_token_id)
return prompt_ids, prompt_mask, completion_ids, completion_mask
def _forward(self, model, prompt_ids, prompt_mask, completion_ids, completion_mask):
# Get the number of tokens to truncate from prompt
num_tokens_to_truncate = max(prompt_ids.size(1) + completion_ids.size(1) - self.max_length, 0)
# Truncate left to avoid oom
prompt_ids = prompt_ids[:, num_tokens_to_truncate:]
prompt_mask = prompt_mask[:, num_tokens_to_truncate:]
# Concat the prompt and completion
prompt_completion_ids = torch.cat((prompt_ids, completion_ids), dim=1)
prompt_completion_mask = torch.cat((prompt_mask, completion_mask), dim=1)
# Get the logprobs of the completions from the model
output = model(prompt_completion_ids, attention_mask=prompt_completion_mask)
        # There is an offset of 1 because the model predicts the next token
logits = output.logits[:, prompt_ids.size(1) - 1 : -1]
# Take the completion tokens logprob
logprobs = torch.take_along_dim(logits.log_softmax(dim=-1), completion_ids.unsqueeze(-1), dim=2).squeeze(-1)
return logprobs
def training_step(
self, model: nn.Module, inputs: dict[str, Union[torch.Tensor, Any]], num_items_in_batch: Optional[int] = None
) -> torch.Tensor:
model.train()
prompts = inputs["prompt"]
batch_size = len(prompts)
if self.args.use_vllm:
prompt_ids, prompt_mask, completion_ids, completion_mask = self._generate_vllm(model, prompts)
else:
prompt_ids, prompt_mask, completion_ids, completion_mask = self._generate(model, prompts)
contain_eos_token = torch.any(completion_ids == self.processing_class.eos_token_id, dim=-1)
logprobs = self._forward(model, prompt_ids, prompt_mask, completion_ids, completion_mask)
with torch.no_grad():
if self.ref_model is not None:
ref_logprobs = self._forward(self.ref_model, prompt_ids, prompt_mask, completion_ids, completion_mask)
else: # peft case: we just need to disable the adapter
with self.model.disable_adapter():
ref_logprobs = self._forward(self.model, prompt_ids, prompt_mask, completion_ids, completion_mask)
# Decode the completions, and format them if the input is conversational
device = logprobs.device
completions = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)
if is_conversational({"prompt": prompts[0]}):
completions = [[{"role": "assistant", "content": completion}] for completion in completions]
# Get the reward from the reward model or judge
if self.judge is not None:
# Once formatted, conversational data may contain special tokens (such as <|im_start|>) that are not
# directly understandable by the judge and could alter its judgment. To avoid this and make the judge
# independent of the model's chat template, we use the raw conversation data, and apply our own chat
# template to it.
if is_conversational({"prompt": prompts[0]}):
environment = jinja2.Environment()
template = environment.from_string(SIMPLE_CHAT_TEMPLATE)
prompts = [template.render(messages=prompt) for prompt in prompts]
completions = [template.render(messages=completion) for completion in completions]
ranks_of_first_completion = self.judge.judge(
prompts, list(zip(completions[:batch_size], completions[batch_size:]))
)
# convert ranks to a True/False mask:
# when rank == 0, it means the first completion is the best
# when rank == 1, it means the second completion is the best
mask = torch.tensor([rank == 0 for rank in ranks_of_first_completion], device=device)
else:
# The reward model may not have the same chat template or tokenizer as the model, so we need to use the
# raw data (string), apply the chat template (if needed), and tokenize it with the reward processing class.
prompts = 2 * prompts # repeat the prompt: [prompt0, prompt1] -> [prompt0, prompt1, prompt0, prompt1]
if is_conversational({"prompt": prompts[0]}):
examples = [{"prompt": p, "completion": c} for p, c in zip(prompts, completions)]
examples = [apply_chat_template(example, self.reward_processing_class) for example in examples]
prompts = [example["prompt"] for example in examples]
completions = [example["completion"] for example in examples]
# Tokenize the prompts
prompts_ids = self.reward_processing_class(
prompts, padding=True, return_tensors="pt", padding_side="left"
)["input_ids"].to(device)
context_length = prompts_ids.shape[1]
# Tokenize the completions
completions_ids = self.reward_processing_class(
completions, padding=True, return_tensors="pt", padding_side="right"
)["input_ids"].to(device)
# Concatenate the prompts and completions and get the reward
prompt_completion_ids = torch.cat((prompts_ids, completions_ids), dim=1)
with torch.inference_mode():
_, scores, _ = get_reward(
self.reward_model, prompt_completion_ids, self.reward_processing_class.pad_token_id, context_length
)
# Filter completion. Ensure that the sample contains stop_token_id
# Completions not passing that filter will receive a lower score.
if self.args.missing_eos_penalty is not None:
scores[~contain_eos_token] -= self.args.missing_eos_penalty
# Split the scores in 2 (the prompts of the first half are the same as the second half)
first_half, second_half = scores.split(batch_size)
# Get the indices of the chosen and rejected examples
mask = first_half >= second_half
batch_range = torch.arange(batch_size, device=device)
chosen_indices = batch_range + (~mask * batch_size)
rejected_indices = batch_range + (mask * batch_size)
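        # e.g. with batch_size=2 and mask=[True, False]: chosen_indices=[0, 3] and rejected_indices=[2, 1]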
# Build tensor so that the first half is the chosen examples and the second half the rejected examples
cr_indices = torch.cat((chosen_indices, rejected_indices), dim=0) # cr = chosen and rejected
cr_logprobs = logprobs[cr_indices]
cr_ref_logprobs = ref_logprobs[cr_indices]
# mask out the padding tokens
padding_mask = ~completion_mask.bool()
cr_padding_mask = padding_mask[cr_indices]
cr_logprobs_sum = (cr_logprobs * ~cr_padding_mask).sum(1)
cr_ref_logprobs_sum = (cr_ref_logprobs * ~cr_padding_mask).sum(1)
# Split the chosen and rejected examples
chosen_logprobs_sum, rejected_logprobs_sum = torch.split(cr_logprobs_sum, batch_size)
chosen_ref_logprobs_sum, rejected_ref_logprobs_sum = torch.split(cr_ref_logprobs_sum, batch_size)
pi_logratios = chosen_logprobs_sum - rejected_logprobs_sum
ref_logratios = chosen_ref_logprobs_sum - rejected_ref_logprobs_sum
logits = pi_logratios - ref_logratios
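        # sigmoid (DPO) loss: -log(sigmoid(beta * logits)); IPO loss: (logits - 1 / (2 * beta))**2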
if self.args.loss_type == "sigmoid":
losses = -F.logsigmoid(self.beta * logits)
elif self.args.loss_type == "ipo":
losses = (logits - 1 / (2 * self.beta)) ** 2
else:
            raise NotImplementedError(f"invalid loss type {self.args.loss_type}")
loss = losses.mean()
# Log everything
if self.reward_model is not None:
scores_margin = scores[chosen_indices] - scores[rejected_indices]
self.stats["objective/scores_margin"].append(
self.accelerator.gather_for_metrics(scores_margin.mean()).mean().item()
)
self.stats["objective/scores"].append(self.accelerator.gather_for_metrics(scores.mean()).mean().item())
self.stats["val/contain_eos_token"].append(contain_eos_token.float().mean().item())
self.stats["logps/chosen"].append(self.accelerator.gather_for_metrics(chosen_logprobs_sum).mean().item())
self.stats["logps/rejected"].append(self.accelerator.gather_for_metrics(rejected_logprobs_sum).mean().item())
kl = logprobs - ref_logprobs
mean_kl = kl.sum(1).mean()
self.stats["objective/kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item())
non_score_reward = (-self.beta * kl).sum(1)
mean_non_score_reward = non_score_reward.mean()
self.stats["objective/non_score_reward"].append(
self.accelerator.gather_for_metrics(mean_non_score_reward).mean().item()
)
if self.reward_model is not None:
rlhf_reward = scores + non_score_reward
self.stats["objective/rlhf_reward"].append(self.accelerator.gather_for_metrics(rlhf_reward).mean().item())
mean_entropy = -logprobs.sum(1).mean()
self.stats["objective/entropy"].append(self.accelerator.gather_for_metrics(mean_entropy).mean().item())
chosen_rewards = self.beta * (chosen_logprobs_sum - chosen_ref_logprobs_sum)
gathered_chosen_rewards = self.accelerator.gather_for_metrics(chosen_rewards)
self.stats["rewards/chosen"].append(gathered_chosen_rewards.mean().item())
rejected_rewards = self.beta * (rejected_logprobs_sum - rejected_ref_logprobs_sum)
gathered_rejected_rewards = self.accelerator.gather_for_metrics(rejected_rewards)
self.stats["rewards/rejected"].append(gathered_rejected_rewards.mean().item())
margin = gathered_chosen_rewards - gathered_rejected_rewards
self.stats["rewards/margins"].append(margin.mean().item())
accuracy = margin > 0
self.stats["rewards/accuracies"].append(accuracy.float().mean().item())
self.stats["beta"].append(self.beta)
if (
self.args.torch_empty_cache_steps is not None
and self.state.global_step % self.args.torch_empty_cache_steps == 0
):
empty_cache()
kwargs = {}
        # For LOMO optimizers you need to explicitly use the learning rate
if self.args.optim in [OptimizerNames.LOMO, OptimizerNames.ADALOMO]:
kwargs["learning_rate"] = self._get_learning_rate()
if self.args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if self.use_apex:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
self.accelerator.backward(loss, **kwargs)
return loss.detach() / self.args.gradient_accumulation_steps
# Same as Trainer._maybe_log_save_evaluate but log our metrics
# start_time defaults to None to allow compatibility with transformers<=4.46
def _maybe_log_save_evaluate(self, tr_loss, grad_norm, model, trial, epoch, ignore_keys_for_eval, start_time=None):
if self.control.should_log and self.state.global_step > self._globalstep_last_logged:
logs: dict[str, float] = {}
# all_gather + mean() to get average loss over all processes
tr_loss_scalar = self._nested_gather(tr_loss).mean().item()
# reset tr_loss to zero
tr_loss -= tr_loss
logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
if grad_norm is not None:
logs["grad_norm"] = grad_norm.detach().item() if isinstance(grad_norm, torch.Tensor) else grad_norm
logs["learning_rate"] = self._get_learning_rate()
# Add our metrics
for key, val in self.stats.items():
logs[key] = sum(val) / len(val)
self.stats = {key: [] for key in self.stats} # reset stats
self._total_loss_scalar += tr_loss_scalar
self._globalstep_last_logged = self.state.global_step
self.store_flos()
if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
self.log(logs, start_time)
else: # transformers<=4.46
self.log(logs)
metrics = None
if self.control.should_evaluate:
metrics = self._evaluate(trial, ignore_keys_for_eval)
is_new_best_metric = self._determine_best_metric(metrics=metrics, trial=trial)
if self.args.save_strategy == "best":
self.control.should_save = is_new_best_metric
if self.control.should_save:
self._save_checkpoint(model, trial)
self.control = self.callback_handler.on_save(self.args, self.state, self.control)
# Copy-pasted from transformers.Trainer to maintain compatibility with earlier versions.
# This can be removed once the minimum transformers version is updated to 4.47.
# Refer to https://github.com/huggingface/trl/pull/2288 for more details.
def _determine_best_metric(self, metrics, trial):
"""
Determine if the model should be saved based on the evaluation metrics.
If args.metric_for_best_model is not set, the loss is used.
Returns:
bool: True if a new best metric was found, else False
"""
is_new_best_metric = False
if self.args.metric_for_best_model is not None:
metric_to_check = self.args.metric_for_best_model
if not metric_to_check.startswith("eval_"):
metric_to_check = f"eval_{metric_to_check}"
try:
metric_value = metrics[metric_to_check]
except KeyError as exc:
raise KeyError(
f"The `metric_for_best_model` training argument is set to '{metric_to_check}', which is not found in the evaluation metrics. "
f"The available evaluation metrics are: {list(metrics.keys())}. Consider changing the `metric_for_best_model` via the TrainingArguments."
) from exc
operator = np.greater if self.args.greater_is_better else np.less
if self.state.best_metric is None:
self.state.best_metric = float("-inf") if self.args.greater_is_better else float("inf")
if operator(metric_value, self.state.best_metric):
run_dir = self._get_output_dir(trial=trial)
checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"
output_dir = os.path.join(run_dir, checkpoint_folder)
self.state.best_metric = metric_value
self.state.best_model_checkpoint = output_dir
is_new_best_metric = True
return is_new_best_metric
def create_model_card(
self,
model_name: Optional[str] = None,
dataset_name: Optional[str] = None,
tags: Union[str, list[str], None] = None,
):
"""
Creates a draft of a model card using the information available to the `Trainer`.
Args:
model_name (`str` or `None`, *optional*, defaults to `None`):
Name of the model.
dataset_name (`str` or `None`, *optional*, defaults to `None`):
Name of the dataset used for training.
tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
Tags to be associated with the model card.
"""
if not self.is_world_process_zero():
return
if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
base_model = self.model.config._name_or_path
else:
base_model = None
tags = tags or []
if isinstance(tags, str):
tags = [tags]
if hasattr(self.model.config, "unsloth_version"):
tags.append("unsloth")
citation = textwrap.dedent("""\
@article{guo2024direct,
title = {{Direct Language Model Alignment from Online AI Feedback}},
author = {Shangmin Guo and Biao Zhang and Tianlin Liu and Tianqi Liu and Misha Khalman and Felipe Llinares and Alexandre Ram{\'{e}} and Thomas Mesnard and Yao Zhao and Bilal Piot and Johan Ferret and Mathieu Blondel},
year = 2024,
eprint = {arXiv:2402.04792}
}""")
model_card = generate_model_card(
base_model=base_model,
model_name=model_name,
hub_model_id=self.hub_model_id,
dataset_name=dataset_name,
tags=tags,
wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
comet_url=get_comet_experiment_url(),
trainer_name="Online DPO",
trainer_citation=citation,
paper_title="Direct Language Model Alignment from Online AI Feedback",
paper_id="2402.04792",
)
model_card.save(os.path.join(self.args.output_dir, "README.md"))
| trl/trl/trainer/online_dpo_trainer.py/0 | {
"file_path": "trl/trl/trainer/online_dpo_trainer.py",
"repo_id": "trl",
"token_count": 17117
} |
.PHONY: quality style test docs utils
check_dirs := .
# Check that source code meets quality standards
extra_quality_checks:
python utils/check_copies.py
python utils/check_dummies.py
python utils/check_repo.py
doc-builder style src/accelerate docs/source --max_len 119
# this target runs checks on all files
quality:
ruff check $(check_dirs)
ruff format --check $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119 --check_only
# Format source code automatically and check if there are any problems left that need manual fixing
style:
ruff check $(check_dirs) --fix
ruff format $(check_dirs)
doc-builder style src/accelerate docs/source --max_len 119
# Run tests for the library
test_big_modeling:
python -m pytest -s -v ./tests/test_big_modeling.py ./tests/test_modeling_utils.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_big_modeling.log",)
test_core:
python -m pytest -s -v ./tests/ --ignore=./tests/test_examples.py --ignore=./tests/deepspeed --ignore=./tests/test_big_modeling.py \
--ignore=./tests/fsdp --ignore=./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_core.log",)
test_cli:
python -m pytest -s -v ./tests/test_cli.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_cli.log",)
test_deepspeed:
python -m pytest -s -v ./tests/deepspeed $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_deepspeed.log",)
test_fsdp:
python -m pytest -s -v ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_fsdp.log",)
# Since the new version of pytest will *change* how things are collected, we need `deepspeed` to
# run after test_core and test_cli
test:
$(MAKE) test_core
$(MAKE) test_cli
$(MAKE) test_big_modeling
$(MAKE) test_deepspeed
$(MAKE) test_fsdp
test_examples:
python -m pytest -s -v ./tests/test_examples.py $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_examples.log",)
# Broken down example tests for the CI runners
test_integrations:
python -m pytest -s -v ./tests/deepspeed ./tests/fsdp $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_integrations.log",)
test_example_differences:
python -m pytest -s -v ./tests/test_examples.py::ExampleDifferenceTests $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_example_diff.log",)
test_checkpoint_epoch:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_epoch.log",)
test_checkpoint_step:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "by_step" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_checkpoint_step.log",)
# Same as test but used to install only the base dependencies
test_prod:
$(MAKE) test_core
test_rest:
python -m pytest -s -v ./tests/test_examples.py::FeatureExamplesTests -k "not by_step and not by_epoch" $(if $(IS_GITHUB_CI),--report-log "$(PYTORCH_VERSION)_rest.log",)
# For developers to prepare a release
prepare_release:
rm -rf dist build
python setup.py bdist_wheel sdist
# Make sure this is run in a fresh venv of some form
install_test_release:
pip uninstall accelerate -y
pip install -i https://testpypi.python.org/pypi --extra-index-url https://pypi.org/simple accelerate
# Run as `make target=testpypi upload_release`
upload_release:
@if [ "$(target)" != "testpypi" ] && [ "$(target)" != "pypi" ]; then \
echo "Error: target must be either 'testpypi' or 'pypi'"; \
exit 1; \
fi
twine upload dist/* -r $(target) | accelerate/Makefile/0 | {
"file_path": "accelerate/Makefile",
"repo_id": "accelerate",
"token_count": 1313
} |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script tests to ensure that `accelerate` performs at the same level as raw `TransformerEngine`.
This particular script verifies this for FSDP training.
"""
from functools import partial
import evaluate
import torch
import transformer_engine.common.recipe as te_recipe
import transformer_engine.pytorch as te
from fp8_utils import evaluate_model, get_named_parameters, get_training_utilities
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import MixedPrecision
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from transformer_engine.common.recipe import DelayedScaling
from transformers.models.bert import BertLayer
from accelerate import Accelerator
from accelerate import FullyShardedDataParallelPlugin as FSDPPlugin
from accelerate.state import AcceleratorState
from accelerate.utils import FP8RecipeKwargs, set_seed
from accelerate.utils.transformer_engine import convert_model
MODEL_NAME = "bert-base-cased"
METRIC = evaluate.load("glue", "mrpc")
FSDP_WRAP_POLICY = partial(transformer_auto_wrap_policy, transformer_layer_cls={BertLayer})
def train_baseline():
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(MODEL_NAME)
accelerator = Accelerator()
device = accelerator.device
model.to(device)
# Convert the model to TE
old_named_params = get_named_parameters(model)
with torch.no_grad():
convert_model(model)
FP8_RECIPE_KWARGS = {"fp8_format": te_recipe.Format.HYBRID, "amax_history_len": 32, "amax_compute_algo": "max"}
fp8_recipe = DelayedScaling(**FP8_RECIPE_KWARGS)
new_named_params = get_named_parameters(model)
# Convert the model to FSDP
model = FSDP(
model,
use_orig_params=True,
mixed_precision=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
auto_wrap_policy=FSDP_WRAP_POLICY,
)
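    # convert_model swapped the parameters for their TransformerEngine counterparts, so remap the optimizer's
    # param groups from the old parameter objects to the new ones before training.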
mapping = {p: new_named_params[n] for n, p in old_named_params.items()}
for param_group in optimizer.param_groups:
param_group["params"] = [mapping[p] for p in param_group["params"]]
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
with te.fp8_autocast(enabled=True, fp8_recipe=fp8_recipe):
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
batch = batch.to(device)
outputs = model(**batch)
loss = outputs.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert (
trained_model_results["accuracy"] > base_model_results["accuracy"]
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
assert (
trained_model_results["f1"] > base_model_results["f1"]
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
return base_model_results, trained_model_results
def train_integration():
FP8_RECIPE_KWARGS = {"fp8_format": "HYBRID", "amax_history_len": 32, "amax_compute_algo": "max"}
kwargs_handlers = [FP8RecipeKwargs(backend="TE", **FP8_RECIPE_KWARGS)]
AcceleratorState()._reset_state(True)
fsdp_plugin = FSDPPlugin(
auto_wrap_policy=FSDP_WRAP_POLICY,
use_orig_params=True,
mixed_precision_policy=MixedPrecision(param_dtype=torch.bfloat16, reduce_dtype=torch.float32),
)
accelerator = Accelerator(mixed_precision="fp8", fsdp_plugin=fsdp_plugin, kwargs_handlers=kwargs_handlers)
set_seed(42)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = get_training_utilities(
MODEL_NAME, accelerator=accelerator
)
model, optimizer = accelerator.prepare(model, optimizer)
base_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
model.train()
for _ in range(2):
for batch in train_dataloader:
outputs = model(**batch)
loss = outputs.loss
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
lr_scheduler.step()
trained_model_results = evaluate_model(model, eval_dataloader, METRIC, accelerator=accelerator)
assert (
trained_model_results["accuracy"] > base_model_results["accuracy"]
), f'Accuracy should be higher for the trained model: {trained_model_results["accuracy"]} > {base_model_results["accuracy"]}'
assert (
trained_model_results["f1"] > base_model_results["f1"]
), f'F1 score should be higher for the trained model: {trained_model_results["f1"]} > {base_model_results["f1"]}'
return base_model_results, trained_model_results
if __name__ == "__main__":
baseline_not_trained, baseline_trained = train_baseline()
accelerator_not_trained, accelerator_trained = train_integration()
assert (
baseline_not_trained["accuracy"] == accelerator_not_trained["accuracy"]
), f'Accuracy should be the same for the baseline and accelerator: {baseline_not_trained["accuracy"]} == {accelerator_not_trained["accuracy"]}'
assert (
baseline_not_trained["f1"] == accelerator_not_trained["f1"]
), f'F1 score should be the same for the baseline and accelerator: {baseline_not_trained["f1"]} == {accelerator_not_trained["f1"]}'
assert (
baseline_trained["accuracy"] == accelerator_trained["accuracy"]
), f'Accuracy should be the same for the baseline and accelerator: {baseline_trained["accuracy"]} == {accelerator_trained["accuracy"]}'
assert (
baseline_trained["f1"] == accelerator_trained["f1"]
), f'F1 score should be the same for the baseline and accelerator: {baseline_trained["f1"]} == {accelerator_trained["f1"]}'
torch.distributed.destroy_process_group()
| accelerate/benchmarks/fp8/transformer_engine/fsdp.py/0 | {
"file_path": "accelerate/benchmarks/fp8/transformer_engine/fsdp.py",
"repo_id": "accelerate",
"token_count": 2483
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Troubleshoot
This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.
## Logging
Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.
To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:
1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.
For example, to set `log_level="INFO"`:
```py
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="INFO")
```
By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`.
If a log should be called on all processes and in order, also pass `in_order=True`.
```py
from accelerate.logging import get_logger
logger = get_logger(__name__, log_level="DEBUG")
# log all processes
logger.debug("thing_to_log", main_process_only=False)
# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```
## Hanging code and timeout errors
There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.
### Mismatched tensor shapes
Mismatched tensor shapes is a common issue that can cause your code to hang for a significant amount of time on a distributed setup.
When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.
You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.
<hfoptions id="mismatch">
<hfoption id="CLI">
```bash
accelerate launch --debug {my_script.py} --arg1 --arg2
```
</hfoption>
<hfoption id="environment variable">
If enabling debug mode as an environment variable, you don't need to call `accelerate launch`.
```bash
ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2
```
</hfoption>
<hfoption id="config.yaml">
Add `debug: true` to your `config.yaml` file.
```yaml
compute_environment: LOCAL_MACHINE
debug: true
```
</hfoption>
</hfoptions>
Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue.
```py
Traceback (most recent call last):
File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module>
main()
File "/home/zach_mueller_huggingface_co/test.py", line 15, in main
broadcast_tensor = broadcast(tensor)
File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper
accelerate.utils.operations.DistributedOperationException:
Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid.
Operation: `accelerate.utils.operations.broadcast`
Input shapes:
- Process 0: [1, 5]
- Process 1: [1, 2, 5]
```
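For reference, a minimal sketch that would trigger this kind of mismatch is shown below (hypothetical example; the shapes `[1, 5]` and `[1, 2, 5]` mirror the traceback above):

```py
import torch

from accelerate import Accelerator
from accelerate.utils import broadcast


def main():
    accelerator = Accelerator()
    # Process 0 builds a [1, 5] tensor while every other process builds a [1, 2, 5] tensor,
    # so the collective operation cannot line the shapes up and hangs (or fails fast in debug mode).
    if accelerator.process_index == 0:
        tensor = torch.zeros(1, 5, device=accelerator.device)
    else:
        tensor = torch.zeros(1, 2, 5, device=accelerator.device)
    broadcast_tensor = broadcast(tensor)
    print(broadcast_tensor.shape)


if __name__ == "__main__":
    main()
```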
### Early stopping
For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs.
If you have early stopping conditionals, use the `set_trigger` and `check_trigger` methods to make sure all the processes
are ended correctly.
```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
# and that conditional might be true only on process 1
if should_do_breakpoint(loss):
accelerator.set_trigger()
# Later in the training script when we need to check for the breakpoint
if accelerator.check_trigger():
break
```
### Low kernel versions on Linux
On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.
### MPI
If your distributed CPU training job using MPI is hanging, ensure that you have
[passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means
that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.
Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the
hostnames for each of the nodes.
```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```
## Out-of-Memory
One of the most frustrating errors when it comes to running training scripts is hitting "Out-of-Memory" on devices like CUDA, XPU or CPU. The entire script needs to be restarted and any progress is lost.
To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma).
This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.
To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.
<Tip warning={true}>
The inner function **must** take batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for you. Any object (models, optimizers) that consumes device memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.
</Tip>
```diff
def training_function(args):
accelerator = Accelerator()
+ @find_executable_batch_size(starting_batch_size=args.batch_size)
+ def inner_training_loop(batch_size):
+ nonlocal accelerator # Ensure they can be used in our context
+ accelerator.free_memory() # Free all lingering references
model = get_model()
model.to(accelerator.device)
optimizer = get_optimizer()
train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
lr_scheduler = get_scheduler(
optimizer,
num_training_steps=len(train_dataloader)*num_epochs
)
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
train(model, optimizer, train_dataloader, lr_scheduler)
validate(model, eval_dataloader)
+ inner_training_loop()
```
## Non-reproducible results between device setups
If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.
For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.
To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.
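As a rough sketch, the effective batch size can be sanity-checked like this (gradient accumulation is included here as an assumption; set it to 1 if you don't use it):

```py
per_device_batch_size = 8       # batch size passed to each dataloader
num_processes = 2               # e.g. two GPUs
gradient_accumulation_steps = 1

effective_batch_size = per_device_batch_size * num_processes * gradient_accumulation_steps
print(effective_batch_size)  # 16, matching the single-GPU run that used a batch size of 16
```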
For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
## Performance issues on different GPUs
If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:
- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.
Vastly different GPUs within the same setup can lead to performance bottlenecks.
## Ask for help
If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.
- Ask for help on the Hugging Face forums by posting your question in the [Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
- Create an Issue on the Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.
| accelerate/docs/source/basic_tutorials/troubleshooting.md/0 | {
"file_path": "accelerate/docs/source/basic_tutorials/troubleshooting.md",
"repo_id": "accelerate",
"token_count": 3046
} |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DeepSpeed
[DeepSpeed](https://github.com/deepspeedai/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:
1. Optimizer state partitioning (ZeRO stage 1)
2. Gradient partitioning (ZeRO stage 2)
3. Parameter partitioning (ZeRO stage 3)
4. Custom mixed precision training handling
5. A range of fast CUDA-extension-based optimizers
6. ZeRO-Offload to CPU and Disk/NVMe
7. Hierarchical partitioning of model parameters (ZeRO++)
ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe-support is described in the paper [ZeRO-Infinity: Breaking the GPU
Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).
DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.
DeepSpeed ZeRO-3 can be used for inference as well since it allows huge models to be loaded on multiple GPUs, which
won't be possible on a single GPU.
Accelerate integrates [DeepSpeed](https://github.com/deepspeedai/DeepSpeed) via 2 options:
1. Integration of the DeepSpeed features via `deepspeed config file` specification in `accelerate config`. You just supply your custom config file or use our template. Most of
this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility.
The user may have to change a few lines of code depending on the config.
2. Integration via `deepspeed_plugin`. This supports a subset of the DeepSpeed features and uses default options for the rest of the configuration.
The user need not change any code; this option is good for those who are fine with most of the default settings of DeepSpeed (see the short sketch after this list).
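For illustration, option 2 can look roughly like this in code (a minimal sketch, assuming a ZeRO Stage-2 setup; adjust the plugin fields to your use case):

```python
from accelerate import Accelerator, DeepSpeedPlugin

# The plugin covers the core DeepSpeed options and falls back to sensible defaults for the rest.
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=1)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
```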
## What is integrated?
Training:
1. Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++.
Below is a short description of Data Parallelism using ZeRO (Zero Redundancy Optimizer), along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)

(Source: [link](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))
a. **Stage 1** : Shards optimizer states across data parallel workers/GPUs
b. **Stage 2** : Shards optimizer states + gradients across data parallel workers/GPUs
c. **Stage 3**: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs
d. **Optimizer Offload**: Offloads the gradients + optimizer states to CPU/Disk building on top of ZERO Stage 2
e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZERO Stage 3
f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.
<u>Note</u>: With respect to Disk Offload, the disk should be an NVMe for decent speed, but it technically works on any disk
Inference:
1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but
it doesn't use an optimizer or an lr scheduler, and only Stage 3 is relevant. For more details see:
[deepspeed-zero-inference](#deepspeed-zero-inference).
## How it works?
**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/deepspeedai/DeepSpeed#installation)
for more information.
We will first look at the easy-to-use integration via `accelerate config`.
This is followed by the more flexible and feature-rich `deepspeed config file` integration.
### Accelerate DeepSpeed Plugin
On your machine(s) just run:
```bash
accelerate config
```
and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed to which you should answer no. Then answer the following questions to generate a basic DeepSpeed config.
This will generate a config file that will be used automatically to properly set the
default options when doing
```bash
accelerate launch my_script.py --args_to_my_script
```
For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with DeepSpeed Plugin:
**ZeRO Stage-2 DeepSpeed Plugin Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
gradient_accumulation_steps: 1
gradient_clipping: 1.0
offload_optimizer_device: none
offload_param_device: none
zero3_init_flag: true
zero_stage: 2
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```
```bash
accelerate launch examples/nlp_example.py --mixed_precision fp16
```
**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
gradient_accumulation_steps: 1
gradient_clipping: 1.0
offload_optimizer_device: cpu
offload_param_device: cpu
zero3_init_flag: true
zero3_save_16bit_model: true
zero_stage: 3
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```
```bash
accelerate launch examples/nlp_example.py --mixed_precision fp16
```
Currently, `Accelerate` supports the following config through the CLI:
```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_optimizer_nvme_path`: Decides Nvme Path to offload optimizer states. If unspecified, will default to 'none'.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`offload_param_nvme_path`: Decides Nvme Path to offload parameters. If unspecified, will default to 'none'.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
`deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using multi-node setup.
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using multi-node setup.
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
```
To be able to tweak more options, you will need to use a DeepSpeed config file.
### DeepSpeed Config File
On your machine(s) just run:
```bash
accelerate config
```
and answer the questions asked. It will ask whether you want to use a config file for deepspeed to which you answer yes
and provide the path to the deepspeed config file.
This will generate a config file that will be used automatically to properly set the
default options when doing
```bash
accelerate launch my_script.py --args_to_my_script
```
For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File:
**ZeRO Stage-2 DeepSpeed Config File Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json
zero3_init_flag: true
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```
with the contents of `zero_stage2_config.json` being:
```json
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"weight_decay": "auto",
"torch_adam": true,
"adam_w_mode": true
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto",
"total_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 2,
"allgather_partitions": true,
"allgather_bucket_size": 2e8,
"overlap_comm": true,
"reduce_scatter": true,
"reduce_bucket_size": "auto",
"contiguous_gradients": true
},
"gradient_accumulation_steps": 1,
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
```
```bash
accelerate launch examples/by_feature/deepspeed_with_config_support.py \
--config_name "gpt2-large" \
--tokenizer_name "gpt2-large" \
--dataset_name "wikitext" \
--dataset_config_name "wikitext-2-raw-v1" \
--block_size 128 \
--output_dir "./clm/clm_deepspeed_stage2_accelerate" \
--learning_rate 5e-4 \
--per_device_train_batch_size 24 \
--per_device_eval_batch_size 24 \
--num_train_epochs 3 \
--with_tracking \
--report_to "wandb"\
```
**ZeRO Stage-3 with CPU offload DeepSpeed Config File Example**
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json
zero3_init_flag: true
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```
with the contents of `zero_stage3_offload_config.json` being:
```json
{
"fp16": {
"enabled": true,
"loss_scale": 0,
"loss_scale_window": 1000,
"initial_scale_power": 16,
"hysteresis": 2,
"min_loss_scale": 1
},
"optimizer": {
"type": "AdamW",
"params": {
"lr": "auto",
"weight_decay": "auto"
}
},
"scheduler": {
"type": "WarmupDecayLR",
"params": {
"warmup_min_lr": "auto",
"warmup_max_lr": "auto",
"warmup_num_steps": "auto",
"total_num_steps": "auto"
}
},
"zero_optimization": {
"stage": 3,
"offload_optimizer": {
"device": "cpu",
"pin_memory": true
},
"offload_param": {
"device": "cpu",
"pin_memory": true
},
"overlap_comm": true,
"contiguous_gradients": true,
"reduce_bucket_size": "auto",
"stage3_prefetch_bucket_size": "auto",
"stage3_param_persistence_threshold": "auto",
"sub_group_size": 1e9,
"stage3_max_live_parameters": 1e9,
"stage3_max_reuse_distance": 1e9,
"stage3_gather_16bit_weights_on_model_save": "auto"
},
"gradient_accumulation_steps": 1,
"gradient_clipping": "auto",
"steps_per_print": 2000,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"wall_clock_breakdown": false
}
```
```bash
accelerate launch examples/by_feature/deepspeed_with_config_support.py \
--config_name "gpt2-large" \
--tokenizer_name "gpt2-large" \
--dataset_name "wikitext" \
--dataset_config_name "wikitext-2-raw-v1" \
--block_size 128 \
--output_dir "./clm/clm_deepspeed_stage3_offload_accelerate" \
--learning_rate 5e-4 \
--per_device_train_batch_size 32 \
--per_device_eval_batch_size 32 \
--num_train_epochs 3 \
--with_tracking \
--report_to "wandb"\
```
**ZeRO++ Config Example**
You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/):
```json
{
"zero_optimization": {
"stage": 3,
"reduce_bucket_size": "auto",
"zero_quantized_weights": true,
"zero_hpz_partition_size": 8,
"zero_quantized_gradients": true,
"contiguous_gradients": true,
"overlap_comm": true
}
}
```
For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node)
**Important code changes when using DeepSpeed Config File**
1. DeepSpeed Optimizers and Schedulers. For more information on these,
see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation.
We will look at the changes needed in the code when using these.
a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file.
In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code.
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
# Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer
optimizer_cls = (
torch.optim.AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate)
# Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
else:
lr_scheduler = DummyScheduler(
optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps
)
```
b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.
In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin.
In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file.
c. Custom Optim + DS Scheduler: The case when only `scheduler` key is present in the DeepSpeed config file.
In this situation, the user has to use `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom scheduler in their code.
d. DS Optim + Custom Scheduler: The case when only `optimizer` key is present in the DeepSpeed config file.
This will result in an error because you can only use DS Scheduler when using DS Optim.
2. Notice the `auto` values in the example DeepSpeed config files above. These are filled in automatically by the `prepare` method
based on the model, dataloaders, dummy optimizer and dummy scheduler provided to it.
Only the `auto` fields shown in the examples above are handled by the `prepare` method; the rest have to be specified explicitly by the user.
The `auto` values are calculated as:
- `reduce_bucket_size`: `hidden_size * hidden_size`
- `stage3_prefetch_bucket_size`: `int(0.9 * hidden_size * hidden_size)`
- `stage3_param_persistence_threshold`: `10 * hidden_size`
For the `auto` feature to work for these 3 config entries, Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launch will fail and you will have to set these 3 config entries manually. Remember that the first 2 config entries are the communication buffers: the larger they are, the more efficient the communication will be, but also the more GPU memory they will consume, so this is a tunable performance trade-off.
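As a rough illustration, here is a minimal sketch (not Accelerate's actual implementation) of how these 3 entries are derived from the model config:
```python
def resolve_auto_zero3_buffer_sizes(model):
    # Mirror of the formulas listed above; the real resolution happens inside
    # Accelerate's DeepSpeed integration during `prepare`.
    config = model.config
    if hasattr(config, "hidden_size"):
        hidden_size = config.hidden_size
    elif hasattr(config, "hidden_sizes"):
        hidden_size = max(config.hidden_sizes)
    else:
        raise ValueError(
            "Set `reduce_bucket_size`, `stage3_prefetch_bucket_size` and "
            "`stage3_param_persistence_threshold` manually in the DeepSpeed config."
        )
    return {
        "reduce_bucket_size": hidden_size * hidden_size,
        "stage3_prefetch_bucket_size": int(0.9 * hidden_size * hidden_size),
        "stage3_param_persistence_threshold": 10 * hidden_size,
    }
```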
**Things to note when using DeepSpeed Config File**
Below is a sample script using `deepspeed_config_file` in different scenarios.
Code `test.py`:
```python
from accelerate import Accelerator
from accelerate.state import AcceleratorState
def main():
accelerator = Accelerator()
accelerator.print(f"{AcceleratorState()}")
if __name__ == "__main__":
main()
```
**Scenario 1**: A manually tampered accelerate config file that has `deepspeed_config_file` along with other entries.
1. Content of the `accelerate` config:
```yaml
command_file: null
commands: null
compute_environment: LOCAL_MACHINE
deepspeed_config:
gradient_accumulation_steps: 1
gradient_clipping: 1.0
offload_optimizer_device: 'cpu'
offload_param_device: 'cpu'
zero3_init_flag: true
zero3_save_16bit_model: true
zero_stage: 3
deepspeed_config_file: 'ds_config.json'
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
gpu_ids: null
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
megatron_lm_config: {}
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_name: null
tpu_zone: null
use_cpu: false
```
2. `ds_config.json`:
```json
{
"bf16": {
"enabled": true
},
"zero_optimization": {
"stage": 3,
"stage3_gather_16bit_weights_on_model_save": false,
"offload_optimizer": {
"device": "none"
},
"offload_param": {
"device": "none"
}
},
"gradient_clipping": 1.0,
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": 10,
"steps_per_print": 2000000
}
```
3. Output of `accelerate launch test.py`:
```bash
ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored:
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device',
'zero3_save_16bit_model', 'mixed_precision'].
Please specify them appropriately in the DeepSpeed config file.
If you are using an accelerate config file, remove other config variables mentioned in the above specified list.
The easiest method is to create a new config following the questionnaire via `accelerate config`.
It will only ask for the necessary config variables when using `deepspeed_config_file`.
```
**Scenario 2**: Follow the solution suggested in the error to create a new accelerate config and check that no ambiguity error is thrown anymore.
1. Run `accelerate config`:
```bash
$ accelerate config
-------------------------------------------------------------------------------------------------------------------------------
In which compute environment are you running?
This machine
-------------------------------------------------------------------------------------------------------------------------------
Which type of machine are you using?
multi-GPU
How many different machines will you use (use more than 1 for multi-node training)? [1]:
Do you wish to optimize your script with torch dynamo?[yes/NO]:
Do you want to use DeepSpeed? [yes/NO]: yes
Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes
Please enter the path to the json DeepSpeed config file: ds_config.json
Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes
How many GPU(s) should be used for distributed training? [1]:4
accelerate configuration saved at ds_config_sample.yaml
```
2. Content of the `accelerate` config:
```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
deepspeed_config_file: ds_config.json
zero3_init_flag: true
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
use_cpu: false
```
3. Output of `accelerate launch test.py`:
```bash
Distributed environment: DEEPSPEED Backend: nccl
Num processes: 4
Process index: 0
Local process index: 0
Device: cuda:0
Mixed precision type: bf16
ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}
```
**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed to `"auto"` in the DeepSpeed configuration file and checking that things work as expected.
1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments:
```json
{
"bf16": {
"enabled": "auto"
},
"zero_optimization": {
"stage": "auto",
"stage3_gather_16bit_weights_on_model_save": "auto",
"offload_optimizer": {
"device": "auto"
},
"offload_param": {
"device": "auto"
}
},
"gradient_clipping": "auto",
"train_batch_size": "auto",
"train_micro_batch_size_per_gpu": "auto",
"gradient_accumulation_steps": "auto",
"steps_per_print": 2000000
}
```
2. Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`:
```bash
Distributed environment: DEEPSPEED Backend: nccl
Num processes: 4
Process index: 0
Local process index: 0
Device: cuda:0
Mixed precision type: fp16
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
```
**Note**:
1. Remaining `"auto"` values are handled during the `accelerator.prepare()` call, as explained in point 2 of
`Important code changes when using DeepSpeed Config File`.
2. The value passed while creating the `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` is used only when `gradient_accumulation_steps` is `auto` in the DeepSpeed config file. When using the DeepSpeed Plugin, the value from the plugin is used and it overwrites the value passed while creating the `Accelerator` object.
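For example, a minimal sketch of this interplay, assuming a DeepSpeed config file in which `gradient_accumulation_steps` is set to `"auto"`:
```python
from accelerate import Accelerator

# Because the DeepSpeed config file sets "gradient_accumulation_steps": "auto",
# the value passed here (4) is the one DeepSpeed will actually use.
# If the config file hard-coded a number instead, that number would win and
# this argument would be ignored.
accelerator = Accelerator(gradient_accumulation_steps=4)
```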
## Saving and loading
1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.
2. Under ZeRO Stage-3, `state_dict` contains just the placeholders since the model weights are partitioned across multiple GPUs.
ZeRO Stage-3 has 2 options:
a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load("pytorch_model.bin"))`.
For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed Config file or set
`zero3_save_16bit_model` to True in DeepSpeed Plugin.
**Note that this option requires consolidation of the weights on one GPU, which can be slow and memory demanding, so only use this feature when needed.**
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
unwrapped_model = accelerator.unwrap_model(model)
# New Code #
# Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if
# `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or
# `zero3_save_16bit_model` is True in DeepSpeed Plugin.
# For Zero Stages 1 and 2, models are saved as usual in the output directory.
# The model name saved is `pytorch_model.bin`
unwrapped_model.save_pretrained(
args.output_dir,
is_main_process=accelerator.is_main_process,
save_function=accelerator.save,
state_dict=accelerator.get_state_dict(model),
)
```
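Afterwards, the consolidated 16-bit checkpoint can be loaded like any regular checkpoint. Below is a minimal sketch, assuming the training example above and the same output directory passed on the command line:
```python
import os

import torch
from transformers import AutoModelForCausalLM

output_dir = "./clm/clm_deepspeed_stage3_offload_accelerate"  # `args.output_dir` from the launch command above

# `save_pretrained` also wrote the model config, so the checkpoint can be reloaded directly ...
model = AutoModelForCausalLM.from_pretrained(output_dir)

# ... or the raw `pytorch_model.bin` state dict can be loaded into an already-instantiated model.
state_dict = torch.load(os.path.join(output_dir, "pytorch_model.bin"))
model.load_state_dict(state_dict)
```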
b. To get 32bit weights, first save the model using `model.save_checkpoint()`.
Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:
```python
success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"
if success:
logging.info(f"Success {status_msg}")
else:
logging.warning(f"Failure {status_msg}")
```
This will create ZeRO model and optimizer partitions along with a `zero_to_fp32.py` script in the checkpoint directory.
You can use this script to do offline consolidation; it requires no configuration files or GPUs.
Here is an example of its usage:
```bash
$ cd /path/to/checkpoint_dir
$ ./zero_to_fp32.py . pytorch_model.bin
Processing zero checkpoint at global_step1
Detected checkpoint of type zero stage 3, world_size: 2
Saving fp32 state dict to pytorch_model.bin (total_numel=60506624)
```
To get the 32bit model for saving/inference, you can do the following:
```python
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
unwrapped_model = accelerator.unwrap_model(model)
fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir)
```
If you are only interested in the `state_dict`, you can do the following:
```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)
```
Note that all these functions require ~2x the size of the final checkpoint in memory (general RAM).
## ZeRO Inference
DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity.
It uses the same ZeRO protocol as training, but it doesn't use an optimizer or an LR scheduler, and only Stage 3 is relevant.
With the Accelerate integration, you just need to prepare the model and dataloader as shown below:
```python
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
```
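After `prepare`, a plain evaluation loop can be used as usual. Below is a minimal sketch, assuming a `transformers`-style classification model that returns `logits` and an `eval_dataloader` coming from your own script:
```python
import torch

model.eval()
all_predictions = []
for batch in eval_dataloader:
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # Gather across processes before computing any metrics.
    all_predictions.append(accelerator.gather_for_metrics(predictions))
```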
## Few caveats to be aware of
1. Current integration doesn't support Pipeline Parallelism of DeepSpeed.
2. Current integration doesn't support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
3. Current integration doesn't support multiple models.
## DeepSpeed Resources
The documentation for the internals related to DeepSpeed can be found [here](../package_reference/deepspeed).
- [Project's github](https://github.com/deepspeedai/DeepSpeed)
- [Usage docs](https://www.deepspeed.ai/getting-started/)
- [API docs](https://deepspeed.readthedocs.io/en/latest/index.html)
- [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed)
Papers:
- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)
- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)
- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)
- [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209)
Finally, please remember that `Accelerate` only integrates DeepSpeed; if you have any problems or questions regarding DeepSpeed usage, please file an issue on [DeepSpeed's GitHub](https://github.com/deepspeedai/DeepSpeed/issues).
<Tip>
For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)!
</Tip> | accelerate/docs/source/usage_guides/deepspeed.md/0 | {
"file_path": "accelerate/docs/source/usage_guides/deepspeed.md",
"repo_id": "accelerate",
"token_count": 10171
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from manim import *
class Stage4(Scene):
def construct(self):
step_1 = MarkupText(
f"To understand the next part fully, let's define two terms,\n<span fgcolor='{RED}'>`batch_size`</span> and <span fgcolor='{BLUE}'>`global_batch_size`</span>:",
font_size=18
)
step_1.move_to([0, 1.5, 0])
        # <span fgcolor='{YELLOW}'>●</span>
step_2 = MarkupText(
f"\n\nβ <span fgcolor='{RED}'>`batch_size`</span>: \n\tThis will be defined as the batch size seen on a given\n\t*individual* GPU",
font_size=18,
).next_to(step_1, direction=DOWN, aligned_edge=LEFT)
step_3 = MarkupText(
f"\n\nβ <span fgcolor='{BLUE}'>`global_batch_size`</span>:\n\tThis will be defined as the *total* number of\n\tdifferent items seen in the dataset, across all GPUs",
font_size=18,
).next_to(step_2, direction=DOWN, aligned_edge=LEFT)
step_4 = MarkupText(
f"\n\nSo if we have a dataset of 64 items, 8 GPUs, \nand a `batch_size` of 8, each *step* will go through\nthe entire dataset one time as 8*8=64",
font_size=18,
).next_to(step_3, direction=DOWN, aligned_edge=LEFT)
self.play(
Write(step_1, run_time=4),
)
self.play(
Write(step_2, run_time=4)
)
self.play(
Write(step_3, run_time=4)
)
self.play(
Write(step_4, run_time=6)
)
self.wait() | accelerate/manim_animations/dataloaders/stage_4.py/0 | {
"file_path": "accelerate/manim_animations/dataloaders/stage_4.py",
"repo_id": "accelerate",
"token_count": 914
} |
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
FP8BackendType,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"AOT_TS_NVFUSER",
"NVPRIMS_NVFUSER",
"CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"TENSORRT",
"AOT_TORCHXLA_TRACE_ONCE",
"TORHCHXLA_TRACE_ONCE",
"IPEX",
"TVM",
]
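# Small prompt helpers shared by the `accelerate config` questionnaire: keep asking
# until the answer can be converted (or the default is accepted), and map numeric
# menu choices back to the corresponding enum values.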
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
ask_again = True
while ask_again:
result = input(input_text)
try:
if default is not None and len(result) == 0:
return default
return convert_value(result) if convert_value is not None else result
except Exception:
if error_message is not None:
print(error_message)
def _ask_options(input_text, options=[], convert_value=None, default=0):
menu = BulletMenu(input_text, options)
result = menu.run(default_choice=default)
return convert_value(result) if convert_value is not None else result
def _convert_compute_environment(value):
value = int(value)
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])
def _convert_distributed_mode(value):
value = int(value)
return DistributedType(
["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "MULTI_MLU", "MULTI_MUSA", "XLA"][value]
)
def _convert_dynamo_backend(value):
value = int(value)
return DynamoBackend(DYNAMO_BACKENDS[value]).value
def _convert_mixed_precision(value):
value = int(value)
return PrecisionType(["no", "fp16", "bf16", "fp8"][value])
def _convert_sagemaker_distributed_mode(value):
value = int(value)
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])
def _convert_fp8_backend(value):
value = int(value)
return FP8BackendType(["TE", "MSAMP"][value])
def _convert_yes_no_to_bool(value):
return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
"""
A custom formatter that will remove the usage line from the help message for subcommands.
"""
def _format_usage(self, usage, actions, groups, prefix):
usage = super()._format_usage(usage, actions, groups, prefix)
usage = usage.replace("<command> [<args>] ", "")
return usage
| accelerate/src/accelerate/commands/config/config_utils.py/0 | {
"file_path": "accelerate/src/accelerate/commands/config/config_utils.py",
"repo_id": "accelerate",
"token_count": 1219
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
class _StoreAction(argparse.Action):
"""
Custom action that allows for `-` or `_` to be passed in for an argument.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
new_option_strings = []
for option_string in self.option_strings:
new_option_strings.append(option_string)
if "_" in option_string[2:]:
# Add `-` version to the option string
new_option_strings.append(option_string.replace("_", "-"))
self.option_strings = new_option_strings
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class _StoreConstAction(_StoreAction):
"""
Same as `argparse._StoreConstAction` but uses the custom `_StoreAction`.
"""
def __init__(self, option_strings, dest, const, default=None, required=False, help=None):
super().__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
const=const,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
class _StoreTrueAction(_StoreConstAction):
"""
Same as `argparse._StoreTrueAction` but uses the custom `_StoreConstAction`.
"""
def __init__(
self,
option_strings,
dest,
default=None,
required=False,
help=None,
):
super().__init__(
option_strings=option_strings, dest=dest, const=True, default=default, required=required, help=help
)
class CustomArgumentGroup(argparse._ArgumentGroup):
"""
Custom argument group that allows for the use of `-` or `_` in arguments passed and overrides the help for each
when applicable.
"""
def _add_action(self, action):
args = vars(action)
if isinstance(action, argparse._StoreTrueAction):
action = _StoreTrueAction(
args["option_strings"], args["dest"], args["default"], args["required"], args["help"]
)
elif isinstance(action, argparse._StoreConstAction):
action = _StoreConstAction(
args["option_strings"],
args["dest"],
args["const"],
args["default"],
args["required"],
args["help"],
)
elif isinstance(action, argparse._StoreAction):
action = _StoreAction(**args)
action = super()._add_action(action)
return action
class CustomArgumentParser(argparse.ArgumentParser):
"""
Custom argument parser that allows for the use of `-` or `_` in arguments passed and overrides the help for each
when applicable.
"""
def add_argument(self, *args, **kwargs):
if "action" in kwargs:
# Translate action -> class
if kwargs["action"] == "store_true":
kwargs["action"] = _StoreTrueAction
else:
kwargs["action"] = _StoreAction
super().add_argument(*args, **kwargs)
def add_argument_group(self, *args, **kwargs):
group = CustomArgumentGroup(self, *args, **kwargs)
self._action_groups.append(group)
return group
| accelerate/src/accelerate/commands/utils.py/0 | {
"file_path": "accelerate/src/accelerate/commands/utils.py",
"repo_id": "accelerate",
"token_count": 1619
} |
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test script for verifying multiple models can be utilized with Accelerate + DeepSpeed:
Scenario 1: One model is training, another model is being used for inference/logits to impact training in some form.
Scenario 2: Two models are training simultaneously, which means two optimizers, etc.
"""
import argparse
from pathlib import Path
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup
from accelerate import Accelerator, DeepSpeedPlugin, DistributedType
from accelerate.state import AcceleratorState
from accelerate.utils.deepspeed import get_active_deepspeed_plugin
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
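# Small helper module with a single learnable scaling factor. It multiplies the incoming
# loss by `noise_factor` and stands in for a second, inference-only model in these tests.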
class NoiseModel(torch.nn.Module):
def __init__(self, noise_factor=0.1):
super().__init__()
self.noise_factor = torch.nn.Parameter(torch.tensor(noise_factor, dtype=torch.float32))
def forward(self, loss):
return loss * self.noise_factor
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
"""
Creates a set of `DataLoader`s for the `glue` dataset.
Args:
accelerator (`Accelerator`):
An `Accelerator` object
batch_size (`int`, *optional*):
The batch size for the train and validation DataLoaders.
model_name (`str`, *optional*):
"""
tokenizer = AutoTokenizer.from_pretrained(model_name)
datasets = load_dataset("glue", "mrpc")
def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
tokenized_datasets = datasets.map(
tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
def collate_fn(examples):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.XLA:
return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
return tokenizer.pad(examples, padding="longest", return_tensors="pt")
# Instantiate dataloaders.
train_dataloader = DataLoader(
tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
)
eval_dataloader = DataLoader(
tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
)
return train_dataloader, eval_dataloader
test_file_path = __file__
path = Path(test_file_path).resolve()
test_file_dir_str = str(path.parent.parent.parent.parent.parent.parent)
# Create our DS plugins
# We use custom schedulers and optimizers, hence `model_only`
ds_config_file = dict(
zero2=f"{test_file_dir_str}/tests/deepspeed/ds_config_zero2_model_only.json",
zero3=f"{test_file_dir_str}/tests/deepspeed/ds_config_zero3_model_only.json",
)
def single_model_training(config, args):
    # Training a single model; a `noise` model (which is not being trained) is used to inject some noise into the training process
num_epochs = config["num_epochs"]
zero2_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero2"])
zero3_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero3"])
deepspeed_plugins = {"training": zero2_plugin, "inference": zero3_plugin}
# Initialize accelerator
accelerator = Accelerator(
deepspeed_plugins=deepspeed_plugins,
mixed_precision="bf16",
)
# Initialize model under zero2 plugin
assert get_active_deepspeed_plugin(accelerator.state) is zero2_plugin
train_model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
train_dataloader, eval_dataloader = get_dataloaders(
accelerator, batch_size=config["batch_size"], model_name=args.model_name_or_path
)
max_training_steps = len(train_dataloader) * config["num_epochs"]
optimizer = AdamW(train_model.parameters(), lr=config["lr"])
lr_scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
)
train_dataloader, eval_dataloader, train_model, optimizer, lr_scheduler = accelerator.prepare(
train_dataloader, eval_dataloader, train_model, optimizer, lr_scheduler
)
# Now prepare the model under zero3 plugin
accelerator.state.select_deepspeed_plugin("inference")
assert get_active_deepspeed_plugin(accelerator.state) is zero3_plugin
inference_model = NoiseModel()
inference_model = accelerator.prepare(inference_model)
inference_model.eval()
# Run training loop
accelerator.state.select_deepspeed_plugin("training")
# We also need to keep track of the stating epoch so files are named properly
starting_epoch = 0
# Now we train the model
best_performance = 0
metric = evaluate.load("glue", "mrpc")
performance_metric = {}
for epoch in range(starting_epoch, num_epochs):
train_model.train()
inference_model.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(train_model):
outputs_1 = train_model(**batch)
with torch.no_grad():
outputs_2 = inference_model(outputs_1.loss)
# Combine the losses
loss = outputs_1.loss + outputs_2
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
train_model.eval()
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = train_model(**batch)
predictions = outputs.logits.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,
)
eval_metric = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", eval_metric)
performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
best_performance = eval_metric["accuracy"]
assert best_performance > performance_metric["epoch-0"]
def multiple_model_training(config, args):
# This will essentially be like a k-fold model, but one model is Zero-2 and another model is Zero-3
num_epochs = config["num_epochs"]
zero2_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero2"])
zero3_plugin = DeepSpeedPlugin(hf_ds_config=ds_config_file["zero3"])
deepspeed_plugins = {"zero2": zero2_plugin, "zero3": zero3_plugin}
# Initialize accelerator
zero2_accelerator = Accelerator(
deepspeed_plugins=deepspeed_plugins,
mixed_precision="bf16",
)
# Since an `AcceleratorState` has already been made, we can just reuse it here
zero3_accelerator = Accelerator()
# Initialize model under zero2 plugin
assert get_active_deepspeed_plugin(zero2_accelerator.state) is zero2_plugin
zero2_model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
train_dataloader, eval_dataloader = get_dataloaders(
zero2_accelerator, batch_size=config["batch_size"], model_name=args.model_name_or_path
)
max_training_steps = len(train_dataloader) * config["num_epochs"]
zero2_optimizer = AdamW(zero2_model.parameters(), lr=config["lr"])
zero2_lr_scheduler = get_linear_schedule_with_warmup(
zero2_optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
)
train_dataloader, eval_dataloader, zero2_model, zero2_optimizer, zero2_lr_scheduler = zero2_accelerator.prepare(
train_dataloader, eval_dataloader, zero2_model, zero2_optimizer, zero2_lr_scheduler
)
assert zero2_accelerator.deepspeed_engine_wrapped.engine is zero2_model
# now do Zero3
zero3_accelerator.state.select_deepspeed_plugin("zero3")
zero3_plugin.deepspeed_config["train_micro_batch_size_per_gpu"] = zero2_plugin.deepspeed_config[
"train_micro_batch_size_per_gpu"
]
assert get_active_deepspeed_plugin(zero3_accelerator.state) is zero3_plugin
zero3_model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
zero3_optimizer = AdamW(zero3_model.parameters(), lr=config["lr"])
zero3_lr_scheduler = get_linear_schedule_with_warmup(
zero3_optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
)
zero3_model, zero3_optimizer, zero3_lr_scheduler = zero3_accelerator.prepare(
zero3_model, zero3_optimizer, zero3_lr_scheduler
)
assert zero3_accelerator.deepspeed_engine_wrapped.engine is zero3_model
# Run training loop
starting_epoch = 0
# Now we train the model
best_performance_a = 0
best_performance_b = 0
metric_a = evaluate.load("glue", "mrpc")
metric_b = evaluate.load("glue", "mrpc")
performance_metric_a = {}
performance_metric_b = {}
for epoch in range(starting_epoch, num_epochs):
zero2_model.train()
zero3_model.train()
for step, batch in enumerate(train_dataloader):
with zero2_accelerator.accumulate(zero2_model, zero3_model):
outputs_1 = zero2_model(**batch)
zero2_accelerator.backward(outputs_1.loss)
zero2_optimizer.step()
zero2_lr_scheduler.step()
zero2_optimizer.zero_grad()
outputs_2 = zero3_model(**batch)
zero3_accelerator.backward(outputs_2.loss)
zero3_optimizer.step()
zero3_lr_scheduler.step()
zero3_optimizer.zero_grad()
zero2_model.eval()
zero3_model.eval()
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
logits_a = zero2_model(**batch).logits
logits_b = zero3_model(**batch).logits
# Combine the logits from both models
predictions_a = logits_a.argmax(dim=-1)
predictions_b = logits_b.argmax(dim=-1)
# It is slightly faster to call this once, than multiple times
predictions_a, predictions_b, references = zero2_accelerator.gather_for_metrics(
(predictions_a, predictions_b, batch["labels"])
)
metric_a.add_batch(
predictions=predictions_a,
references=references,
)
metric_b.add_batch(
predictions=predictions_b,
references=references,
)
eval_metric_a = metric_a.compute()
eval_metric_b = metric_b.compute()
# Use accelerator.print to print only on the main process.
zero2_accelerator.print(f"epoch {epoch}:", eval_metric_a, eval_metric_b)
performance_metric_a[f"epoch-{epoch}"] = eval_metric_a["accuracy"]
performance_metric_b[f"epoch-{epoch}"] = eval_metric_b["accuracy"]
if best_performance_a < eval_metric_a["accuracy"]:
best_performance_a = eval_metric_a["accuracy"]
if best_performance_b < eval_metric_b["accuracy"]:
best_performance_b = eval_metric_b["accuracy"]
assert best_performance_a > performance_metric_a["epoch-0"]
assert best_performance_b > performance_metric_b["epoch-0"]
def main():
parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path",
type=str,
default="bert-base-cased",
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--performance_lower_bound",
type=float,
default=None,
help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
)
parser.add_argument(
"--num_epochs",
type=int,
default=2,
help="Number of train epochs.",
)
args = parser.parse_args()
config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
single_model_training(config, args)
AcceleratorState._reset_state(True)
multiple_model_training(config, args)
if __name__ == "__main__":
main()
| accelerate/src/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py/0 | {
"file_path": "accelerate/src/accelerate/test_utils/scripts/external_deps/test_ds_multiple_model.py",
"repo_id": "accelerate",
"token_count": 5491
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Expectation:
# Provide a project dir name, then each type of logger gets stored in project/{`logging_dir`}
import json
import os
import time
from functools import wraps
from typing import Any, Dict, List, Optional, Union
import yaml
from .logging import get_logger
from .state import PartialState
from .utils import (
LoggerType,
is_aim_available,
is_clearml_available,
is_comet_ml_available,
is_dvclive_available,
is_mlflow_available,
is_tensorboard_available,
is_wandb_available,
listify,
)
_available_trackers = []
if is_tensorboard_available():
_available_trackers.append(LoggerType.TENSORBOARD)
if is_wandb_available():
_available_trackers.append(LoggerType.WANDB)
if is_comet_ml_available():
_available_trackers.append(LoggerType.COMETML)
if is_aim_available():
_available_trackers.append(LoggerType.AIM)
if is_mlflow_available():
_available_trackers.append(LoggerType.MLFLOW)
if is_clearml_available():
_available_trackers.append(LoggerType.CLEARML)
if is_dvclive_available():
_available_trackers.append(LoggerType.DVCLIVE)
logger = get_logger(__name__)
def on_main_process(function):
"""
Decorator to selectively run the decorated function on the main process only based on the `main_process_only`
attribute in a class.
Checks at function execution rather than initialization time, not triggering the initialization of the
`PartialState`.
"""
@wraps(function)
def execute_on_main_process(self, *args, **kwargs):
if getattr(self, "main_process_only", False):
return PartialState().on_main_process(function)(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return execute_on_main_process
def get_available_trackers():
"Returns a list of all supported available trackers in the system"
return _available_trackers
class GeneralTracker:
"""
A base Tracker class to be used for all logging integration implementations.
Each function should take in `**kwargs` that will automatically be passed in from a base dictionary provided to
[`Accelerator`].
Should implement `name`, `requires_logging_directory`, and `tracker` properties such that:
`name` (`str`): String representation of the tracker class name, such as "TensorBoard" `requires_logging_directory`
(`bool`): Whether the logger requires a directory to store their logs. `tracker` (`object`): Should return internal
tracking mechanism used by a tracker class (such as the `run` for wandb)
    Implementations can also include a `main_process_only` (`bool`) attribute to toggle if relevant logging, init, and
other functions should occur on the main process or across all processes (by default will use `True`)
"""
main_process_only = True
def __init__(self, _blank=False):
if not _blank:
err = ""
if not hasattr(self, "name"):
err += "`name`"
if not hasattr(self, "requires_logging_directory"):
if len(err) > 0:
err += ", "
err += "`requires_logging_directory`"
# as tracker is a @property that relies on post-init
if "tracker" not in dir(self):
if len(err) > 0:
err += ", "
err += "`tracker`"
if len(err) > 0:
raise NotImplementedError(
f"The implementation for this tracker class is missing the following "
f"required attributes. Please define them in the class definition: "
f"{err}"
)
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Implementations should use the experiment configuration
functionality of a tracking API.
Args:
values (Dictionary `str` to `bool`, `str`, `float` or `int`):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, `int`, or `None`.
"""
pass
def log(self, values: dict, step: Optional[int], **kwargs):
"""
Logs `values` to the current run. Base `log` implementations of a tracking API should go in here, along with
        special behavior for the `step` parameter.
Args:
values (Dictionary `str` to `str`, `float`, or `int`):
Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
"""
pass
def finish(self):
"""
Should run any finalizing functions within the tracking API. If the API should not have one, just don't
overwrite that method.
"""
pass
class TensorBoardTracker(GeneralTracker):
"""
A `Tracker` class that supports `tensorboard`. Should be initialized at the start of your script.
Args:
run_name (`str`):
The name of the experiment run
logging_dir (`str`, `os.PathLike`):
Location for TensorBoard logs to be stored.
**kwargs (additional keyword arguments, *optional*):
Additional key word arguments passed along to the `tensorboard.SummaryWriter.__init__` method.
"""
name = "tensorboard"
requires_logging_directory = True
@on_main_process
def __init__(self, run_name: str, logging_dir: Union[str, os.PathLike], **kwargs):
try:
from torch.utils import tensorboard
except ModuleNotFoundError:
import tensorboardX as tensorboard
super().__init__()
self.run_name = run_name
self.logging_dir = os.path.join(logging_dir, run_name)
self.writer = tensorboard.SummaryWriter(self.logging_dir, **kwargs)
logger.debug(f"Initialized TensorBoard project {self.run_name} logging to {self.logging_dir}")
logger.debug(
"Make sure to log any initial configurations with `self.store_init_configuration` before training!"
)
@property
def tracker(self):
return self.writer
@on_main_process
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
hyperparameters in a yaml file for future use.
Args:
values (Dictionary `str` to `bool`, `str`, `float` or `int`):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, `int`, or `None`.
"""
self.writer.add_hparams(values, metric_dict={})
self.writer.flush()
project_run_name = time.time()
dir_name = os.path.join(self.logging_dir, str(project_run_name))
os.makedirs(dir_name, exist_ok=True)
with open(os.path.join(dir_name, "hparams.yml"), "w") as outfile:
try:
yaml.dump(values, outfile)
except yaml.representer.RepresenterError:
logger.error("Serialization to store hyperparameters failed")
raise
logger.debug("Stored initial configuration hyperparameters to TensorBoard and hparams yaml file")
@on_main_process
def log(self, values: dict, step: Optional[int] = None, **kwargs):
"""
Logs `values` to the current run.
Args:
values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
`str` to `float`/`int`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
                Additional key word arguments passed along to either `SummaryWriter.add_scalar`,
                `SummaryWriter.add_text`, or `SummaryWriter.add_scalars` method based on the contents of `values`.
"""
values = listify(values)
for k, v in values.items():
if isinstance(v, (int, float)):
self.writer.add_scalar(k, v, global_step=step, **kwargs)
elif isinstance(v, str):
self.writer.add_text(k, v, global_step=step, **kwargs)
elif isinstance(v, dict):
self.writer.add_scalars(k, v, global_step=step, **kwargs)
self.writer.flush()
logger.debug("Successfully logged to TensorBoard")
@on_main_process
def log_images(self, values: dict, step: Optional[int], **kwargs):
"""
Logs `images` to the current run.
Args:
values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
                Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `SummaryWriter.add_image` method.
"""
for k, v in values.items():
self.writer.add_images(k, v, global_step=step, **kwargs)
logger.debug("Successfully logged images to TensorBoard")
@on_main_process
def finish(self):
"""
Closes `TensorBoard` writer
"""
self.writer.close()
logger.debug("TensorBoard writer closed")
class WandBTracker(GeneralTracker):
"""
A `Tracker` class that supports `wandb`. Should be initialized at the start of your script.
Args:
run_name (`str`):
The name of the experiment run.
**kwargs (additional keyword arguments, *optional*):
Additional key word arguments passed along to the `wandb.init` method.
"""
name = "wandb"
requires_logging_directory = False
main_process_only = False
@on_main_process
def __init__(self, run_name: str, **kwargs):
super().__init__()
self.run_name = run_name
import wandb
self.run = wandb.init(project=self.run_name, **kwargs)
logger.debug(f"Initialized WandB project {self.run_name}")
logger.debug(
"Make sure to log any initial configurations with `self.store_init_configuration` before training!"
)
@property
def tracker(self):
return self.run
@on_main_process
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
Args:
values (Dictionary `str` to `bool`, `str`, `float` or `int`):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, `int`, or `None`.
"""
import wandb
wandb.config.update(values, allow_val_change=True)
logger.debug("Stored initial configuration hyperparameters to WandB")
@on_main_process
def log(self, values: dict, step: Optional[int] = None, **kwargs):
"""
Logs `values` to the current run.
Args:
values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
`str` to `float`/`int`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `wandb.log` method.
"""
self.run.log(values, step=step, **kwargs)
logger.debug("Successfully logged to WandB")
@on_main_process
def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
"""
Logs `images` to the current run.
Args:
values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
                Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or `PIL.Image`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `wandb.log` method.
"""
import wandb
for k, v in values.items():
self.log({k: [wandb.Image(image) for image in v]}, step=step, **kwargs)
logger.debug("Successfully logged images to WandB")
@on_main_process
def log_table(
self,
table_name: str,
columns: List[str] = None,
data: List[List[Any]] = None,
dataframe: Any = None,
step: Optional[int] = None,
**kwargs,
):
"""
Log a Table containing any object type (text, image, audio, video, molecule, html, etc). Can be defined either
with `columns` and `data` or with `dataframe`.
Args:
table_name (`str`):
The name to give to the logged table on the wandb workspace
columns (list of `str`, *optional*):
The name of the columns on the table
data (List of List of Any data type, *optional*):
The data to be logged in the table
dataframe (Any data type, *optional*):
The data to be logged in the table
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
"""
import wandb
values = {table_name: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
self.log(values, step=step, **kwargs)
@on_main_process
def finish(self):
"""
Closes `wandb` writer
"""
self.run.finish()
logger.debug("WandB run closed")
class CometMLTracker(GeneralTracker):
"""
A `Tracker` class that supports `comet_ml`. Should be initialized at the start of your script.
API keys must be stored in a Comet config file.
Args:
run_name (`str`):
The name of the experiment run.
**kwargs (additional keyword arguments, *optional*):
Additional key word arguments passed along to the `Experiment.__init__` method.
"""
name = "comet_ml"
requires_logging_directory = False
@on_main_process
def __init__(self, run_name: str, **kwargs):
super().__init__()
self.run_name = run_name
from comet_ml import Experiment
self.writer = Experiment(project_name=run_name, **kwargs)
logger.debug(f"Initialized CometML project {self.run_name}")
logger.debug(
"Make sure to log any initial configurations with `self.store_init_configuration` before training!"
)
@property
def tracker(self):
return self.writer
@on_main_process
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
Args:
values (Dictionary `str` to `bool`, `str`, `float` or `int`):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, `int`, or `None`.
"""
self.writer.log_parameters(values)
logger.debug("Stored initial configuration hyperparameters to CometML")
@on_main_process
def log(self, values: dict, step: Optional[int] = None, **kwargs):
"""
Logs `values` to the current run.
Args:
values (Dictionary `str` to `str`, `float`, `int` or `dict` of `str` to `float`/`int`):
Values to be logged as key-value pairs. The values need to have type `str`, `float`, `int` or `dict` of
`str` to `float`/`int`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to either `Experiment.log_metric`, `Experiment.log_other`,
or `Experiment.log_metrics` method based on the contents of `values`.
"""
if step is not None:
self.writer.set_step(step)
for k, v in values.items():
if isinstance(v, (int, float)):
self.writer.log_metric(k, v, step=step, **kwargs)
elif isinstance(v, str):
self.writer.log_other(k, v, **kwargs)
elif isinstance(v, dict):
self.writer.log_metrics(v, step=step, **kwargs)
logger.debug("Successfully logged to CometML")
@on_main_process
def finish(self):
"""
Closes `comet-ml` writer
"""
self.writer.end()
logger.debug("CometML run closed")
class AimTracker(GeneralTracker):
"""
A `Tracker` class that supports `aim`. Should be initialized at the start of your script.
Args:
run_name (`str`):
The name of the experiment run.
**kwargs (additional keyword arguments, *optional*):
Additional key word arguments passed along to the `Run.__init__` method.
"""
name = "aim"
requires_logging_directory = True
@on_main_process
def __init__(self, run_name: str, logging_dir: Optional[Union[str, os.PathLike]] = ".", **kwargs):
self.run_name = run_name
from aim import Run
self.writer = Run(repo=logging_dir, **kwargs)
self.writer.name = self.run_name
logger.debug(f"Initialized Aim project {self.run_name}")
logger.debug(
"Make sure to log any initial configurations with `self.store_init_configuration` before training!"
)
@property
def tracker(self):
return self.writer
@on_main_process
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
Args:
values (`dict`):
Values to be stored as initial hyperparameters as key-value pairs.
"""
self.writer["hparams"] = values
@on_main_process
def log(self, values: dict, step: Optional[int], **kwargs):
"""
Logs `values` to the current run.
Args:
values (`dict`):
Values to be logged as key-value pairs.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `Run.track` method.
"""
# Note: replace this with the dictionary support when merged
for key, value in values.items():
self.writer.track(value, name=key, step=step, **kwargs)
@on_main_process
def log_images(self, values: dict, step: Optional[int] = None, kwargs: Optional[Dict[str, dict]] = None):
"""
Logs `images` to the current run.
Args:
values (`Dict[str, Union[np.ndarray, PIL.Image, Tuple[np.ndarray, str], Tuple[PIL.Image, str]]]`):
Values to be logged as key-value pairs. The values need to have type `np.ndarray` or PIL.Image. If a
tuple is provided, the first element should be the image and the second element should be the caption.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs (`Dict[str, dict]`):
Additional key word arguments passed along to the `Run.Image` and `Run.track` method specified by the
keys `aim_image` and `track`, respectively.
"""
import aim
aim_image_kw = {}
track_kw = {}
if kwargs is not None:
aim_image_kw = kwargs.get("aim_image", {})
track_kw = kwargs.get("track", {})
for key, value in values.items():
if isinstance(value, tuple):
img, caption = value
else:
img, caption = value, ""
aim_image = aim.Image(img, caption=caption, **aim_image_kw)
self.writer.track(aim_image, name=key, step=step, **track_kw)
@on_main_process
def finish(self):
"""
Closes `aim` writer
"""
self.writer.close()
class MLflowTracker(GeneralTracker):
"""
A `Tracker` class that supports `mlflow`. Should be initialized at the start of your script.
Args:
experiment_name (`str`, *optional*):
Name of the experiment. Environment variable MLFLOW_EXPERIMENT_NAME has priority over this argument.
logging_dir (`str` or `os.PathLike`, defaults to `"."`):
Location for mlflow logs to be stored.
run_id (`str`, *optional*):
            If specified, get the run with the specified UUID and log parameters and metrics under that run. The run's
            end time is unset and its status is set to running, but the run's other attributes (source_version,
source_type, etc.) are not changed. Environment variable MLFLOW_RUN_ID has priority over this argument.
tags (`Dict[str, str]`, *optional*):
An optional `dict` of `str` keys and values, or a `str` dump from a `dict`, to set as tags on the run. If a
run is being resumed, these tags are set on the resumed run. If a new run is being created, these tags are
set on the new run. Environment variable MLFLOW_TAGS has priority over this argument.
nested_run (`bool`, *optional*, defaults to `False`):
Controls whether run is nested in parent run. True creates a nested run. Environment variable
MLFLOW_NESTED_RUN has priority over this argument.
run_name (`str`, *optional*):
Name of new run (stored as a mlflow.runName tag). Used only when `run_id` is unspecified.
description (`str`, *optional*):
An optional string that populates the description box of the run. If a run is being resumed, the
description is set on the resumed run. If a new run is being created, the description is set on the new
run.
"""
name = "mlflow"
requires_logging_directory = False
@on_main_process
def __init__(
self,
experiment_name: str = None,
logging_dir: Optional[Union[str, os.PathLike]] = None,
run_id: Optional[str] = None,
tags: Optional[Union[Dict[str, Any], str]] = None,
nested_run: Optional[bool] = False,
run_name: Optional[str] = None,
description: Optional[str] = None,
):
experiment_name = os.environ.get("MLFLOW_EXPERIMENT_NAME", experiment_name)
run_id = os.environ.get("MLFLOW_RUN_ID", run_id)
tags = os.environ.get("MLFLOW_TAGS", tags)
if isinstance(tags, str):
tags = json.loads(tags)
nested_run = os.environ.get("MLFLOW_NESTED_RUN", nested_run)
import mlflow
exps = mlflow.search_experiments(filter_string=f"name = '{experiment_name}'")
if len(exps) > 0:
if len(exps) > 1:
logger.warning("Multiple experiments with the same name found. Using first one.")
experiment_id = exps[0].experiment_id
else:
experiment_id = mlflow.create_experiment(
name=experiment_name,
artifact_location=logging_dir,
tags=tags,
)
self.active_run = mlflow.start_run(
run_id=run_id,
experiment_id=experiment_id,
run_name=run_name,
nested=nested_run,
tags=tags,
description=description,
)
logger.debug(f"Initialized mlflow experiment {experiment_name}")
logger.debug(
"Make sure to log any initial configurations with `self.store_init_configuration` before training!"
)
@property
def tracker(self):
return self.active_run
@on_main_process
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
Args:
values (`dict`):
Values to be stored as initial hyperparameters as key-value pairs.
"""
import mlflow
for name, value in list(values.items()):
# internally, all values are converted to str in MLflow
if len(str(value)) > mlflow.utils.validation.MAX_PARAM_VAL_LENGTH:
logger.warning_once(
f'Accelerate is attempting to log a value of "{value}" for key "{name}" as a parameter. MLflow\'s'
f" log_param() only accepts values no longer than {mlflow.utils.validation.MAX_PARAM_VAL_LENGTH} characters so we dropped this attribute."
)
del values[name]
values_list = list(values.items())
# MLflow cannot log more than 100 values in one go, so we have to split it
for i in range(0, len(values_list), mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH):
mlflow.log_params(dict(values_list[i : i + mlflow.utils.validation.MAX_PARAMS_TAGS_PER_BATCH]))
logger.debug("Stored initial configuration hyperparameters to MLflow")
@on_main_process
def log(self, values: dict, step: Optional[int]):
"""
Logs `values` to the current run.
Args:
values (`dict`):
Values to be logged as key-value pairs.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
"""
metrics = {}
for k, v in values.items():
if isinstance(v, (int, float)):
metrics[k] = v
else:
logger.warning_once(
f'MLflowTracker is attempting to log a value of "{v}" of type {type(v)} for key "{k}" as a metric. '
"MLflow's log_metric() only accepts float and int types so we dropped this attribute."
)
import mlflow
mlflow.log_metrics(metrics, step=step)
logger.debug("Successfully logged to mlflow")
@on_main_process
def finish(self):
"""
End the active MLflow run.
"""
import mlflow
mlflow.end_run()
class ClearMLTracker(GeneralTracker):
"""
A `Tracker` class that supports `clearml`. Should be initialized at the start of your script.
Args:
run_name (`str`, *optional*):
Name of the experiment. Environment variables `CLEARML_PROJECT` and `CLEARML_TASK` have priority over this
argument.
**kwargs (additional keyword arguments, *optional*):
Kwargs passed along to the `Task.__init__` method.
"""
name = "clearml"
requires_logging_directory = False
@on_main_process
def __init__(self, run_name: str = None, **kwargs):
from clearml import Task
current_task = Task.current_task()
self._initialized_externally = False
if current_task:
self._initialized_externally = True
self.task = current_task
return
kwargs.setdefault("project_name", os.environ.get("CLEARML_PROJECT", run_name))
kwargs.setdefault("task_name", os.environ.get("CLEARML_TASK", run_name))
self.task = Task.init(**kwargs)
@property
def tracker(self):
return self.task
@on_main_process
def store_init_configuration(self, values: dict):
"""
Connect configuration dictionary to the Task object. Should be run at the beginning of your experiment.
Args:
values (`dict`):
Values to be stored as initial hyperparameters as key-value pairs.
"""
return self.task.connect_configuration(values)
@on_main_process
def log(self, values: Dict[str, Union[int, float]], step: Optional[int] = None, **kwargs):
"""
Logs `values` dictionary to the current run. The dictionary keys must be strings. The dictionary values must be
        ints or floats.
Args:
values (`Dict[str, Union[int, float]]`):
Values to be logged as key-value pairs. If the key starts with 'eval_'/'test_'/'train_', the value will
be reported under the 'eval'/'test'/'train' series and the respective prefix will be removed.
Otherwise, the value will be reported under the 'train' series, and no prefix will be removed.
step (`int`, *optional*):
If specified, the values will be reported as scalars, with the iteration number equal to `step`.
Otherwise they will be reported as single values.
kwargs:
Additional key word arguments passed along to the `clearml.Logger.report_single_value` or
`clearml.Logger.report_scalar` methods.
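        Example (illustrative; assumes `tracker` is an initialized `ClearMLTracker` and uses made-up metric
        values):
        ```py
        # "eval_accuracy" is reported as series "eval" under the title "accuracy", while "loss" has no
        # recognized prefix and is reported under the "train" series.
        tracker.log({"eval_accuracy": 0.91, "loss": 0.37}, step=10)
        ```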
"""
clearml_logger = self.task.get_logger()
for k, v in values.items():
if not isinstance(v, (int, float)):
logger.warning_once(
"Accelerator is attempting to log a value of "
f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
"This invocation of ClearML logger's report_scalar() "
"is incorrect so we dropped this attribute."
)
continue
if step is None:
clearml_logger.report_single_value(name=k, value=v, **kwargs)
continue
title, series = ClearMLTracker._get_title_series(k)
clearml_logger.report_scalar(title=title, series=series, value=v, iteration=step, **kwargs)
@on_main_process
def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
"""
Logs `images` to the current run.
Args:
            values (`Dict[str, List[Union[np.ndarray, PIL.Image]]]`):
                Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or
                `PIL.Image`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `clearml.Logger.report_image` method.
"""
clearml_logger = self.task.get_logger()
for k, v in values.items():
title, series = ClearMLTracker._get_title_series(k)
clearml_logger.report_image(title=title, series=series, iteration=step, image=v, **kwargs)
@on_main_process
def log_table(
self,
table_name: str,
columns: List[str] = None,
data: List[List[Any]] = None,
dataframe: Any = None,
step: Optional[int] = None,
**kwargs,
):
"""
        Log a Table to the task. Can be defined either with `columns` and `data` or with `dataframe`.
Args:
table_name (`str`):
The name of the table
columns (list of `str`, *optional*):
The name of the columns on the table
data (List of List of Any data type, *optional*):
                The data to be logged in the table. If `columns` is not specified, then the first entry in `data` will
                be the names of the columns of the table.
            dataframe (Any data type, *optional*):
                The data to be logged in the table.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `clearml.Logger.report_table` method.
"""
to_report = dataframe
if dataframe is None:
if data is None:
                raise ValueError(
                    "`ClearMLTracker.log_table` requires that `data` be supplied if `dataframe` is `None`"
                )
to_report = [columns] + data if columns else data
title, series = ClearMLTracker._get_title_series(table_name)
self.task.get_logger().report_table(title=title, series=series, table_plot=to_report, iteration=step, **kwargs)
@on_main_process
def finish(self):
"""
        Close the ClearML task. If the task was initialized externally (e.g. by manually calling `Task.init`), this
        function is a no-op.
"""
if self.task and not self._initialized_externally:
self.task.close()
@staticmethod
def _get_title_series(name):
for prefix in ["eval", "test", "train"]:
if name.startswith(prefix + "_"):
return name[len(prefix) + 1 :], prefix
return name, "train"
class DVCLiveTracker(GeneralTracker):
"""
A `Tracker` class that supports `dvclive`. Should be initialized at the start of your script.
Args:
run_name (`str`, *optional*):
Ignored for dvclive. See `kwargs` instead.
kwargs:
Additional key word arguments passed along to [`dvclive.Live()`](https://dvc.org/doc/dvclive/live).
Example:
```py
from accelerate import Accelerator
accelerator = Accelerator(log_with="dvclive")
accelerator.init_trackers(project_name="my_project", init_kwargs={"dvclive": {"dir": "my_directory"}})
```
"""
name = "dvclive"
requires_logging_directory = False
@on_main_process
def __init__(self, run_name: Optional[str] = None, live: Optional[Any] = None, **kwargs):
from dvclive import Live
super().__init__()
self.live = live if live is not None else Live(**kwargs)
@property
def tracker(self):
return self.live
@on_main_process
def store_init_configuration(self, values: dict):
"""
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment. Stores the
hyperparameters in a yaml file for future use.
Args:
values (Dictionary `str` to `bool`, `str`, `float`, `int`, or a List or Dict of those types):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, or `int`.
"""
self.live.log_params(values)
@on_main_process
def log(self, values: dict, step: Optional[int] = None, **kwargs):
"""
Logs `values` to the current run.
Args:
values (Dictionary `str` to `str`, `float`, or `int`):
Values to be logged as key-value pairs. The values need to have type `str`, `float`, or `int`.
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to `dvclive.Live.log_metric()`.
"""
from dvclive.plots import Metric
if step is not None:
self.live.step = step
for k, v in values.items():
if Metric.could_log(v):
self.live.log_metric(k, v, **kwargs)
else:
logger.warning_once(
"Accelerator attempted to log a value of "
f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
"This invocation of DVCLive's Live.log_metric() "
"is incorrect so we dropped this attribute."
)
self.live.next_step()
@on_main_process
def finish(self):
"""
Closes `dvclive.Live()`.
"""
self.live.end()
LOGGER_TYPE_TO_CLASS = {
"aim": AimTracker,
"comet_ml": CometMLTracker,
"mlflow": MLflowTracker,
"tensorboard": TensorBoardTracker,
"wandb": WandBTracker,
"clearml": ClearMLTracker,
"dvclive": DVCLiveTracker,
}
def filter_trackers(
log_with: List[Union[str, LoggerType, GeneralTracker]],
logging_dir: Union[str, os.PathLike] = None,
):
"""
    Takes in a list of potential tracker types and:
    - Checks that each requested tracker is available in the environment
    - Filters out repeated tracker types
    - Returns all available trackers in the environment if `all` is in `log_with`
    - Ensures that `logging_dir` is not `None` for any tracker that requires a logging directory
Args:
log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
            A list of loggers to be set up for experiment tracking. Should be one or several of:
            - `"all"`
            - `"aim"`
            - `"tensorboard"`
            - `"wandb"`
            - `"comet_ml"`
            - `"mlflow"`
            - `"clearml"`
            - `"dvclive"`
If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
logging_dir (`str`, `os.PathLike`, *optional*):
A path to a directory for storing logs of locally-compatible loggers.
"""
loggers = []
if log_with is not None:
if not isinstance(log_with, (list, tuple)):
log_with = [log_with]
if "all" in log_with or LoggerType.ALL in log_with:
loggers = [o for o in log_with if issubclass(type(o), GeneralTracker)] + get_available_trackers()
else:
for log_type in log_with:
if log_type not in LoggerType and not issubclass(type(log_type), GeneralTracker):
raise ValueError(f"Unsupported logging capability: {log_type}. Choose between {LoggerType.list()}")
if issubclass(type(log_type), GeneralTracker):
loggers.append(log_type)
else:
log_type = LoggerType(log_type)
if log_type not in loggers:
if log_type in get_available_trackers():
tracker_init = LOGGER_TYPE_TO_CLASS[str(log_type)]
if tracker_init.requires_logging_directory:
if logging_dir is None:
raise ValueError(
f"Logging with `{log_type}` requires a `logging_dir` to be passed in."
)
loggers.append(log_type)
else:
logger.debug(f"Tried adding logger {log_type}, but package is unavailable in the system.")
return loggers
| accelerate/src/accelerate/tracking.py/0 | {
"file_path": "accelerate/src/accelerate/tracking.py",
"repo_id": "accelerate",
"token_count": 17105
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import List, Optional, Union
import numpy as np
import torch
from ..state import AcceleratorState
from .constants import CUDA_DISTRIBUTED_TYPES
from .dataclasses import DistributedType, RNGType
from .imports import is_mlu_available, is_musa_available, is_npu_available, is_torch_xla_available, is_xpu_available
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
def set_seed(seed: int, device_specific: bool = False, deterministic: bool = False):
"""
Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch`.
Args:
seed (`int`):
The seed to set.
device_specific (`bool`, *optional*, defaults to `False`):
            Whether to vary the seed slightly on each device by adding `AcceleratorState().process_index` to it.
deterministic (`bool`, *optional*, defaults to `False`):
Whether to use deterministic algorithms where available. Can slow down training.
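    Example (basic usage):
    ```python
    from accelerate.utils import set_seed
    set_seed(42)
    ```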
"""
if device_specific:
seed += AcceleratorState().process_index
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if is_xpu_available():
torch.xpu.manual_seed_all(seed)
elif is_npu_available():
torch.npu.manual_seed_all(seed)
elif is_mlu_available():
torch.mlu.manual_seed_all(seed)
elif is_musa_available():
torch.musa.manual_seed_all(seed)
else:
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_torch_xla_available():
xm.set_rng_state(seed)
if deterministic:
torch.use_deterministic_algorithms(True)
def synchronize_rng_state(rng_type: Optional[RNGType] = None, generator: Optional[torch.Generator] = None):
# Get the proper rng state
if rng_type == RNGType.TORCH:
rng_state = torch.get_rng_state()
elif rng_type == RNGType.CUDA:
rng_state = torch.cuda.get_rng_state()
elif rng_type == RNGType.XLA:
assert is_torch_xla_available(), "Can't synchronize XLA seeds as torch_xla is unavailable."
rng_state = torch.tensor(xm.get_rng_state())
elif rng_type == RNGType.NPU:
assert is_npu_available(), "Can't synchronize NPU seeds on an environment without NPUs."
rng_state = torch.npu.get_rng_state()
elif rng_type == RNGType.MLU:
assert is_mlu_available(), "Can't synchronize MLU seeds on an environment without MLUs."
rng_state = torch.mlu.get_rng_state()
elif rng_type == RNGType.MUSA:
assert is_musa_available(), "Can't synchronize MUSA seeds on an environment without MUSAs."
rng_state = torch.musa.get_rng_state()
elif rng_type == RNGType.XPU:
assert is_xpu_available(), "Can't synchronize XPU seeds on an environment without XPUs."
rng_state = torch.xpu.get_rng_state()
elif rng_type == RNGType.GENERATOR:
assert generator is not None, "Need a generator to synchronize its seed."
rng_state = generator.get_state()
# Broadcast the rng state from device 0 to other devices
state = AcceleratorState()
if state.distributed_type == DistributedType.XLA:
rng_state = rng_state.to(xm.xla_device())
xm.collective_broadcast([rng_state])
xm.mark_step()
rng_state = rng_state.cpu()
elif (
state.distributed_type in CUDA_DISTRIBUTED_TYPES
or state.distributed_type == DistributedType.MULTI_MLU
or state.distributed_type == DistributedType.MULTI_MUSA
or state.distributed_type == DistributedType.MULTI_NPU
or state.distributed_type == DistributedType.MULTI_XPU
):
rng_state = rng_state.to(state.device)
torch.distributed.broadcast(rng_state, 0)
rng_state = rng_state.cpu()
elif state.distributed_type == DistributedType.MULTI_CPU:
torch.distributed.broadcast(rng_state, 0)
# Set the broadcast rng state
if rng_type == RNGType.TORCH:
torch.set_rng_state(rng_state)
elif rng_type == RNGType.CUDA:
torch.cuda.set_rng_state(rng_state)
elif rng_type == RNGType.NPU:
torch.npu.set_rng_state(rng_state)
elif rng_type == RNGType.MLU:
torch.mlu.set_rng_state(rng_state)
elif rng_type == RNGType.MUSA:
torch.musa.set_rng_state(rng_state)
elif rng_type == RNGType.XPU:
torch.xpu.set_rng_state(rng_state)
elif rng_type == RNGType.XLA:
xm.set_rng_state(rng_state.item())
elif rng_type == RNGType.GENERATOR:
generator.set_state(rng_state)
def synchronize_rng_states(rng_types: List[Union[str, RNGType]], generator: Optional[torch.Generator] = None):
for rng_type in rng_types:
synchronize_rng_state(RNGType(rng_type), generator=generator)
| accelerate/src/accelerate/utils/random.py/0 | {
"file_path": "accelerate/src/accelerate/utils/random.py",
"repo_id": "accelerate",
"token_count": 2199
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from torch import nn
from accelerate.test_utils import memory_allocated_func, require_non_cpu, require_non_torch_xla, torch_device
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(3, 4)
self.batchnorm = nn.BatchNorm1d(4)
self.linear2 = nn.Linear(4, 5)
def forward(self, x):
return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
def test_memory_implicit(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
assert batch_sizes == [128, 64, 32, 16, 8]
def test_memory_explicit(self):
batch_sizes = []
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size, arg1):
nonlocal batch_sizes
batch_sizes.append(batch_size)
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arg1
bs, arg1 = mock_training_loop_function("hello")
assert batch_sizes == [128, 64, 32, 16, 8]
assert [bs, arg1] == [8, "hello"]
def test_start_zero(self):
@find_executable_batch_size(starting_batch_size=0)
def mock_training_loop_function(batch_size):
pass
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
assert "No executable batch size found, reached zero." in cm.exception.args[0]
def test_approach_zero(self):
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(batch_size):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(RuntimeError) as cm:
mock_training_loop_function()
assert "No executable batch size found, reached zero." in cm.exception.args[0]
def test_verbose_guard(self):
@find_executable_batch_size(starting_batch_size=128)
def mock_training_loop_function(batch_size, arg1, arg2):
if batch_size != 8:
                raise_fake_out_of_memory()
with self.assertRaises(TypeError) as cm:
mock_training_loop_function(128, "hello", "world")
assert "Batch size was passed into `f`" in cm.exception.args[0]
assert "`f(arg1='hello', arg2='world')" in cm.exception.args[0]
def test_any_other_error(self):
@find_executable_batch_size(starting_batch_size=16)
def mock_training_loop_function(batch_size):
raise ValueError("Oops, we had an error!")
with self.assertRaises(ValueError) as cm:
mock_training_loop_function()
assert "Oops, we had an error!" in cm.exception.args[0]
@require_non_cpu
@require_non_torch_xla
def test_release_memory(self):
starting_memory = memory_allocated_func()
model = ModelForTest()
model.to(torch_device)
assert memory_allocated_func() > starting_memory
model = release_memory(model)
assert memory_allocated_func() == starting_memory
| accelerate/tests/test_memory_utils.py/0 | {
"file_path": "accelerate/tests/test_memory_utils.py",
"repo_id": "accelerate",
"token_count": 1740
} |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from accelerate.test_utils.testing import (
TempDirTestCase,
execute_subprocess_async,
get_launch_command,
path_in_accelerate_package,
require_multi_device,
require_non_torch_xla,
require_tp,
require_transformers,
slow,
)
from accelerate.utils import patch_environment
@require_non_torch_xla
@require_tp
@require_multi_device
@require_transformers
@slow
class TPIntegrationTest(TempDirTestCase):
test_scripts_folder = path_in_accelerate_package("test_utils", "scripts", "external_deps")
def setUp(self):
super().setUp()
self.test_tp_size = 2
self.model_name_or_path = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
self.batch_size = 1
from accelerate.utils import set_seed
set_seed(42)
def test_working_of_tp(self):
self.test_file_path = self.test_scripts_folder / "test_performance.py"
cmd = get_launch_command(
num_processes=self.test_tp_size, num_machines=1, machine_rank=0, use_tp=True, tp_size=self.test_tp_size
)
cmd.extend(
[
self.test_file_path,
f"--output_dir={self.tmpdir}",
f"--model_name_or_path={self.model_name_or_path}",
"--add_pad_token=true",
]
)
with patch_environment(omp_num_threads=1):
execute_subprocess_async(cmd)
| accelerate/tests/tp/test_tp.py/0 | {
"file_path": "accelerate/tests/tp/test_tp.py",
"repo_id": "accelerate",
"token_count": 814
} |
.PHONY: clean-ptx clean test
clean-ptx:
find target -name "*.ptx" -type f -delete
echo "" > candle-kernels/src/lib.rs
touch candle-kernels/build.rs
touch candle-examples/build.rs
touch candle-flash-attn/build.rs
clean:
cargo clean
test:
cargo test
all: test
| candle/Makefile/0 | {
"file_path": "candle/Makefile",
"repo_id": "candle",
"token_count": 107
} |
[package]
name = "candle-core"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"
[dependencies]
accelerate-src = { workspace = true, optional = true }
byteorder = { workspace = true }
candle-kernels = { workspace = true, optional = true }
candle-metal-kernels = { workspace = true, optional = true }
metal = { workspace = true, optional = true }
cudarc = { workspace = true, optional = true }
gemm = { workspace = true }
half = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
libc = { workspace = true, optional = true }
memmap2 = { workspace = true }
num-traits = { workspace = true }
num_cpus = { workspace = true }
rand = { workspace = true }
rand_distr = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
thiserror = { workspace = true }
ug-cuda = { workspace = true, optional = true }
ug-metal = { workspace = true, optional = true }
yoke = { workspace = true }
zip = { workspace = true }
[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
ug = { workspace = true }
[dev-dependencies]
anyhow = { workspace = true }
clap = { workspace = true }
criterion = { workspace = true }
[features]
default = []
cuda = ["cudarc", "dep:candle-kernels", "dep:ug-cuda"]
cudnn = ["cuda", "cudarc/cudnn"]
mkl = ["dep:libc", "dep:intel-mkl-src"]
accelerate = ["dep:libc", "dep:accelerate-src"]
metal = ["dep:metal", "dep:candle-metal-kernels", "dep:ug-metal"]
[[bench]]
name = "bench_main"
harness = false
[[example]]
name = "metal_basics"
required-features = ["metal"]
| candle/candle-core/Cargo.toml/0 | {
"file_path": "candle/candle-core/Cargo.toml",
"repo_id": "candle",
"token_count": 564
} |
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn main() -> Result<()> {
// This requires the code to be run with MTL_CAPTURE_ENABLED=1
let device = Device::new_metal(0)?;
let metal_device = match &device {
Device::Metal(m) => m,
_ => anyhow::bail!("unexpected device"),
};
metal_device.capture("/tmp/candle.gputrace")?;
// This first synchronize ensures that a new command buffer gets created after setting up the
// capture scope.
device.synchronize()?;
let x = Tensor::randn(0f32, 1.0, (128, 128), &device)?;
let x1 = x.add(&x)?;
println!("{x1:?}");
    // This second synchronize ensures that the command buffer gets committed before the end of the
// capture scope.
device.synchronize()?;
Ok(())
}
| candle/candle-core/examples/metal_basics.rs/0 | {
"file_path": "candle/candle-core/examples/metal_basics.rs",
"repo_id": "candle",
"token_count": 347
} |
use crate::{DType, Layout};
/// cudarc related errors
#[derive(thiserror::Error, Debug)]
pub enum CudaError {
#[error(transparent)]
Cuda(#[from] cudarc::driver::DriverError),
#[error(transparent)]
Compiler(#[from] cudarc::nvrtc::CompileError),
#[error(transparent)]
Cublas(#[from] cudarc::cublas::result::CublasError),
#[error(transparent)]
Curand(#[from] cudarc::curand::result::CurandError),
#[error("missing kernel '{module_name}'")]
MissingKernel { module_name: String },
#[error("unsupported dtype {dtype:?} for {op}")]
UnsupportedDtype { dtype: DType, op: &'static str },
#[error("internal error '{0}'")]
InternalError(&'static str),
#[error("matmul is only supported for contiguous tensors lstride: {lhs_stride:?} rstride: {rhs_stride:?} mnk: {mnk:?}")]
MatMulNonContiguous {
lhs_stride: Layout,
rhs_stride: Layout,
mnk: (usize, usize, usize),
},
#[error("{msg}, expected: {expected:?}, got: {got:?}")]
UnexpectedDType {
msg: &'static str,
expected: DType,
got: DType,
},
#[error("{cuda} when loading {module_name}")]
Load {
cuda: cudarc::driver::DriverError,
module_name: String,
},
}
impl From<CudaError> for crate::Error {
fn from(val: CudaError) -> Self {
crate::Error::Cuda(Box::new(val)).bt()
}
}
pub trait WrapErr<O> {
fn w(self) -> std::result::Result<O, crate::Error>;
}
impl<O, E: Into<CudaError>> WrapErr<O> for std::result::Result<O, E> {
fn w(self) -> std::result::Result<O, crate::Error> {
self.map_err(|e| crate::Error::Cuda(Box::new(e.into())).bt())
}
}
| candle/candle-core/src/cuda_backend/error.rs/0 | {
"file_path": "candle/candle-core/src/cuda_backend/error.rs",
"repo_id": "candle",
"token_count": 750
} |
//! Numpy support for tensors.
//!
//! The spec for the npy format can be found in
//! [npy-format](https://docs.scipy.org/doc/numpy-1.14.2/neps/npy-format.html).
//! The functions from this module can be used to read tensors from npy/npz files
//! or write tensors to these files. A npy file contains a single tensor (unnamed)
//! whereas a npz file can contain multiple named tensors. npz files are also compressed.
//!
//! These two formats are easy to use in Python using the numpy library.
//!
//! ```python
//! import numpy as np
//! x = np.arange(10)
//!
//! # Write a npy file.
//! np.save("test.npy", x)
//!
//! # Read a value from the npy file.
//! x = np.load("test.npy")
//!
//! # Write multiple values to a npz file.
//! values = { "x": x, "x_plus_one": x + 1 }
//! np.savez("test.npz", **values)
//!
//! # Load multiple values from a npz file.
//! values = np.load("test.npz")
//! ```
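//!
//! A minimal Rust sketch of the equivalent round-trip using the helpers defined below (error
//! handling elided; the file names are placeholders):
//!
//! ```ignore
//! use candle_core::{Device, Tensor};
//!
//! // Write a single tensor to a npy file and read it back.
//! let x = Tensor::arange(0f32, 10f32, &Device::Cpu)?;
//! x.write_npy("test.npy")?;
//! let y = Tensor::read_npy("test.npy")?;
//!
//! // Write multiple named tensors to a npz file and read them all back.
//! Tensor::write_npz(&[("x", &x), ("y", &y)], "test.npz")?;
//! let tensors = Tensor::read_npz("test.npz")?;
//! ```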
use crate::{DType, Device, Error, Result, Shape, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};
use half::{bf16, f16, slice::HalfFloatSliceExt};
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::Path;
const NPY_MAGIC_STRING: &[u8] = b"\x93NUMPY";
const NPY_SUFFIX: &str = ".npy";
fn read_header<R: Read>(reader: &mut R) -> Result<String> {
let mut magic_string = vec![0u8; NPY_MAGIC_STRING.len()];
reader.read_exact(&mut magic_string)?;
if magic_string != NPY_MAGIC_STRING {
return Err(Error::Npy("magic string mismatch".to_string()));
}
let mut version = [0u8; 2];
reader.read_exact(&mut version)?;
let header_len_len = match version[0] {
1 => 2,
2 => 4,
otherwise => return Err(Error::Npy(format!("unsupported version {otherwise}"))),
};
let mut header_len = vec![0u8; header_len_len];
reader.read_exact(&mut header_len)?;
let header_len = header_len
.iter()
.rev()
.fold(0_usize, |acc, &v| 256 * acc + v as usize);
let mut header = vec![0u8; header_len];
reader.read_exact(&mut header)?;
Ok(String::from_utf8_lossy(&header).to_string())
}
#[derive(Debug, PartialEq)]
struct Header {
descr: DType,
fortran_order: bool,
shape: Vec<usize>,
}
impl Header {
fn shape(&self) -> Shape {
Shape::from(self.shape.as_slice())
}
fn to_string(&self) -> Result<String> {
let fortran_order = if self.fortran_order { "True" } else { "False" };
let mut shape = self
.shape
.iter()
.map(|x| x.to_string())
.collect::<Vec<_>>()
.join(",");
let descr = match self.descr {
DType::BF16 => Err(Error::Npy("bf16 is not supported".into()))?,
DType::F16 => "f2",
DType::F32 => "f4",
DType::F64 => "f8",
DType::I64 => "i8",
DType::U32 => "u4",
DType::U8 => "u1",
};
if !shape.is_empty() {
shape.push(',')
}
Ok(format!(
"{{'descr': '<{descr}', 'fortran_order': {fortran_order}, 'shape': ({shape}), }}"
))
}
// Hacky parser for the npy header, a typical example would be:
// {'descr': '<f8', 'fortran_order': False, 'shape': (128,), }
fn parse(header: &str) -> Result<Header> {
let header =
header.trim_matches(|c: char| c == '{' || c == '}' || c == ',' || c.is_whitespace());
let mut parts: Vec<String> = vec![];
let mut start_index = 0usize;
let mut cnt_parenthesis = 0i64;
for (index, c) in header.chars().enumerate() {
match c {
'(' => cnt_parenthesis += 1,
')' => cnt_parenthesis -= 1,
',' => {
if cnt_parenthesis == 0 {
parts.push(header[start_index..index].to_owned());
start_index = index + 1;
}
}
_ => {}
}
}
parts.push(header[start_index..].to_owned());
let mut part_map: HashMap<String, String> = HashMap::new();
for part in parts.iter() {
let part = part.trim();
if !part.is_empty() {
match part.split(':').collect::<Vec<_>>().as_slice() {
[key, value] => {
let key = key.trim_matches(|c: char| c == '\'' || c.is_whitespace());
let value = value.trim_matches(|c: char| c == '\'' || c.is_whitespace());
let _ = part_map.insert(key.to_owned(), value.to_owned());
}
_ => return Err(Error::Npy(format!("unable to parse header {header}"))),
}
}
}
let fortran_order = match part_map.get("fortran_order") {
None => false,
Some(fortran_order) => match fortran_order.as_ref() {
"False" => false,
"True" => true,
_ => return Err(Error::Npy(format!("unknown fortran_order {fortran_order}"))),
},
};
let descr = match part_map.get("descr") {
None => return Err(Error::Npy("no descr in header".to_string())),
Some(descr) => {
if descr.is_empty() {
return Err(Error::Npy("empty descr".to_string()));
}
if descr.starts_with('>') {
                    return Err(Error::Npy(format!("big-endian descr {descr} is not supported")));
}
// the only supported types in tensor are:
// float64, float32, float16,
// complex64, complex128,
// int64, int32, int16, int8,
// uint8, and bool.
match descr.trim_matches(|c: char| c == '=' || c == '<' || c == '|') {
"e" | "f2" => DType::F16,
"f" | "f4" => DType::F32,
"d" | "f8" => DType::F64,
// "i" | "i4" => DType::S32,
"q" | "i8" => DType::I64,
// "h" | "i2" => DType::S16,
// "b" | "i1" => DType::S8,
"B" | "u1" => DType::U8,
"I" | "u4" => DType::U32,
"?" | "b1" => DType::U8,
// "F" | "F4" => DType::C64,
// "D" | "F8" => DType::C128,
descr => return Err(Error::Npy(format!("unrecognized descr {descr}"))),
}
}
};
let shape = match part_map.get("shape") {
None => return Err(Error::Npy("no shape in header".to_string())),
Some(shape) => {
let shape = shape.trim_matches(|c: char| c == '(' || c == ')' || c == ',');
if shape.is_empty() {
vec![]
} else {
shape
.split(',')
.map(|v| v.trim().parse::<usize>())
.collect::<std::result::Result<Vec<_>, _>>()?
}
}
};
Ok(Header {
descr,
fortran_order,
shape,
})
}
}
impl Tensor {
// TODO: Add the possibility to read directly to a device?
pub(crate) fn from_reader<R: std::io::Read>(
shape: Shape,
dtype: DType,
reader: &mut R,
) -> Result<Self> {
let elem_count = shape.elem_count();
match dtype {
DType::BF16 => {
let mut data_t = vec![bf16::ZERO; elem_count];
reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
DType::F16 => {
let mut data_t = vec![f16::ZERO; elem_count];
reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
DType::F32 => {
let mut data_t = vec![0f32; elem_count];
reader.read_f32_into::<LittleEndian>(&mut data_t)?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
DType::F64 => {
let mut data_t = vec![0f64; elem_count];
reader.read_f64_into::<LittleEndian>(&mut data_t)?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
DType::U8 => {
let mut data_t = vec![0u8; elem_count];
reader.read_exact(&mut data_t)?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
DType::U32 => {
let mut data_t = vec![0u32; elem_count];
reader.read_u32_into::<LittleEndian>(&mut data_t)?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
DType::I64 => {
let mut data_t = vec![0i64; elem_count];
reader.read_i64_into::<LittleEndian>(&mut data_t)?;
Tensor::from_vec(data_t, shape, &Device::Cpu)
}
}
}
/// Reads a npy file and return the stored multi-dimensional array as a tensor.
pub fn read_npy<T: AsRef<Path>>(path: T) -> Result<Self> {
let mut reader = File::open(path.as_ref())?;
let header = read_header(&mut reader)?;
let header = Header::parse(&header)?;
if header.fortran_order {
return Err(Error::Npy("fortran order not supported".to_string()));
}
Self::from_reader(header.shape(), header.descr, &mut reader)
}
/// Reads a npz file and returns the stored multi-dimensional arrays together with their names.
pub fn read_npz<T: AsRef<Path>>(path: T) -> Result<Vec<(String, Self)>> {
let zip_reader = BufReader::new(File::open(path.as_ref())?);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let mut result = vec![];
for i in 0..zip.len() {
let mut reader = zip.by_index(i)?;
let name = {
let name = reader.name();
name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned()
};
let header = read_header(&mut reader)?;
let header = Header::parse(&header)?;
if header.fortran_order {
return Err(Error::Npy("fortran order not supported".to_string()));
}
let s = Self::from_reader(header.shape(), header.descr, &mut reader)?;
result.push((name, s))
}
Ok(result)
}
/// Reads a npz file and returns the stored multi-dimensional arrays for some specified names.
pub fn read_npz_by_name<T: AsRef<Path>>(path: T, names: &[&str]) -> Result<Vec<Self>> {
let zip_reader = BufReader::new(File::open(path.as_ref())?);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let mut result = vec![];
for name in names.iter() {
let mut reader = match zip.by_name(&format!("{name}{NPY_SUFFIX}")) {
Ok(reader) => reader,
Err(_) => Err(Error::Npy(format!(
"no array for {name} in {:?}",
path.as_ref()
)))?,
};
let header = read_header(&mut reader)?;
let header = Header::parse(&header)?;
if header.fortran_order {
return Err(Error::Npy("fortran order not supported".to_string()));
}
let s = Self::from_reader(header.shape(), header.descr, &mut reader)?;
result.push(s)
}
Ok(result)
}
fn write<T: Write>(&self, f: &mut T) -> Result<()> {
f.write_all(NPY_MAGIC_STRING)?;
f.write_all(&[1u8, 0u8])?;
let header = Header {
descr: self.dtype(),
fortran_order: false,
shape: self.dims().to_vec(),
};
let mut header = header.to_string()?;
let pad = 16 - (NPY_MAGIC_STRING.len() + 5 + header.len()) % 16;
for _ in 0..pad % 16 {
header.push(' ')
}
header.push('\n');
f.write_all(&[(header.len() % 256) as u8, (header.len() / 256) as u8])?;
f.write_all(header.as_bytes())?;
self.write_bytes(f)
}
/// Writes a multi-dimensional array in the npy format.
pub fn write_npy<T: AsRef<Path>>(&self, path: T) -> Result<()> {
let mut f = File::create(path.as_ref())?;
self.write(&mut f)
}
/// Writes multiple multi-dimensional arrays using the npz format.
pub fn write_npz<S: AsRef<str>, T: AsRef<Tensor>, P: AsRef<Path>>(
ts: &[(S, T)],
path: P,
) -> Result<()> {
let mut zip = zip::ZipWriter::new(File::create(path.as_ref())?);
let options: zip::write::FileOptions<()> =
zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored);
for (name, tensor) in ts.iter() {
zip.start_file(format!("{}.npy", name.as_ref()), options)?;
tensor.as_ref().write(&mut zip)?
}
Ok(())
}
}
/// Lazy tensor loader.
pub struct NpzTensors {
index_per_name: HashMap<String, usize>,
path: std::path::PathBuf,
// We do not store a zip reader as it needs mutable access to extract data. Instead we
// re-create a zip reader for each tensor.
}
impl NpzTensors {
pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> {
let path = path.as_ref().to_owned();
let zip_reader = BufReader::new(File::open(&path)?);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let mut index_per_name = HashMap::new();
for i in 0..zip.len() {
let file = zip.by_index(i)?;
let name = {
let name = file.name();
name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned()
};
index_per_name.insert(name, i);
}
Ok(Self {
index_per_name,
path,
})
}
pub fn names(&self) -> Vec<&String> {
self.index_per_name.keys().collect()
}
/// This only returns the shape and dtype for a named tensor. Compared to `get`, this avoids
/// reading the whole tensor data.
pub fn get_shape_and_dtype(&self, name: &str) -> Result<(Shape, DType)> {
let index = match self.index_per_name.get(name) {
None => crate::bail!("cannot find tensor {name}"),
Some(index) => *index,
};
let zip_reader = BufReader::new(File::open(&self.path)?);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let mut reader = zip.by_index(index)?;
let header = read_header(&mut reader)?;
let header = Header::parse(&header)?;
Ok((header.shape(), header.descr))
}
pub fn get(&self, name: &str) -> Result<Option<Tensor>> {
let index = match self.index_per_name.get(name) {
None => return Ok(None),
Some(index) => *index,
};
// We hope that the file has not changed since first reading it.
let zip_reader = BufReader::new(File::open(&self.path)?);
let mut zip = zip::ZipArchive::new(zip_reader)?;
let mut reader = zip.by_index(index)?;
let header = read_header(&mut reader)?;
let header = Header::parse(&header)?;
if header.fortran_order {
return Err(Error::Npy("fortran order not supported".to_string()));
}
let tensor = Tensor::from_reader(header.shape(), header.descr, &mut reader)?;
Ok(Some(tensor))
}
}
#[cfg(test)]
mod tests {
use super::Header;
#[test]
fn parse() {
let h = "{'descr': '<f8', 'fortran_order': False, 'shape': (128,), }";
assert_eq!(
Header::parse(h).unwrap(),
Header {
descr: crate::DType::F64,
fortran_order: false,
shape: vec![128]
}
);
let h = "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128), }";
let h = Header::parse(h).unwrap();
assert_eq!(
h,
Header {
descr: crate::DType::F32,
fortran_order: true,
shape: vec![256, 1, 128]
}
);
assert_eq!(
h.to_string().unwrap(),
"{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128,), }"
);
let h = Header {
descr: crate::DType::U32,
fortran_order: false,
shape: vec![],
};
assert_eq!(
h.to_string().unwrap(),
"{'descr': '<u4', 'fortran_order': False, 'shape': (), }"
);
}
}
| candle/candle-core/src/npy.rs/0 | {
"file_path": "candle/candle-core/src/npy.rs",
"repo_id": "candle",
"token_count": 8727
} |
//! TensorScalar Enum and Trait
//!
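//! A small illustrative sketch (assuming this module is exported as `candle_core::scalar`): a generic
//! function can accept either a `Tensor` or a plain scalar and normalize both into a `TensorScalar`.
//!
//! ```ignore
//! use candle_core::scalar::{TensorOrScalar, TensorScalar};
//!
//! fn describe<T: TensorOrScalar>(v: T) -> candle_core::Result<&'static str> {
//!     Ok(match v.to_tensor_scalar()? {
//!         TensorScalar::Tensor(_) => "tensor",
//!         TensorScalar::Scalar(_) => "scalar",
//!     })
//! }
//!
//! assert_eq!(describe(1f32)?, "scalar");
//! ```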
use crate::{Result, Tensor, WithDType};
pub enum TensorScalar {
Tensor(Tensor),
Scalar(Tensor),
}
pub trait TensorOrScalar {
fn to_tensor_scalar(self) -> Result<TensorScalar>;
}
impl TensorOrScalar for &Tensor {
fn to_tensor_scalar(self) -> Result<TensorScalar> {
Ok(TensorScalar::Tensor(self.clone()))
}
}
impl<T: WithDType> TensorOrScalar for T {
fn to_tensor_scalar(self) -> Result<TensorScalar> {
let scalar = Tensor::new(self, &crate::Device::Cpu)?;
Ok(TensorScalar::Scalar(scalar))
}
}
| candle/candle-core/src/scalar.rs/0 | {
"file_path": "candle/candle-core/src/scalar.rs",
"repo_id": "candle",
"token_count": 277
} |
use anyhow::Result;
use candle_core::{Device, IndexOp, Tensor};
#[test]
fn integer_index() -> Result<()> {
let dev = Device::Cpu;
let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?;
let result = tensor.i(1)?;
assert_eq!(result.dims(), &[3]);
assert_eq!(result.to_vec1::<u32>()?, &[3, 4, 5]);
let result = tensor.i((.., 2))?;
assert_eq!(result.dims(), &[2]);
assert_eq!(result.to_vec1::<u32>()?, &[2, 5]);
Ok(())
}
#[test]
fn range_index() -> Result<()> {
let dev = Device::Cpu;
// RangeFull
let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?;
let result = tensor.i(..)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
// Range
let tensor = Tensor::arange(0u32, 4 * 3, &dev)?.reshape((4, 3))?;
let result = tensor.i(1..3)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]);
// RangeFrom
let result = tensor.i(2..)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[6, 7, 8], [9, 10, 11]]);
// RangeTo
let result = tensor.i(..2)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
// RangeInclusive
let result = tensor.i(1..=2)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]);
// RangeTo
let result = tensor.i(..1)?;
assert_eq!(result.dims(), &[1, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2]]);
// RangeToInclusive
let result = tensor.i(..=1)?;
assert_eq!(result.dims(), &[2, 3]);
assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
// Empty range
let result = tensor.i(1..1)?;
assert_eq!(result.dims(), &[0, 3]);
let empty: [[u32; 3]; 0] = [];
assert_eq!(result.to_vec2::<u32>()?, &empty);
// Similar to PyTorch, allow empty ranges when the computed length is negative.
#[allow(clippy::reversed_empty_ranges)]
let result = tensor.i(1..0)?;
assert_eq!(result.dims(), &[0, 3]);
let empty: [[u32; 3]; 0] = [];
assert_eq!(result.to_vec2::<u32>()?, &empty);
Ok(())
}
#[test]
fn index_3d() -> Result<()> {
let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?;
assert_eq!(tensor.i((0, 0, 0))?.to_scalar::<u32>()?, 0);
assert_eq!(tensor.i((1, 0, 0))?.to_scalar::<u32>()?, 12);
assert_eq!(tensor.i((0, 1, 0))?.to_scalar::<u32>()?, 4);
assert_eq!(tensor.i((0, 1, 3))?.to_scalar::<u32>()?, 7);
assert_eq!(tensor.i((0..2, 0, 0))?.to_vec1::<u32>()?, &[0, 12]);
assert_eq!(
tensor.i((0..2, .., 0))?.to_vec2::<u32>()?,
&[[0, 4, 8], [12, 16, 20]]
);
assert_eq!(
tensor.i((..2, .., 3))?.to_vec2::<u32>()?,
&[[3, 7, 11], [15, 19, 23]]
);
assert_eq!(tensor.i((1, .., 3))?.to_vec1::<u32>()?, &[15, 19, 23]);
Ok(())
}
#[test]
fn slice_assign() -> Result<()> {
let dev = Device::Cpu;
let tensor = Tensor::arange(0u32, 4 * 5, &dev)?.reshape((4, 5))?;
let src = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((3, 2))?;
let out = tensor.slice_assign(&[1..4, 3..5], &src)?;
assert_eq!(
out.to_vec2::<u32>()?,
&[
[0, 1, 2, 3, 4],
[5, 6, 7, 0, 1],
[10, 11, 12, 2, 3],
[15, 16, 17, 4, 5]
]
);
let out = tensor.slice_assign(&[0..3, 0..2], &src)?;
assert_eq!(
out.to_vec2::<u32>()?,
&[
[0, 1, 2, 3, 4],
[2, 3, 7, 8, 9],
[4, 5, 12, 13, 14],
[15, 16, 17, 18, 19]
]
);
Ok(())
}
| candle/candle-core/tests/indexing_tests.rs/0 | {
"file_path": "candle/candle-core/tests/indexing_tests.rs",
"repo_id": "candle",
"token_count": 1994
} |
use candle::{Result, Tensor};
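/// Batches the items produced by an inner iterator by stacking them along a new leading
/// dimension, `batch_size` items at a time.
///
/// A minimal usage sketch (mirroring the llama2-c training example in this repo; `iter` is assumed
/// to yield `Result<(Tensor, Tensor)>` pairs):
///
/// ```ignore
/// let batch_iter = candle_datasets::Batcher::new_r2(iter).batch_size(32);
/// for batch in batch_iter {
///     let (inputs, targets) = batch?;
///     // `inputs` and `targets` now carry an extra leading batch dimension.
/// }
/// ```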
pub struct Batcher<I> {
inner: I,
batch_size: usize,
return_last_incomplete_batch: bool,
}
impl<I> Batcher<I> {
fn new(inner: I) -> Self {
Self {
inner,
batch_size: 16,
return_last_incomplete_batch: false,
}
}
pub fn batch_size(mut self, batch_size: usize) -> Self {
self.batch_size = batch_size;
self
}
pub fn return_last_incomplete_batch(mut self, r: bool) -> Self {
self.return_last_incomplete_batch = r;
self
}
}
pub struct Iter1<I: Iterator<Item = Tensor>> {
inner: I,
}
pub struct Iter2<I: Iterator<Item = (Tensor, Tensor)>> {
inner: I,
}
impl<I: Iterator<Item = Tensor>> Batcher<Iter1<I>> {
pub fn new1(inner: I) -> Self {
Self::new(Iter1 { inner })
}
}
impl<I: Iterator<Item = (Tensor, Tensor)>> Batcher<Iter2<I>> {
pub fn new2(inner: I) -> Self {
Self::new(Iter2 { inner })
}
}
pub struct IterResult1<I: Iterator<Item = Result<Tensor>>> {
inner: I,
}
pub struct IterResult2<I: Iterator<Item = Result<(Tensor, Tensor)>>> {
inner: I,
}
impl<I: Iterator<Item = Result<Tensor>>> Batcher<IterResult1<I>> {
pub fn new_r1(inner: I) -> Self {
Self::new(IterResult1 { inner })
}
}
impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Batcher<IterResult2<I>> {
pub fn new_r2(inner: I) -> Self {
Self::new(IterResult2 { inner })
}
}
impl<I: Iterator<Item = Tensor>> Iterator for Batcher<Iter1<I>> {
type Item = Result<Tensor>;
fn next(&mut self) -> Option<Self::Item> {
let mut items = Vec::with_capacity(self.batch_size);
for _i in 0..self.batch_size {
// We have two levels of inner here so that we can have two implementations of the
// Iterator trait that are different for Iter1 and Iter2. If rust gets better
// specialization at some point we can get rid of this.
match self.inner.inner.next() {
Some(item) => items.push(item),
None => {
if self.return_last_incomplete_batch && !items.is_empty() {
break;
}
return None;
}
}
}
Some(Tensor::stack(&items, 0))
}
}
impl<I: Iterator<Item = (Tensor, Tensor)>> Iterator for Batcher<Iter2<I>> {
type Item = Result<(Tensor, Tensor)>;
fn next(&mut self) -> Option<Self::Item> {
let mut xs = Vec::with_capacity(self.batch_size);
let mut ys = Vec::with_capacity(self.batch_size);
for _i in 0..self.batch_size {
match self.inner.inner.next() {
Some((x, y)) => {
xs.push(x);
ys.push(y)
}
None => {
if self.return_last_incomplete_batch && !xs.is_empty() && !ys.is_empty() {
break;
}
return None;
}
}
}
let xs = Tensor::stack(&xs, 0);
let ys = Tensor::stack(&ys, 0);
Some(xs.and_then(|xs| ys.map(|ys| (xs, ys))))
}
}
impl<I: Iterator<Item = Result<Tensor>>> Iterator for Batcher<IterResult1<I>> {
type Item = Result<Tensor>;
fn next(&mut self) -> Option<Self::Item> {
let mut items = Vec::with_capacity(self.batch_size);
for _i in 0..self.batch_size {
// We have two levels of inner here so that we can have two implementations of the
// Iterator trait that are different for Iter1 and Iter2. If rust gets better
// specialization at some point we can get rid of this.
match self.inner.inner.next() {
Some(item) => items.push(item),
None => {
if self.return_last_incomplete_batch && !items.is_empty() {
break;
}
return None;
}
}
}
let items = items.into_iter().collect::<Result<Vec<Tensor>>>();
Some(items.and_then(|items| Tensor::stack(&items, 0)))
}
}
impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Iterator for Batcher<IterResult2<I>> {
type Item = Result<(Tensor, Tensor)>;
fn next(&mut self) -> Option<Self::Item> {
let mut xs = Vec::with_capacity(self.batch_size);
let mut ys = Vec::with_capacity(self.batch_size);
let mut errs = vec![];
for _i in 0..self.batch_size {
match self.inner.inner.next() {
Some(Ok((x, y))) => {
xs.push(x);
ys.push(y)
}
Some(Err(err)) => errs.push(err),
None => {
if self.return_last_incomplete_batch && !xs.is_empty() && !ys.is_empty() {
break;
}
return None;
}
}
}
if !errs.is_empty() {
return Some(Err(errs.swap_remove(0)));
}
let xs = Tensor::stack(&xs, 0);
let ys = Tensor::stack(&ys, 0);
Some(xs.and_then(|xs| ys.map(|ys| (xs, ys))))
}
}
| candle/candle-datasets/src/batcher.rs/0 | {
"file_path": "candle/candle-datasets/src/batcher.rs",
"repo_id": "candle",
"token_count": 2708
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle_transformers::models::bert::{BertModel, Config, HiddenAct, DTYPE};
use anyhow::{Error as E, Result};
use candle::Tensor;
use candle_nn::VarBuilder;
use clap::Parser;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::{PaddingParams, Tokenizer};
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model to use, check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
/// When set, compute embeddings for this prompt.
#[arg(long)]
prompt: Option<String>,
/// Use the pytorch weights rather than the safetensors ones
#[arg(long)]
use_pth: bool,
/// The number of times to run the prompt.
#[arg(long, default_value = "1")]
n: usize,
/// L2 normalization for embeddings.
#[arg(long, default_value = "true")]
normalize_embeddings: bool,
/// Use tanh based approximation for Gelu instead of erf implementation.
#[arg(long, default_value = "false")]
approximate_gelu: bool,
}
impl Args {
fn build_model_and_tokenizer(&self) -> Result<(BertModel, Tokenizer)> {
let device = candle_examples::device(self.cpu)?;
let default_model = "sentence-transformers/all-MiniLM-L6-v2".to_string();
let default_revision = "refs/pr/21".to_string();
let (model_id, revision) = match (self.model_id.to_owned(), self.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let repo = Repo::with_revision(model_id, RepoType::Model, revision);
let (config_filename, tokenizer_filename, weights_filename) = {
let api = Api::new()?;
let api = api.repo(repo);
let config = api.get("config.json")?;
let tokenizer = api.get("tokenizer.json")?;
let weights = if self.use_pth {
api.get("pytorch_model.bin")?
} else {
api.get("model.safetensors")?
};
(config, tokenizer, weights)
};
let config = std::fs::read_to_string(config_filename)?;
let mut config: Config = serde_json::from_str(&config)?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let vb = if self.use_pth {
VarBuilder::from_pth(&weights_filename, DTYPE, &device)?
} else {
unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], DTYPE, &device)? }
};
if self.approximate_gelu {
config.hidden_act = HiddenAct::GeluApproximate;
}
let model = BertModel::load(vb, &config)?;
Ok((model, tokenizer))
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
println!("tracing...");
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let start = std::time::Instant::now();
let (model, mut tokenizer) = args.build_model_and_tokenizer()?;
let device = &model.device;
if let Some(prompt) = args.prompt {
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
let token_type_ids = token_ids.zeros_like()?;
println!("Loaded and encoded {:?}", start.elapsed());
for idx in 0..args.n {
let start = std::time::Instant::now();
let ys = model.forward(&token_ids, &token_type_ids, None)?;
if idx == 0 {
println!("{ys}");
}
println!("Took {:?}", start.elapsed());
}
} else {
let sentences = [
"The cat sits outside",
"A man is playing guitar",
"I love pasta",
"The new movie is awesome",
"The cat plays in the garden",
"A woman watches TV",
"The new movie is so great",
"Do you like pizza?",
];
let n_sentences = sentences.len();
if let Some(pp) = tokenizer.get_padding_mut() {
pp.strategy = tokenizers::PaddingStrategy::BatchLongest
} else {
let pp = PaddingParams {
strategy: tokenizers::PaddingStrategy::BatchLongest,
..Default::default()
};
tokenizer.with_padding(Some(pp));
}
let tokens = tokenizer
.encode_batch(sentences.to_vec(), true)
.map_err(E::msg)?;
let token_ids = tokens
.iter()
.map(|tokens| {
let tokens = tokens.get_ids().to_vec();
Ok(Tensor::new(tokens.as_slice(), device)?)
})
.collect::<Result<Vec<_>>>()?;
let attention_mask = tokens
.iter()
.map(|tokens| {
let tokens = tokens.get_attention_mask().to_vec();
Ok(Tensor::new(tokens.as_slice(), device)?)
})
.collect::<Result<Vec<_>>>()?;
let token_ids = Tensor::stack(&token_ids, 0)?;
let attention_mask = Tensor::stack(&attention_mask, 0)?;
let token_type_ids = token_ids.zeros_like()?;
println!("running inference on batch {:?}", token_ids.shape());
let embeddings = model.forward(&token_ids, &token_type_ids, Some(&attention_mask))?;
println!("generated embeddings {:?}", embeddings.shape());
// Apply some avg-pooling by taking the mean embedding value for all tokens (including padding)
let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
let embeddings = if args.normalize_embeddings {
normalize_l2(&embeddings)?
} else {
embeddings
};
println!("pooled embeddings {:?}", embeddings.shape());
let mut similarities = vec![];
for i in 0..n_sentences {
let e_i = embeddings.get(i)?;
for j in (i + 1)..n_sentences {
let e_j = embeddings.get(j)?;
let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?;
let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?;
let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?;
let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt();
similarities.push((cosine_similarity, i, j))
}
}
similarities.sort_by(|u, v| v.0.total_cmp(&u.0));
for &(score, i, j) in similarities[..5].iter() {
println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j])
}
}
Ok(())
}
pub fn normalize_l2(v: &Tensor) -> Result<Tensor> {
Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?)
}
| candle/candle-examples/examples/bert/main.rs/0 | {
"file_path": "candle/candle-examples/examples/bert/main.rs",
"repo_id": "candle",
"token_count": 3718
} |
# candle-efficientvit
[EfficientViT: Memory Efficient Vision Transformer with Cascaded Group Attention](https://arxiv.org/abs/2305.07027).
This candle implementation uses a pre-trained EfficientViT (from Microsoft Research Asia) network for inference.
The classification head has been trained on the ImageNet dataset and returns the probabilities for the top-5 classes.
## Running an example
```
$ cargo run --example efficientvit --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which m1
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 69.80%
unicycle, monocycle : 13.03%
bicycle-built-for-two, tandem bicycle, tandem: 9.28%
crash helmet : 2.25%
alp : 0.46%
```
| candle/candle-examples/examples/efficientvit/README.md/0 | {
"file_path": "candle/candle-examples/examples/efficientvit/README.md",
"repo_id": "candle",
"token_count": 273
} |
# candle-gemma: 2b and 7b LLMs from Google DeepMind
[Gemma](https://ai.google.dev/gemma/docs) is a collection of lightweight open
models published by Google DeepMind with a 2b and a 7b variant for the first
version, and a 2b and a 9b variant for v2.
## Running the example
```bash
$ cargo run --example gemma --features cuda -r -- \
--prompt "Here is a proof that square root of 2 is not rational: "
Here is a proof that square root of 2 is not rational:
Let us assume it to be rational. Then, we can write β2 = p/q where q β 0 and p and q are integers with no common factors other than 1. Squaring both sides gives us (p/q)^2 = 2 or p^2/q^2 = 2. This implies that p^2 is divisible by 2, which means that p must be even. Let us write p = 2m where m is an integer. Substituting this in the above equation we get:
(p^2)/q^2 = 2 or (4m^2)/q^2 = 2 or q^2/2m^2 = 1 which implies that q^2 must be divisible by 2, and hence q is even. This contradicts our assumption that p and q have no common factors other than 1. Hence we conclude that β2 cannot be rational.
```
## Access restrictions
In order to use the v1 examples, you have to accept the license on the
[HuggingFace Hub Gemma repo](https://huggingface.co/google/gemma-7b) and set up
your access token via the [HuggingFace cli login
command](https://huggingface.co/docs/huggingface_hub/guides/cli#huggingface-cli-login).
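For example, once the license has been accepted, you can authenticate locally with:
```bash
huggingface-cli login
```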
| candle/candle-examples/examples/gemma/README.md/0 | {
"file_path": "candle/candle-examples/examples/gemma/README.md",
"repo_id": "candle",
"token_count": 441
} |
use crate::model::{Cache, Config, Llama};
use candle::{DType, Device, Result};
use candle_datasets::nlp::tinystories::{Dataset, DatasetRandomIter};
use candle_nn::Optimizer;
fn valid_loss(
dataset: &Dataset,
model: &Llama,
args: &crate::TrainingCmd,
device: &Device,
cache: &mut Cache,
) -> Result<f64> {
let iter = DatasetRandomIter::new(dataset, true, model.config.seq_len, device.clone());
let batch_iter = candle_datasets::Batcher::new_r2(iter).batch_size(args.batch_size);
let mut sum_ce = 0f64;
let mut cnt = 0usize;
for inp_tgt in batch_iter.take(50) {
let (inp, tgt) = inp_tgt?;
let logits = model.forward(&inp, 0, cache)?;
let loss = candle_nn::loss::cross_entropy(&logits.flatten_to(1)?, &tgt.flatten_to(1)?)?;
sum_ce += loss.to_vec0::<f32>()? as f64;
cnt += 1;
}
Ok(sum_ce / cnt as f64)
}
pub fn run(args: &crate::TrainingCmd, common_args: &crate::Args) -> Result<()> {
let device = candle_examples::device(common_args.cpu)?;
let dataset = Dataset::new(&args.pretokenized_dir)?;
println!(
"loaded dataset, train: {} files, valid: {} files",
dataset.train_tokens(),
dataset.valid_tokens()
);
let varmap = candle_nn::VarMap::new();
let vb = candle_nn::VarBuilder::from_varmap(&varmap, DType::F32, &device);
let config = Config::tiny_15m();
let iter = DatasetRandomIter::new(&dataset, false, config.seq_len, device.clone());
let batch_iter = candle_datasets::Batcher::new_r2(iter).batch_size(args.batch_size);
let mut cache = Cache::new(false, &config, vb.pp("rot"))?;
let model = Llama::load(vb, config)?;
let params = candle_nn::ParamsAdamW {
lr: args.learning_rate,
..Default::default()
};
let mut opt = candle_nn::AdamW::new(varmap.all_vars(), params)?;
for (batch_index, batch) in batch_iter.enumerate() {
let (inp, tgt) = batch?;
let logits = model.forward(&inp, 0, &mut cache)?;
let loss = candle_nn::loss::cross_entropy(&logits.flatten_to(1)?, &tgt.flatten_to(1)?)?;
opt.backward_step(&loss)?;
if batch_index > 0 && batch_index % 100 == 0 {
// TODO: Add a way to deactivate the backprop graph tracking when computing the
// validation loss.
let loss = valid_loss(&dataset, &model, args, &device, &mut cache)?;
println!("{batch_index} {loss}");
}
if batch_index > 0 && batch_index % 1000 == 0 {
varmap.save("checkpoint.safetensors")?
}
}
Ok(())
}
| candle/candle-examples/examples/llama2-c/training.rs/0 | {
"file_path": "candle/candle-examples/examples/llama2-c/training.rs",
"repo_id": "candle",
"token_count": 1144
} |
# candle-metavoice
MetaVoice-1B is a text-to-speech model trained on 100K hours of speech, more
details on the [model
card](https://huggingface.co/metavoiceio/metavoice-1B-v0.1).
Note that the current candle implementation suffers from some limitations as of
2024-03-02:
- The speaker embeddings are hardcoded.
- The generated audio file quality is weaker than the Python implementation,
probably because of some implementation discrepancies.
## Run an example
```bash
cargo run --example metavoice --release -- \
--prompt "This is a demo of text to speech by MetaVoice-1B, an open-source foundational audio model."
```
| candle/candle-examples/examples/metavoice/README.md/0 | {
"file_path": "candle/candle-examples/examples/metavoice/README.md",
"repo_id": "candle",
"token_count": 178
} |
# candle-modernbert
ModernBERT is a bidirectional encoder-only language model. In this example it is used for the fill-mask task:
## Usage
```bash
cargo run --example modernbert --release -- --model modern-bert-large --prompt 'The capital of France is [MASK].'
```
```markdown
Sentence: 1 : The capital of France is Paris.
```
| candle/candle-examples/examples/modernbert/README.md/0 | {
"file_path": "candle/candle-examples/examples/modernbert/README.md",
"repo_id": "candle",
"token_count": 102
} |
use std::path::PathBuf;
use anyhow::{Error as E, Result};
use candle::Tensor;
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{self, BertForMaskedLM, Config};
use clap::Parser;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::{PaddingParams, Tokenizer};
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model to use, check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending
#[arg(long)]
model_id: Option<String>,
#[arg(long, default_value = "main")]
revision: String,
// Path to the tokenizer file.
#[arg(long)]
tokenizer_file: Option<String>,
// Path to the weight files.
#[arg(long)]
weight_files: Option<String>,
// Path to the config file.
#[arg(long)]
config_file: Option<String>,
/// When set, compute embeddings for this prompt.
#[arg(long)]
prompt: Option<String>,
}
fn main() -> Result<()> {
let args = Args::parse();
let api = Api::new()?;
let model_id = match &args.model_id {
Some(model_id) => model_id.to_string(),
None => "prithivida/Splade_PP_en_v1".to_string(),
};
let repo = api.repo(Repo::with_revision(
model_id,
RepoType::Model,
args.revision,
));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
let config_filename = match args.config_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("config.json")?,
};
let weights_filename = match args.weight_files {
Some(files) => PathBuf::from(files),
None => match repo.get("model.safetensors") {
Ok(safetensors) => safetensors,
Err(_) => match repo.get("pytorch_model.bin") {
Ok(pytorch_model) => pytorch_model,
Err(e) => {
return Err(anyhow::Error::msg(format!("Model weights not found. The weights should either be a `model.safetensors` or `pytorch_model.bin` file. Error: {}", e)));
}
},
},
};
let config = std::fs::read_to_string(config_filename)?;
let config: Config = serde_json::from_str(&config)?;
let mut tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let device = candle_examples::device(args.cpu)?;
let dtype = bert::DTYPE;
let vb = if weights_filename.ends_with("model.safetensors") {
unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], dtype, &device).unwrap() }
} else {
println!("Loading weights from pytorch_model.bin");
VarBuilder::from_pth(&weights_filename, dtype, &device).unwrap()
};
let model = BertForMaskedLM::load(vb, &config)?;
if let Some(prompt) = args.prompt {
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], &device)?.unsqueeze(0)?;
let token_type_ids = token_ids.zeros_like()?;
let ys = model.forward(&token_ids, &token_type_ids, None)?;
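// SPLADE representation: log(1 + ReLU(logits)), max-pooled over the sequence dimension,
// giving one non-negative weight per vocabulary term.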
let vec = Tensor::log(
&Tensor::try_from(1.0)?
.to_dtype(dtype)?
.to_device(&device)?
.broadcast_add(&ys.relu()?)?,
)?
.max(1)?;
let vec = normalize_l2(&vec)?;
let vec = vec.squeeze(0)?.to_vec1::<f32>()?;
let indices = (0..vec.len())
.filter(|&i| vec[i] != 0.0)
.map(|x| x as u32)
.collect::<Vec<_>>();
let tokens = tokenizer.decode(&indices, true).unwrap();
println!("{tokens:?}");
let values = indices.iter().map(|&i| vec[i as usize]).collect::<Vec<_>>();
println!("{values:?}");
} else {
let sentences = [
"The cat sits outside",
"A man is playing guitar",
"I love pasta",
"The new movie is awesome",
"The cat plays in the garden",
"A woman watches TV",
"The new movie is so great",
"Do you like pizza?",
];
let n_sentences = sentences.len();
if let Some(pp) = tokenizer.get_padding_mut() {
pp.strategy = tokenizers::PaddingStrategy::BatchLongest
} else {
let pp = PaddingParams {
strategy: tokenizers::PaddingStrategy::BatchLongest,
..Default::default()
};
tokenizer.with_padding(Some(pp));
}
let tokens = tokenizer
.encode_batch(sentences.to_vec(), true)
.map_err(E::msg)?;
let token_ids = tokens
.iter()
.map(|tokens| {
let tokens = tokens.get_ids().to_vec();
Ok(Tensor::new(tokens.as_slice(), &device)?)
})
.collect::<Result<Vec<_>>>()?;
let attention_mask = tokens
.iter()
.map(|tokens| {
let tokens = tokens.get_attention_mask().to_vec();
Ok(Tensor::new(tokens.as_slice(), &device)?)
})
.collect::<Result<Vec<_>>>()?;
let token_ids = Tensor::stack(&token_ids, 0)?;
let attention_mask = Tensor::stack(&attention_mask, 0)?;
let token_type_ids = token_ids.zeros_like()?;
let ys = model.forward(&token_ids, &token_type_ids, Some(&attention_mask))?;
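// Same log(1 + ReLU(logits)) transform as above, but multiplied by the attention mask
// before max-pooling so that padding tokens do not contribute to the sparse vectors.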
let vector = Tensor::log(
&Tensor::try_from(1.0)?
.to_dtype(dtype)?
.to_device(&device)?
.broadcast_add(&ys.relu()?)?,
)?;
let vector = vector
.broadcast_mul(&attention_mask.unsqueeze(2)?.to_dtype(dtype)?)?
.max(1)?;
let vec = normalize_l2(&vector)?;
let mut similarities = vec![];
for i in 0..n_sentences {
let e_i = vec.get(i)?;
for j in (i + 1)..n_sentences {
let e_j = vec.get(j)?;
let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?;
let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?;
let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?;
let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt();
similarities.push((cosine_similarity, i, j))
}
}
similarities.sort_by(|u, v| v.0.total_cmp(&u.0));
for &(score, i, j) in similarities[..5].iter() {
println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j])
}
}
Ok(())
}
pub fn normalize_l2(v: &Tensor) -> Result<Tensor> {
Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?)
}
| candle/candle-examples/examples/splade/main.rs/0 | {
"file_path": "candle/candle-examples/examples/splade/main.rs",
"repo_id": "candle",
"token_count": 3553
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::io::Write;
use std::path::PathBuf;
use candle_transformers::models::t5;
use anyhow::{Error as E, Result};
use candle::{DType, Device, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
const DTYPE: DType = DType::F32;
#[derive(Clone, Debug, Copy, ValueEnum)]
enum Which {
T5Base,
T5Small,
T5Large,
T5_3B,
Mt5Base,
Mt5Small,
Mt5Large,
}
#[derive(Parser, Debug, Clone)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model repository to use on the HuggingFace hub.
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
#[arg(long)]
model_file: Option<String>,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
config_file: Option<String>,
/// Enable decoding.
#[arg(long)]
decode: bool,
// Disable the key-value cache during decoding.
#[arg(long, default_value = "false")]
disable_cache: bool,
/// Use this prompt, otherwise compute sentence similarities.
#[arg(long)]
prompt: Option<String>,
/// If set along with --decode, will use this prompt to initialize the decoder.
#[arg(long)]
decoder_prompt: Option<String>,
/// L2 normalization for embeddings.
#[arg(long, default_value = "true")]
normalize_embeddings: bool,
/// The temperature used to generate samples.
#[arg(long, default_value_t = 0.8)]
temperature: f64,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
/// The model to be used.
#[arg(long, default_value = "t5-small")]
which: Which,
}
struct T5ModelBuilder {
device: Device,
config: t5::Config,
weights_filename: Vec<PathBuf>,
}
impl T5ModelBuilder {
pub fn load(args: &Args) -> Result<(Self, Tokenizer)> {
let device = candle_examples::device(args.cpu)?;
let (default_model, default_revision) = match args.which {
Which::T5Base => ("t5-base", "main"),
Which::T5Small => ("t5-small", "refs/pr/15"),
Which::T5Large => ("t5-large", "main"),
Which::T5_3B => ("t5-3b", "main"),
Which::Mt5Base => ("google/mt5-base", "refs/pr/5"),
Which::Mt5Small => ("google/mt5-small", "refs/pr/6"),
Which::Mt5Large => ("google/mt5-large", "refs/pr/2"),
};
let default_model = default_model.to_string();
let default_revision = default_revision.to_string();
let (model_id, revision) = match (args.model_id.to_owned(), args.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let repo = Repo::with_revision(model_id.clone(), RepoType::Model, revision);
let api = Api::new()?;
let repo = api.repo(repo);
let config_filename = match &args.config_file {
None => repo.get("config.json")?,
Some(f) => f.into(),
};
let tokenizer_filename = match &args.tokenizer_file {
None => match args.which {
Which::Mt5Base => api
.model("lmz/mt5-tokenizers".into())
.get("mt5-base.tokenizer.json")?,
Which::Mt5Small => api
.model("lmz/mt5-tokenizers".into())
.get("mt5-small.tokenizer.json")?,
Which::Mt5Large => api
.model("lmz/mt5-tokenizers".into())
.get("mt5-large.tokenizer.json")?,
_ => repo.get("tokenizer.json")?,
},
Some(f) => f.into(),
};
let weights_filename = match &args.model_file {
Some(f) => f.split(',').map(|v| v.into()).collect::<Vec<_>>(),
None => {
if model_id == "google/flan-t5-xxl" || model_id == "google/flan-ul2" {
candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?
} else {
vec![repo.get("model.safetensors")?]
}
}
};
let config = std::fs::read_to_string(config_filename)?;
let mut config: t5::Config = serde_json::from_str(&config)?;
config.use_cache = !args.disable_cache;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
Ok((
Self {
device,
config,
weights_filename,
},
tokenizer,
))
}
pub fn build_encoder(&self) -> Result<t5::T5EncoderModel> {
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)?
};
Ok(t5::T5EncoderModel::load(vb, &self.config)?)
}
pub fn build_conditional_generation(&self) -> Result<t5::T5ForConditionalGeneration> {
let vb = unsafe {
VarBuilder::from_mmaped_safetensors(&self.weights_filename, DTYPE, &self.device)?
};
Ok(t5::T5ForConditionalGeneration::load(vb, &self.config)?)
}
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let (builder, mut tokenizer) = T5ModelBuilder::load(&args)?;
let device = &builder.device;
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
match args.prompt {
Some(prompt) => {
let tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
if !args.decode {
let mut model = builder.build_encoder()?;
let start = std::time::Instant::now();
let ys = model.forward(&input_token_ids)?;
println!("{ys}");
println!("Took {:?}", start.elapsed());
} else {
let mut model = builder.build_conditional_generation()?;
let mut output_token_ids = [builder
.config
.decoder_start_token_id
.unwrap_or(builder.config.pad_token_id)
as u32]
.to_vec();
if let Some(decoder_prompt) = &args.decoder_prompt {
print!("{decoder_prompt}");
output_token_ids.extend(
tokenizer
.encode(decoder_prompt.to_string(), false)
.map_err(E::msg)?
.get_ids()
.to_vec(),
);
}
let temperature = if args.temperature <= 0. {
None
} else {
Some(args.temperature)
};
let mut logits_processor = LogitsProcessor::new(299792458, temperature, args.top_p);
let encoder_output = model.encode(&input_token_ids)?;
let start = std::time::Instant::now();
for index in 0.. {
if output_token_ids.len() > 512 {
break;
}
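// With the key-value cache enabled, only the last generated token needs to be fed back to
// the decoder; otherwise the whole output sequence is re-decoded at every step.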
let decoder_token_ids = if index == 0 || !builder.config.use_cache {
Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)?
} else {
let last_token = *output_token_ids.last().unwrap();
Tensor::new(&[last_token], device)?.unsqueeze(0)?
};
let logits = model
.decode(&decoder_token_ids, &encoder_output)?
.squeeze(0)?;
let logits = if args.repeat_penalty == 1. {
logits
} else {
let start_at = output_token_ids.len().saturating_sub(args.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
args.repeat_penalty,
&output_token_ids[start_at..],
)?
};
let next_token_id = logits_processor.sample(&logits)?;
if next_token_id as usize == builder.config.eos_token_id {
break;
}
output_token_ids.push(next_token_id);
if let Some(text) = tokenizer.id_to_token(next_token_id) {
let text = text.replace('▁', " ").replace("<0x0A>", "\n");
print!("{text}");
std::io::stdout().flush()?;
}
}
let dt = start.elapsed();
println!(
"\n{} tokens generated ({:.2} token/s)\n",
output_token_ids.len(),
output_token_ids.len() as f64 / dt.as_secs_f64(),
);
}
}
None => {
let mut model = builder.build_encoder()?;
let sentences = [
"The cat sits outside",
"A man is playing guitar",
"I love pasta",
"The new movie is awesome",
"The cat plays in the garden",
"A woman watches TV",
"The new movie is so great",
"Do you like pizza?",
];
let n_sentences = sentences.len();
let mut all_embeddings = Vec::with_capacity(n_sentences);
for sentence in sentences {
let tokens = tokenizer
.encode(sentence, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], model.device())?.unsqueeze(0)?;
let embeddings = model.forward(&token_ids)?;
println!("generated embeddings {:?}", embeddings.shape());
// Apply some avg-pooling by taking the mean embedding value for all tokens (including padding)
let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
let embeddings = if args.normalize_embeddings {
normalize_l2(&embeddings)?
} else {
embeddings
};
println!("pooled embeddings {:?}", embeddings.shape());
all_embeddings.push(embeddings)
}
let mut similarities = vec![];
for (i, e_i) in all_embeddings.iter().enumerate() {
for (j, e_j) in all_embeddings
.iter()
.enumerate()
.take(n_sentences)
.skip(i + 1)
{
let sum_ij = (e_i * e_j)?.sum_all()?.to_scalar::<f32>()?;
let sum_i2 = (e_i * e_i)?.sum_all()?.to_scalar::<f32>()?;
let sum_j2 = (e_j * e_j)?.sum_all()?.to_scalar::<f32>()?;
let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt();
similarities.push((cosine_similarity, i, j))
}
}
similarities.sort_by(|u, v| v.0.total_cmp(&u.0));
for &(score, i, j) in similarities[..5].iter() {
println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j])
}
}
}
Ok(())
}
pub fn normalize_l2(v: &Tensor) -> Result<Tensor> {
Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?)
}
| candle/candle-examples/examples/t5/main.rs/0 | {
"file_path": "candle/candle-examples/examples/t5/main.rs",
"repo_id": "candle",
"token_count": 6911
} |
/******************************************************************************
* Copyright (c) 2023, Tri Dao.
******************************************************************************/
#pragma once
namespace flash {
////////////////////////////////////////////////////////////////////////////////////////////////////
template<bool Varlen=true>
struct BlockInfo {
template<typename Params>
__device__ BlockInfo(const Params &params, const int bidb)
: sum_s_q(!Varlen || params.cu_seqlens_q == nullptr ? -1 : params.cu_seqlens_q[bidb])
, sum_s_k(!Varlen || params.cu_seqlens_k == nullptr || !params.is_seqlens_k_cumulative ? -1 : params.cu_seqlens_k[bidb])
, actual_seqlen_q(!Varlen || params.cu_seqlens_q == nullptr ? params.seqlen_q : params.cu_seqlens_q[bidb + 1] - sum_s_q)
// If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb].
// Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K.
, leftpad_k(params.leftpad_k == nullptr ? 0 : params.leftpad_k[bidb])
, seqlen_k_cache((!Varlen || params.cu_seqlens_k == nullptr ? params.seqlen_k : (params.is_seqlens_k_cumulative ? params.cu_seqlens_k[bidb + 1] - sum_s_k : params.cu_seqlens_k[bidb])) - leftpad_k)
, actual_seqlen_k(params.seqused_k ? params.seqused_k[bidb] - leftpad_k : seqlen_k_cache + (params.knew_ptr == nullptr ? 0 : params.seqlen_knew))
{
}
template <typename index_t>
__forceinline__ __device__ index_t q_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const {
return sum_s_q == -1 ? bidb * batch_stride : uint32_t(sum_s_q) * row_stride;
}
template <typename index_t>
__forceinline__ __device__ index_t k_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const {
return sum_s_k == -1 ? bidb * batch_stride + leftpad_k * row_stride : uint32_t(sum_s_k + leftpad_k) * row_stride;
}
const int sum_s_q;
const int sum_s_k;
const int actual_seqlen_q;
// We have to have seqlen_k_cache declared before actual_seqlen_k, otherwise actual_seqlen_k is set to 0.
const int leftpad_k;
const int seqlen_k_cache;
const int actual_seqlen_k;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
} // namespace flash
| candle/candle-flash-attn/kernels/block_info.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/block_info.h",
"repo_id": "candle",
"token_count": 930
} |
// Inspired by
// https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
#pragma once
/// @param COND - a boolean expression to switch by
/// @param CONST_NAME - a name given for the constexpr bool variable.
/// @param ... - code to execute for true and false
///
/// Usage:
/// ```
/// BOOL_SWITCH(flag, BoolConst, [&] {
/// some_function<BoolConst>(...);
/// });
/// ```
#define BOOL_SWITCH(COND, CONST_NAME, ...) \
[&] { \
if (COND) { \
constexpr static bool CONST_NAME = true; \
return __VA_ARGS__(); \
} else { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
} \
}()
#ifdef FLASHATTENTION_DISABLE_DROPOUT
#define DROPOUT_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define DROPOUT_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_ALIBI
#define ALIBI_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define ALIBI_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_UNEVEN_K
#define EVENK_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = true; \
return __VA_ARGS__(); \
}()
#else
#define EVENK_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_SOFTCAP
#define SOFTCAP_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define SOFTCAP_SWITCH BOOL_SWITCH
#endif
#ifdef FLASHATTENTION_DISABLE_LOCAL
#define LOCAL_SWITCH(COND, CONST_NAME, ...) \
[&] { \
constexpr static bool CONST_NAME = false; \
return __VA_ARGS__(); \
}()
#else
#define LOCAL_SWITCH BOOL_SWITCH
#endif
#define FP16_SWITCH(COND, ...) \
[&] { \
if (COND) { \
using elem_type = cutlass::half_t; \
return __VA_ARGS__(); \
} else { \
using elem_type = cutlass::bfloat16_t; \
return __VA_ARGS__(); \
} \
}()
#define HEADDIM_SWITCH(HEADDIM, ...) \
[&] { \
if (HEADDIM <= 32) { \
constexpr static int kHeadDim = 32; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 64) { \
constexpr static int kHeadDim = 64; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 96) { \
constexpr static int kHeadDim = 96; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 128) { \
constexpr static int kHeadDim = 128; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 160) { \
constexpr static int kHeadDim = 160; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 192) { \
constexpr static int kHeadDim = 192; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 224) { \
constexpr static int kHeadDim = 224; \
return __VA_ARGS__(); \
} else if (HEADDIM <= 256) { \
constexpr static int kHeadDim = 256; \
return __VA_ARGS__(); \
} \
}()
| candle/candle-flash-attn/kernels/static_switch.h/0 | {
"file_path": "candle/candle-flash-attn/kernels/static_switch.h",
"repo_id": "candle",
"token_count": 2335
} |
// WARNING: THIS IS ONLY VALID ASSUMING THAT inp IS CONTIGUOUS!
// TODO: proper error reporting when ids are larger than v_size.
#include "cuda_utils.cuh"
#include<stdint.h>
template<typename T, typename I>
__device__ void index_select(
const size_t numel,
const size_t num_dims,
const size_t *info,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
const size_t *dims = info;
const size_t *strides = info + num_dims;
bool b = is_contiguous(num_dims, dims, strides);
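// Each output element decomposes as (left_i, id_i, right_i); the indexed dimension of the
// source is replaced by the looked-up index ids[id_i].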
for (unsigned int dst_i = blockIdx.x * blockDim.x + threadIdx.x; dst_i < numel; dst_i += blockDim.x * gridDim.x) {
unsigned int left_i = dst_i / (ids_dim_size * right_size);
unsigned int id_i = dst_i / right_size % ids_dim_size;
unsigned int right_i = dst_i % right_size;
unsigned int src_i = left_i * (src_dim_size * right_size) + ids[id_i] * right_size + right_i;
unsigned strided_i = b ? src_i : get_strided_index(src_i, num_dims, dims, strides);
out[dst_i] = inp[strided_i];
}
}
#define IS_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const size_t num_dims, \
const size_t *info, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { index_select(numel, num_dims, info, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); }
template<typename T, typename I>
__device__ void gather(
const size_t numel,
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t ids_dim_size,
const size_t right_size
) {
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
size_t post = i % right_size;
size_t idx = ids[i];
size_t pre = i / (right_size * ids_dim_size);
size_t src_i = (pre * src_dim_size + idx) * right_size + post;
out[i] = inp[src_i];
}
}
#define GATHER_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const size_t numel, \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t ids_dim_size, \
const size_t right_size \
) { gather(numel, ids, inp, out, left_size, src_dim_size, ids_dim_size, right_size); }
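// index_add: for every j along the indexed dimension, out[..., ids[j], ...] += inp[..., j, ...].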
template<typename T, typename I>
__device__ void index_add(
const I *ids,
const size_t ids_dim_size,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < ids_dim_size; ++j) {
const size_t idx = ids[j];
const size_t src_i = (pre * ids_dim_size + j) * right_size + post;
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
#define IA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const size_t ids_dim_size, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { index_add(ids, ids_dim_size, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
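// scatter_add: out[..., ids[i], ...] += inp[..., i, ...], with the destination index read
// from ids at the source position.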
template<typename T, typename I>
__device__ void scatter_add(
const I *ids,
const T *inp,
T *out,
const size_t left_size,
const size_t src_dim_size,
const size_t dst_dim_size,
const size_t right_size
) {
const size_t numel = left_size * right_size;
for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numel; i += blockDim.x * gridDim.x) {
const size_t pre = i / right_size;
const size_t post = i % right_size;
for (unsigned int j = 0; j < src_dim_size; ++j) {
const size_t src_i = (pre * src_dim_size + j) * right_size + post;
const size_t idx = ids[src_i];
const size_t dst_i = (pre * dst_dim_size + idx) * right_size + post;
out[dst_i] += inp[src_i];
}
}
}
#define SA_OP(TYPENAME, INDEX_TYPENAME, FN_NAME) \
extern "C" __global__ void FN_NAME( \
const INDEX_TYPENAME *ids, \
const TYPENAME *inp, \
TYPENAME *out, \
const size_t left_size, \
const size_t src_dim_size, \
const size_t dst_dim_size, \
const size_t right_size \
) { scatter_add(ids, inp, out, left_size, src_dim_size, dst_dim_size, right_size); }
#if __CUDA_ARCH__ >= 800
IS_OP(__nv_bfloat16, int64_t, is_i64_bf16)
IS_OP(__nv_bfloat16, uint32_t, is_u32_bf16)
IS_OP(__nv_bfloat16, uint8_t, is_u8_bf16)
GATHER_OP(__nv_bfloat16, int64_t, gather_i64_bf16)
GATHER_OP(__nv_bfloat16, uint32_t, gather_u32_bf16)
GATHER_OP(__nv_bfloat16, uint8_t, gather_u8_bf16)
IA_OP(__nv_bfloat16, int64_t, ia_i64_bf16)
IA_OP(__nv_bfloat16, uint32_t, ia_u32_bf16)
IA_OP(__nv_bfloat16, uint8_t, ia_u8_bf16)
SA_OP(__nv_bfloat16, int64_t, sa_i64_bf16)
SA_OP(__nv_bfloat16, uint32_t, sa_u32_bf16)
SA_OP(__nv_bfloat16, uint8_t, sa_u8_bf16)
#endif
#if __CUDA_ARCH__ >= 530
IS_OP(__half, int64_t, is_i64_f16)
IS_OP(__half, uint32_t, is_u32_f16)
IS_OP(__half, uint8_t, is_u8_f16)
GATHER_OP(__half, int64_t, gather_i64_f16)
GATHER_OP(__half, uint32_t, gather_u32_f16)
GATHER_OP(__half, uint8_t, gather_u8_f16)
IA_OP(__half, int64_t, ia_i64_f16)
IA_OP(__half, uint32_t, ia_u32_f16)
IA_OP(__half, uint8_t, ia_u8_f16)
SA_OP(__half, int64_t, sa_i64_f16)
SA_OP(__half, uint32_t, sa_u32_f16)
SA_OP(__half, uint8_t, sa_u8_f16)
#endif
IS_OP(float, int64_t, is_i64_f32)
IS_OP(double, int64_t, is_i64_f64)
IS_OP(uint8_t, int64_t, is_i64_u8)
IS_OP(uint32_t, int64_t, is_i64_u32)
IS_OP(int64_t, int64_t, is_i64_i64)
IS_OP(float, uint32_t, is_u32_f32)
IS_OP(double, uint32_t, is_u32_f64)
IS_OP(uint8_t, uint32_t, is_u32_u8)
IS_OP(int64_t, uint32_t, is_u32_i64)
IS_OP(uint32_t, uint32_t, is_u32_u32)
IS_OP(float, uint8_t, is_u8_f32)
IS_OP(double, uint8_t, is_u8_f64)
IS_OP(uint8_t, uint8_t, is_u8_u8)
IS_OP(uint32_t, uint8_t, is_u8_u32)
IS_OP(int64_t, uint8_t, is_u8_i64)
GATHER_OP(float, int64_t, gather_i64_f32)
GATHER_OP(double, int64_t, gather_i64_f64)
GATHER_OP(uint8_t, int64_t, gather_i64_u8)
GATHER_OP(uint32_t, int64_t, gather_i64_u32)
GATHER_OP(int64_t, int64_t, gather_i64_i64)
GATHER_OP(float, uint32_t, gather_u32_f32)
GATHER_OP(double, uint32_t, gather_u32_f64)
GATHER_OP(uint8_t, uint32_t, gather_u32_u8)
GATHER_OP(int64_t, uint32_t, gather_u32_i64)
GATHER_OP(uint32_t, uint32_t, gather_u32_u32)
GATHER_OP(float, uint8_t, gather_u8_f32)
GATHER_OP(double, uint8_t, gather_u8_f64)
GATHER_OP(uint8_t, uint8_t, gather_u8_u8)
GATHER_OP(uint32_t, uint8_t, gather_u8_u32)
GATHER_OP(int64_t, uint8_t, gather_u8_i64)
IA_OP(float, int64_t, ia_i64_f32)
IA_OP(double, int64_t, ia_i64_f64)
IA_OP(uint8_t, int64_t, ia_i64_u8)
IA_OP(int64_t, int64_t, ia_i64_i64)
IA_OP(uint32_t, int64_t, ia_i64_u32)
IA_OP(float, uint32_t, ia_u32_f32)
IA_OP(double, uint32_t, ia_u32_f64)
IA_OP(uint8_t, uint32_t, ia_u32_u8)
IA_OP(int64_t, uint32_t, ia_u32_i64)
IA_OP(uint32_t, uint32_t, ia_u32_u32)
IA_OP(float, uint8_t, ia_u8_f32)
IA_OP(double, uint8_t, ia_u8_f64)
IA_OP(uint8_t, uint8_t, ia_u8_u8)
IA_OP(uint32_t, uint8_t, ia_u8_u32)
IA_OP(int64_t, uint8_t, ia_u8_i64)
SA_OP(float, int64_t, sa_i64_f32)
SA_OP(double, int64_t, sa_i64_f64)
SA_OP(uint8_t, int64_t, sa_i64_u8)
SA_OP(int64_t, int64_t, sa_i64_i64)
SA_OP(uint32_t, int64_t, sa_i64_u32)
SA_OP(float, uint32_t, sa_u32_f32)
SA_OP(double, uint32_t, sa_u32_f64)
SA_OP(uint8_t, uint32_t, sa_u32_u8)
SA_OP(int64_t, uint32_t, sa_u32_i64)
SA_OP(uint32_t, uint32_t, sa_u32_u32)
SA_OP(float, uint8_t, sa_u8_f32)
SA_OP(double, uint8_t, sa_u8_f64)
SA_OP(uint8_t, uint8_t, sa_u8_u8)
SA_OP(uint32_t, uint8_t, sa_u8_u32)
SA_OP(int64_t, uint8_t, sa_u8_i64)
| candle/candle-kernels/src/indexing.cu/0 | {
"file_path": "candle/candle-kernels/src/indexing.cu",
"repo_id": "candle",
"token_count": 4357
} |
use metal::{
Buffer, CompileOptions, ComputeCommandEncoderRef, ComputePipelineState, Device, Function,
FunctionConstantValues, Library, MTLDataType, MTLSize, NSUInteger,
};
use std::collections::HashMap;
use std::ffi::c_void;
use std::sync::RwLock;
pub mod mlx_gemm;
pub mod sort;
pub mod utils;
pub use mlx_gemm::{call_mlx_gemm, GemmDType};
pub use sort::{call_arg_sort, call_mlx_arg_sort};
pub use utils::BufferOffset;
use utils::{get_block_dims, linear_split, EncoderParam, EncoderProvider};
const AFFINE: &str = include_str!("affine.metal");
const BINARY: &str = include_str!("binary.metal");
const CAST: &str = include_str!("cast.metal");
const CONV: &str = include_str!("conv.metal");
const FILL: &str = include_str!("fill.metal");
const INDEXING: &str = include_str!("indexing.metal");
const MLX_GEMM: &str = include_str!("mlx_gemm.metal");
const MLX_SORT: &str = include_str!("mlx_sort.metal");
const QUANTIZED: &str = include_str!("quantized.metal");
const RANDOM: &str = include_str!("random.metal");
const REDUCE: &str = include_str!("reduce.metal");
const SORT: &str = include_str!("sort.metal");
const TERNARY: &str = include_str!("ternary.metal");
const UNARY: &str = include_str!("unary.metal");
const SDPA: &str = include_str!("scaled_dot_product_attention.metal");
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum DType {
BF16,
F16,
F32,
I64,
U32,
U8,
}
impl DType {
fn size_in_bytes(&self) -> usize {
match self {
Self::U8 => 1,
Self::U32 => 4,
Self::I64 => 8,
Self::BF16 => 2,
Self::F16 => 2,
Self::F32 => 4,
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Source {
Affine,
Binary,
Cast,
Conv,
Fill,
Gemm,
Indexing,
MlxSort,
Quantized,
Random,
Reduce,
Sort,
Ternary,
Unary,
Sdpa,
}
pub mod copy2d {
pub struct Kernel(pub &'static str);
pub const FLOAT: Kernel = Kernel("copy2d_f32");
pub const HALF: Kernel = Kernel("copy2d_f16");
pub const BFLOAT: Kernel = Kernel("copy2d_bf16");
pub const I64: Kernel = Kernel("copy2d_i64");
pub const U32: Kernel = Kernel("copy2d_u32");
pub const U8: Kernel = Kernel("copy2d_u8");
}
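// Generates per-dtype kernel name constants for each listed operation, in contiguous,
// contiguous_tiled and strided variants, plus the corresponding copy kernels.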
macro_rules! ops{
($($name:ident),+) => {
pub mod contiguous {
pub struct Kernel(pub &'static str);
$(
pub mod $name {
use super::Kernel;
pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32"));
pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16"));
pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16"));
pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64"));
pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32"));
pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8"));
}
)+
pub mod copy {
use super::Kernel;
pub const FLOAT: Kernel = Kernel("copy_f32");
pub const HALF: Kernel = Kernel("copy_f16");
pub const BFLOAT: Kernel = Kernel("copy_bf16");
pub const I64: Kernel = Kernel("copy_i64");
pub const U32: Kernel = Kernel("copy_u32");
pub const U8: Kernel = Kernel("copy_u8");
}
}
pub mod contiguous_tiled {
pub struct Kernel(pub &'static str);
$(
pub mod $name {
use super::Kernel;
pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32_tiled"));
pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16_tiled"));
pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16_tiled"));
pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64_tiled"));
pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32_tiled"));
pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8_tiled"));
}
)+
pub mod copy {
use super::Kernel;
pub const FLOAT: Kernel = Kernel("copy_f32_tiled");
pub const HALF: Kernel = Kernel("copy_f16_tiled");
pub const BFLOAT: Kernel = Kernel("copy_bf16_tiled");
pub const I64: Kernel = Kernel("copy_i64_tiled");
pub const U32: Kernel = Kernel("copy_u32_tiled");
pub const U8: Kernel = Kernel("copy_u8_tiled");
}
}
pub mod strided {
pub struct Kernel(pub &'static str);
$(
pub mod $name {
use super::Kernel;
pub const FLOAT: Kernel = Kernel(concat!(stringify!($name), "_f32_strided"));
pub const HALF: Kernel = Kernel(concat!(stringify!($name), "_f16_strided"));
pub const BFLOAT: Kernel = Kernel(concat!(stringify!($name), "_bf16_strided"));
pub const I64: Kernel = Kernel(concat!(stringify!($name), "_i64_strided"));
pub const U32: Kernel = Kernel(concat!(stringify!($name), "_u32_strided"));
pub const U8: Kernel = Kernel(concat!(stringify!($name), "_u8_strided"));
}
)+
pub mod copy {
use super::Kernel;
pub const FLOAT: Kernel = Kernel("copy_f32_strided");
pub const HALF: Kernel = Kernel("copy_f16_strided");
pub const BFLOAT: Kernel = Kernel("copy_bf16_strided");
pub const I64: Kernel = Kernel("copy_i64_strided");
pub const U32: Kernel = Kernel("copy_u32_strided");
pub const U8: Kernel = Kernel("copy_u8_strided");
}
}
};
}
pub mod unary {
ops!(
cos, sin, exp, sqr, sqrt, neg, log, gelu, abs, ceil, floor, relu, round, erf, gelu_erf,
tanh, recip, silu, sign, sigmoid
);
}
pub mod binary {
ops!(add, sub, mul, div, min, max, eq, ne, le, lt, ge, gt);
}
#[derive(thiserror::Error, Debug)]
pub enum MetalKernelError {
#[error("Could not lock kernel map: {0}")]
LockError(String),
#[error("Error while loading library: {0}")]
LoadLibraryError(String),
#[error("Error while loading function: {0}")]
LoadFunctionError(String),
#[error("Failed to create compute function")]
FailedToCreateComputeFunction,
#[error("Failed to create pipeline")]
FailedToCreatePipeline(String),
#[error("Invalid matmul arguments {lhs_stride:?} {rhs_stride:?} {mnk:?}")]
MatMulNonContiguous {
lhs_stride: Vec<usize>,
rhs_stride: Vec<usize>,
mnk: (usize, usize, usize),
},
#[error("Sdpa {variation} head size was {got}, expectd {expected:?}")]
SdpaHeadSizeMismatch {
variation: &'static str,
got: usize,
expected: Vec<usize>,
},
#[error("Sdpa {variation} got dtype {got:?}")]
SdpaHeadDTypeMismatch {
variation: &'static str,
got: SdpaDType,
},
}
impl<T> From<std::sync::PoisonError<T>> for MetalKernelError {
fn from(e: std::sync::PoisonError<T>) -> Self {
Self::LockError(e.to_string())
}
}
#[derive(Debug, Clone)]
pub enum KernelName {
Ref(&'static str),
Value(String),
}
impl AsRef<str> for KernelName {
fn as_ref(&self) -> &str {
match self {
Self::Ref(r) => r,
Self::Value(v) => v.as_str(),
}
}
}
impl std::hash::Hash for KernelName {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Self::Ref(r) => r.hash(state),
Self::Value(v) => v.hash(state),
}
}
}
impl PartialEq for KernelName {
fn eq(&self, other: &Self) -> bool {
let v1: &str = self.as_ref();
let v2: &str = other.as_ref();
v1 == v2
}
}
impl Eq for KernelName {}
impl From<&'static str> for KernelName {
fn from(value: &'static str) -> Self {
Self::Ref(value)
}
}
impl From<String> for KernelName {
fn from(value: String) -> Self {
Self::Value(value)
}
}
type Libraries = HashMap<Source, Library>;
type Pipelines = HashMap<(KernelName, Option<ConstantValues>), ComputePipelineState>;
#[derive(Debug)]
pub struct Kernels {
libraries: RwLock<Libraries>,
pipelines: RwLock<Pipelines>,
}
impl Default for Kernels {
fn default() -> Self {
Self::new()
}
}
impl Kernels {
pub fn new() -> Self {
let libraries = RwLock::new(Libraries::new());
let pipelines = RwLock::new(Pipelines::new());
Self {
libraries,
pipelines,
}
}
fn get_library_source(&self, source: Source) -> &'static str {
match source {
Source::Affine => AFFINE,
Source::Binary => BINARY,
Source::Cast => CAST,
Source::Conv => CONV,
Source::Fill => FILL,
Source::Gemm => MLX_GEMM,
Source::Indexing => INDEXING,
Source::MlxSort => MLX_SORT,
Source::Quantized => QUANTIZED,
Source::Random => RANDOM,
Source::Reduce => REDUCE,
Source::Sort => SORT,
Source::Ternary => TERNARY,
Source::Unary => UNARY,
Source::Sdpa => SDPA,
}
}
/// Load the given library from its [`source`].
/// If it has already been loaded, it is simply fetched from the cache.
pub fn load_library(
&self,
device: &Device,
source: Source,
) -> Result<Library, MetalKernelError> {
let mut libraries = self.libraries.write()?;
if let Some(lib) = libraries.get(&source) {
Ok(lib.clone())
} else {
let lib = {
let source_content = self.get_library_source(source);
device
.new_library_with_source(source_content, &CompileOptions::new())
.map_err(|e| MetalKernelError::LoadLibraryError(e.to_string()))?
};
libraries.insert(source, lib.clone());
Ok(lib)
}
}
fn load_function(
&self,
device: &Device,
source: Source,
name: &str,
constants: Option<FunctionConstantValues>,
) -> Result<Function, MetalKernelError> {
let func = self
.load_library(device, source)?
.get_function(name, constants)
.map_err(|e| MetalKernelError::LoadFunctionError(e.to_string()))?;
Ok(func)
}
/// Load the given pipeline.
/// Loads the library from source, then gets the function [`name`] from
/// that source.
fn load_pipeline_with_constants(
&self,
device: &Device,
source: Source,
name: impl Into<KernelName>,
constants: Option<ConstantValues>,
) -> Result<ComputePipelineState, MetalKernelError> {
let mut pipelines = self.pipelines.write()?;
let key = (name.into(), constants);
if let Some(pipeline) = pipelines.get(&key) {
Ok(pipeline.clone())
} else {
let (name, constants) = key;
let func = self.load_function(
device,
source,
name.as_ref(),
constants.as_ref().map(|c| c.function_constant_values()),
)?;
let pipeline = device
.new_compute_pipeline_state_with_function(&func)
.map_err(|e| MetalKernelError::FailedToCreatePipeline(e.to_string()))?;
pipelines.insert((name, constants), pipeline.clone());
Ok(pipeline)
}
}
/// Load the given pipeline.
/// Loads the library from source, then gets the function [`name`] from
/// that source (without constants).
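/// A usage sketch (hypothetical, assuming a default Metal device is available):
/// `kernels.load_pipeline(&device, Source::Unary, "copy_f32")`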
pub fn load_pipeline(
&self,
device: &Device,
source: Source,
name: impl Into<KernelName>,
) -> Result<ComputePipelineState, MetalKernelError> {
self.load_pipeline_with_constants(device, source, name, None)
}
}
#[allow(clippy::too_many_arguments)]
pub fn call_copy2d(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: copy2d::Kernel,
input: &Buffer,
output: &Buffer,
d1: usize,
d2: usize,
src_s: usize,
dst_s: usize,
src_o_in_bytes: usize,
dst_o_in_bytes: usize,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Unary, name.0)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
d1 as i64,
d2 as i64,
src_s as i64,
dst_s as i64,
(input, src_o_in_bytes),
(output, dst_o_in_bytes)
)
);
let grid_dims = MTLSize {
width: d1 as u64,
height: d2 as u64,
depth: 1,
};
let group_dims = get_block_dims(d1 as u64, d2 as u64, 1);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_threads(grid_dims, group_dims);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_unary_contiguous_tiled(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: unary::contiguous_tiled::Kernel,
length: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let tile_size = 2;
let tiles = length.div_ceil(tile_size);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, &input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, tiles);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_unary_contiguous(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: unary::contiguous::Kernel,
length: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Unary, kernel_name.0)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, &input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_unary_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: unary::strided::Kernel,
shape: &[usize],
input: BufferOffset,
strides: &[usize],
output: BufferOffset,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Unary, name.0)?;
let length: usize = shape.iter().product();
let num_dims: usize = shape.len();
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, num_dims, shape, strides, &input, &output));
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output.buffer, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_binary_contiguous(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: binary::contiguous::Kernel,
length: usize,
left: BufferOffset,
right: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Binary, kernel_name.0)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, &left, &right, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(left.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(right.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_binary_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: binary::strided::Kernel,
shape: &[usize],
left_input: BufferOffset,
left_strides: &[usize],
right_input: BufferOffset,
right_strides: &[usize],
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Binary, name.0)?;
let num_dims: usize = shape.len();
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let width: usize = shape.iter().product();
let length: usize = shape.iter().product();
let (thread_group_count, thread_group_size) = linear_split(&pipeline, width);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
length,
num_dims,
shape,
left_strides,
right_strides,
&left_input,
&right_input,
output
)
);
encoder.use_resource(left_input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(right_input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_cast_contiguous(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
length: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, &input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_cast_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
shape: &[usize],
input: BufferOffset,
input_strides: &[usize],
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Cast, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
let length: usize = shape.iter().product();
set_params!(
encoder,
(length, shape.len(), shape, input_strides, &input, output)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_reduce_contiguous(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
shape: &[usize],
out_length: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let length = shape.iter().product::<usize>();
let num_dims = shape.len();
let work_per_threadgroup = length / out_length;
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
length,
num_dims,
shape,
work_per_threadgroup,
&input,
output
)
);
let thread_group_count = MTLSize {
width: out_length as u64,
height: 1,
depth: 1,
};
let width = std::cmp::min(
pipeline.max_total_threads_per_threadgroup(),
(work_per_threadgroup / 2).next_power_of_two() as NSUInteger,
);
let thread_group_size = MTLSize {
width,
height: 1,
depth: 1,
};
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_reduce_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
shape: &[usize],
strides: &[usize],
out_length: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let length: usize = shape.iter().product();
let num_dims = shape.len();
let work_per_threadgroup = length / out_length;
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
length,
num_dims,
shape,
strides,
work_per_threadgroup,
&input,
output
)
);
let thread_group_count = MTLSize {
width: out_length as u64,
height: 1,
depth: 1,
};
let width = std::cmp::min(
pipeline.max_total_threads_per_threadgroup(),
(work_per_threadgroup / 2).next_power_of_two() as NSUInteger,
);
let thread_group_size = MTLSize {
width,
height: 1,
depth: 1,
};
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_last_softmax(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
length: usize,
elements: usize,
input: &Buffer,
input_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let work_per_threadgroup = elements;
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(length, work_per_threadgroup, (input, input_offset), output)
);
let out_length = length / work_per_threadgroup;
let thread_group_count = MTLSize {
width: out_length as NSUInteger,
height: 1,
depth: 1,
};
let width = std::cmp::min(
pipeline.max_total_threads_per_threadgroup(),
(work_per_threadgroup / 2).next_power_of_two() as NSUInteger,
);
let thread_group_size = MTLSize {
width,
height: 1,
depth: 1,
};
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_rms_norm(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
length: usize,
elements_to_sum: usize,
eps: f32,
input: &Buffer,
input_offset: usize,
alpha: &Buffer,
alpha_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
length,
elements_to_sum,
(input, input_offset),
output,
(alpha, alpha_offset),
eps
)
);
let out_length = length / elements_to_sum;
let thread_group_count = MTLSize {
width: out_length as u64,
height: 1,
depth: 1,
};
let width = std::cmp::min(
pipeline.max_total_threads_per_threadgroup(),
elements_to_sum as u64,
)
.next_power_of_two();
let thread_group_size = MTLSize {
width,
height: 1,
depth: 1,
};
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.set_threadgroup_memory_length(0, (width * 4).max(16) as u64);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_layer_norm(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
length: usize,
elements_to_sum: usize,
eps: f32,
input: &Buffer,
input_offset: usize,
alpha: &Buffer,
alpha_offset: usize,
beta: &Buffer,
beta_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
length,
elements_to_sum,
(input, input_offset),
output,
(alpha, alpha_offset),
(beta, beta_offset),
eps
)
);
let out_length = length / elements_to_sum;
let thread_group_count = MTLSize {
width: out_length as u64,
height: 1,
depth: 1,
};
let width = std::cmp::min(
pipeline.max_total_threads_per_threadgroup(),
elements_to_sum as u64,
)
.next_power_of_two();
let thread_group_size = MTLSize {
width,
height: 1,
depth: 1,
};
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.set_threadgroup_memory_length(0, (width * 8).max(32) as u64);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_rope_i(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
bh: usize,
td: usize,
src: &Buffer,
src_offset: usize,
cos: &Buffer,
cos_offset: usize,
sin: &Buffer,
sin_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
bh,
td,
(src, src_offset),
(cos, cos_offset),
(sin, sin_offset),
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, (bh * td) / 2);
encoder.use_resource(src, metal::MTLResourceUsage::Read);
encoder.use_resource(cos, metal::MTLResourceUsage::Read);
encoder.use_resource(sin, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_rope_thd(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
b: usize,
t: usize,
h: usize,
d: usize,
src: &Buffer,
src_offset: usize,
cos: &Buffer,
cos_offset: usize,
sin: &Buffer,
sin_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
b,
t,
h,
d,
(src, src_offset),
(cos, cos_offset),
(sin, sin_offset),
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, (b * t * h * d) / 2);
encoder.use_resource(src, metal::MTLResourceUsage::Read);
encoder.use_resource(cos, metal::MTLResourceUsage::Read);
encoder.use_resource(sin, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_rope(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
kernel_name: &'static str,
bh: usize,
td: usize,
d: usize,
src: &Buffer,
src_offset: usize,
cos: &Buffer,
cos_offset: usize,
sin: &Buffer,
sin_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Reduce, kernel_name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
bh,
td,
d,
(src, src_offset),
(cos, cos_offset),
(sin, sin_offset),
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, (bh * td) / 2);
encoder.use_resource(src, metal::MTLResourceUsage::Read);
encoder.use_resource(cos, metal::MTLResourceUsage::Read);
encoder.use_resource(sin, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_affine(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
size: usize,
input: BufferOffset,
output: &Buffer,
mul: f32,
add: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, add, &input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_affine_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
input: BufferOffset,
input_stride: &[usize],
output: &Buffer,
mul: f32,
add: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let size: usize = shape.iter().product();
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
size,
shape.len(),
shape,
input_stride,
mul,
add,
&input,
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_powf(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
size: usize,
input: BufferOffset,
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, &input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_powf_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
input: BufferOffset,
input_stride: &[usize],
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let size: usize = shape.iter().product();
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(size, shape.len(), shape, input_stride, mul, &input, output)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_elu(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
size: usize,
input: BufferOffset,
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (size, mul, &input, output));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_elu_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
input: BufferOffset,
input_stride: &[usize],
output: &Buffer,
mul: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Affine, name)?;
let size: usize = shape.iter().product();
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(size, shape.len(), shape, input_stride, mul, &input, output)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_where_cond_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
cond: BufferOffset,
cond_stride: &[usize],
left: BufferOffset,
left_stride: &[usize],
right: BufferOffset,
right_stride: &[usize],
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Ternary, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
let size: usize = shape.iter().product();
let rank = shape.len();
set_params!(
encoder,
(
size,
rank,
shape,
cond_stride,
left_stride,
right_stride,
&cond,
&left,
&right,
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, size);
encoder.use_resource(cond.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(left.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(right.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_index_select(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
ids_size: usize,
dim: usize,
contiguous: bool,
src_dims: &[usize],
src_strides: &[usize],
input: BufferOffset,
ids: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
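    // Split the work around `dim`: `left_size` is the product of the dimensions before
    // `dim` and `right_size` the product of the dimensions after it, so every selected
    // index copies a contiguous run of `right_size` elements.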
let left_size: usize = shape[..dim].iter().product();
let right_size: usize = shape[dim + 1..].iter().product();
let src_dim_size = shape[dim];
let dst_el = ids_size * left_size * right_size;
let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
dst_el,
left_size,
src_dim_size,
right_size,
ids_size,
contiguous,
src_dims,
src_strides,
&input,
&ids,
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_gather(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
ids_size: usize,
dim: usize,
input: BufferOffset,
ids: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let left_size: usize = shape[..dim].iter().product();
let right_size: usize = shape[dim + 1..].iter().product();
let src_dim_size = shape[dim];
let dst_el = ids_size * left_size * right_size;
let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
dst_el,
left_size,
src_dim_size,
right_size,
ids_size,
&input,
&ids,
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_scatter_add(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
src_shape: &[usize],
dst_shape: &[usize],
dim: usize,
input: BufferOffset,
ids: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let left_size: usize = src_shape[..dim].iter().product();
let right_size: usize = src_shape[dim + 1..].iter().product();
let src_dim_size = src_shape[dim];
let dst_el = left_size * right_size;
let dst_dim_size = dst_shape[dim];
let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
dst_el,
left_size,
src_dim_size,
right_size,
dst_dim_size,
&input,
&ids,
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_index_add(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
src_shape: &[usize],
dst_shape: &[usize],
ids_shape: &[usize],
dim: usize,
input: BufferOffset,
ids: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let left_size: usize = src_shape[..dim].iter().product();
let right_size: usize = src_shape[dim + 1..].iter().product();
let src_dim_size = src_shape[dim];
let dst_el = left_size * right_size;
let dst_dim_size = dst_shape[dim];
let ids_dim_size = ids_shape[0];
let pipeline = kernels.load_pipeline(device, Source::Indexing, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
dst_el,
left_size,
src_dim_size,
right_size,
dst_dim_size,
ids_dim_size,
&input,
&ids,
output
)
);
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(ids.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[derive(Debug, PartialEq)]
pub enum Value {
USize(usize),
Bool(bool),
F32(f32),
U16(u16),
}
impl std::hash::Hash for Value {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
match self {
Value::F32(v) => v.to_bits().hash(state),
Value::USize(v) => v.hash(state),
Value::U16(v) => v.hash(state),
Value::Bool(v) => v.hash(state),
}
}
}
impl Value {
fn data_type(&self) -> MTLDataType {
match self {
Value::USize(_) => MTLDataType::UInt,
Value::F32(_) => MTLDataType::Float,
Value::U16(_) => MTLDataType::UShort,
Value::Bool(_) => MTLDataType::Bool,
}
}
}
/// Not strictly true since `f32` does not implement `Eq`, but `F32` values are hashed via
/// `to_bits`, which is good enough for our purposes.
impl Eq for Value {}
#[derive(Debug, Eq, PartialEq, Hash)]
struct ConstantValues(Vec<(usize, Value)>);
impl ConstantValues {
pub fn new(values: Vec<(usize, Value)>) -> Self {
Self(values)
}
fn function_constant_values(&self) -> FunctionConstantValues {
let f = FunctionConstantValues::new();
for (index, value) in &self.0 {
let ty = value.data_type();
match value {
Value::USize(v) => {
f.set_constant_value_at_index(
v as *const usize as *const c_void,
ty,
*index as u64,
);
}
Value::F32(v) => {
f.set_constant_value_at_index(
v as *const f32 as *const c_void,
ty,
*index as u64,
);
}
Value::U16(v) => {
f.set_constant_value_at_index(
v as *const u16 as *const c_void,
ty,
*index as u64,
);
}
Value::Bool(v) => {
f.set_constant_value_at_index(
v as *const bool as *const c_void,
ty,
*index as u64,
);
}
}
}
f
}
}
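// Minimal usage sketch: specializing a pipeline with a function constant. The index must
// match a `[[function_constant(N)]]` declaration in the Metal source; the SDPA vector
// kernels below use index 20 for `sdpa_vector_has_mask`.
//
//     let constants = Some(ConstantValues::new(vec![(20, Value::Bool(false))]));
//     let pipeline = kernels.load_pipeline_with_constants(device, Source::Sdpa, name, constants)?;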
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub enum SdpaDType {
BF16,
F16,
F32,
}
/// SDPA full is supported when:
/// - q head dim == 32, 64, 96, 128 or 256
/// - no mask
/// - q heads == kv heads
/// - final type != bf16 (TODO maybe just template this kernel too?)
/// - q,k,v are contiguous
#[allow(clippy::too_many_arguments)]
pub fn call_sdpa_full(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
q_offset: usize,
q_shape: &[usize],
q_buffer: &Buffer,
k_offset: usize,
k_buffer: &Buffer,
v_offset: usize,
v_buffer: &Buffer,
output: &Buffer,
alpha: f32,
softcapping: f32,
itype: SdpaDType,
) -> Result<(), MetalKernelError> {
#[derive(Debug)]
#[repr(C)]
struct MLXFastAttentionParams {
m: i32,
n: i32,
k: i32,
ldq: i32, // ldq == ldo
ldk: i32,
ldv: i32,
lds: i32,
ldo: i32,
tiles_n: i32,
tiles_m: i32,
batch_stride_q: i32,
batch_stride_k: i32,
batch_stride_v: i32,
batch_stride_o: i32,
swizzle_log: i32,
gemm_n_iterations_aligned: i32,
gemm_k_iterations_aligned: i32,
gemm_sv_m_block_iterations: i32,
batch_ndim: i32,
alpha: f32,
softcapping: f32,
}
let bk = q_shape.last().unwrap();
const BN: usize = 16;
const BM: usize = 16;
const WM: usize = 2;
const WN: usize = 2;
let name = match (bk, itype) {
(32, SdpaDType::F16) => "steel_gemm_attention_bm_16_bn_16_bk_32_itype_half",
(64, SdpaDType::F16) => "steel_gemm_attention_bm_16_bn_16_bk_64_itype_half",
(96, SdpaDType::F16) => "steel_gemm_attention_bm_16_bn_16_bk_96_itype_half",
(128, SdpaDType::F16) => "steel_gemm_attention_bm_16_bn_16_bk_128_itype_half",
(256, SdpaDType::F16) => "steel_gemm_attention_bm_16_bn_16_bk_256_itype_half",
(32, SdpaDType::F32) => "steel_gemm_attention_bm_16_bn_16_bk_32_itype_float",
(64, SdpaDType::F32) => "steel_gemm_attention_bm_16_bn_16_bk_64_itype_float",
(96, SdpaDType::F32) => "steel_gemm_attention_bm_16_bn_16_bk_96_itype_float",
(128, SdpaDType::F32) => "steel_gemm_attention_bm_16_bn_16_bk_128_itype_float",
(256, SdpaDType::F32) => "steel_gemm_attention_bm_16_bn_16_bk_256_itype_float",
(other, SdpaDType::F16 | SdpaDType::F32) => {
return Err(MetalKernelError::SdpaHeadSizeMismatch {
variation: "full",
got: *other,
expected: vec![32, 64, 96, 128, 256],
})
}
(_, SdpaDType::BF16) => {
return Err(MetalKernelError::SdpaHeadDTypeMismatch {
variation: "full",
got: SdpaDType::BF16,
})
}
};
let pipeline = kernels.load_pipeline(device, Source::Sdpa, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
// q = (bs, qhead, seq, hidden)
// k/v = (bs, kv_head, seq, hidden)
let qseq = q_shape[q_shape.len() - 2];
let m = q_shape[q_shape.len() - 2];
let n = m;
let k = q_shape[q_shape.len() - 1];
let bs_out = q_shape[0] * q_shape[1];
let batch_shape = [q_shape[0] * q_shape[1]];
let dk = q_shape[q_shape.len() - 1];
let ldq = dk;
let ldk = dk;
let ldv = dk;
let lds = BN;
let ldo = dk;
let tn = 1;
let tm = m.div_ceil(BM);
let b_stride_q = dk * qseq;
let b_stride_k = dk * qseq;
let b_stride_v = dk * qseq;
let b_stride_o = dk * qseq;
let swizzle_log = 0;
let gemm_n_iterations_aligned = n.div_ceil(BN);
let gemm_k_iterations_aligned = k.div_ceil(*bk);
let gemm_sv_m_block_iterations = m.div_ceil(BM);
let batch_ndim = batch_shape.len();
let alpha = if softcapping != 1. {
alpha / softcapping
} else {
alpha
};
let params = MLXFastAttentionParams {
m: m as i32,
n: n as i32,
k: k as i32,
ldq: ldq as i32,
ldk: ldk as i32,
ldv: ldv as i32,
lds: lds as i32,
ldo: ldo as i32,
tiles_n: tn,
tiles_m: tm as i32,
batch_stride_q: b_stride_q as i32,
batch_stride_k: b_stride_k as i32,
batch_stride_v: b_stride_v as i32,
batch_stride_o: b_stride_o as i32,
swizzle_log,
gemm_n_iterations_aligned: gemm_n_iterations_aligned as i32,
gemm_k_iterations_aligned: gemm_k_iterations_aligned as i32,
gemm_sv_m_block_iterations: gemm_sv_m_block_iterations as i32,
batch_ndim: batch_ndim as i32,
alpha,
softcapping,
};
let batch_strides = [b_stride_q, b_stride_k, b_stride_v, b_stride_o];
impl EncoderParam for MLXFastAttentionParams {
fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) {
encoder.set_bytes(
position,
core::mem::size_of::<MLXFastAttentionParams>() as u64,
&data as *const MLXFastAttentionParams as *const c_void,
);
}
}
set_params!(
encoder,
(
(q_buffer, q_offset),
(k_buffer, k_offset),
(v_buffer, v_offset),
output,
params,
&batch_shape[..],
&batch_strides[..]
)
);
let grid_dims = MTLSize {
width: 1,
height: tm as u64,
depth: bs_out as u64,
};
let group_dims = MTLSize {
width: 32,
height: WM as u64,
depth: WN as u64,
};
encoder.use_resource(q_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(k_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(v_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(grid_dims, group_dims);
Ok(())
}
/// SDPA vector is supported when:
/// - q head dim == 32, 64, 96, 128 or 256
/// - no mask
/// - q,k,v are contiguous
#[allow(clippy::too_many_arguments)]
pub fn call_sdpa_vector(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
q_offset: usize,
q_shape: &[usize],
q_buffer: &Buffer,
k_offset: usize,
k_shape: &[usize],
k_stride: &[usize],
k_buffer: &Buffer,
v_offset: usize,
v_stride: &[usize],
v_buffer: &Buffer,
output: &Buffer,
alpha: f32,
softcapping: f32,
itype: SdpaDType,
) -> Result<(), MetalKernelError> {
let bk = q_shape.last().unwrap();
let gqa_factor = (q_shape[1] / k_shape[1]) as i32;
let n = k_shape[2] as i32;
let b = (q_shape[0] * q_shape[1]) as i32;
let kstride = k_stride[1];
let vstride = v_stride[1];
let name = match (bk, itype) {
(32, SdpaDType::F16) => "sdpa_vector_float16_t_32",
(64, SdpaDType::F16) => "sdpa_vector_float16_t_64",
(96, SdpaDType::F16) => "sdpa_vector_float16_t_96",
(128, SdpaDType::F16) => "sdpa_vector_float16_t_128",
(256, SdpaDType::F16) => "sdpa_vector_float16_t_256",
(32, SdpaDType::BF16) => "sdpa_vector_bfloat16_t_32",
(64, SdpaDType::BF16) => "sdpa_vector_bfloat16_t_64",
(96, SdpaDType::BF16) => "sdpa_vector_bfloat16_t_96",
(128, SdpaDType::BF16) => "sdpa_vector_bfloat16_t_128",
(256, SdpaDType::BF16) => "sdpa_vector_bfloat16_t_256",
(32, SdpaDType::F32) => "sdpa_vector_float_32",
(64, SdpaDType::F32) => "sdpa_vector_float_64",
(96, SdpaDType::F32) => "sdpa_vector_float_96",
(128, SdpaDType::F32) => "sdpa_vector_float_128",
(256, SdpaDType::F32) => "sdpa_vector_float_256",
(other, _) => {
return Err(MetalKernelError::SdpaHeadSizeMismatch {
variation: "vector",
got: *other,
expected: vec![32, 64, 96, 128, 256],
})
}
};
let alpha = if softcapping != 1. {
alpha / softcapping
} else {
alpha
};
let constants = Some(ConstantValues::new(vec![(
20,
Value::Bool(/* sdpa_vector_has_mask */ false),
)]));
let pipeline = kernels.load_pipeline_with_constants(device, Source::Sdpa, name, constants)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
// q = (bs, qhead, seq, hidden)
// k/v = (bs, kv_head, kv_seq, hidden)
set_params!(
encoder,
(
(q_buffer, q_offset),
(k_buffer, k_offset),
(v_buffer, v_offset),
output,
gqa_factor,
n,
kstride,
vstride,
alpha,
softcapping
)
);
let grid_dims = MTLSize {
width: 1,
height: b as u64,
depth: 1_u64,
};
let group_dims = MTLSize {
width: 1024,
height: 1,
depth: 1,
};
encoder.use_resource(q_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(k_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(v_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(grid_dims, group_dims);
Ok(())
}
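/// Number of partial blocks the KV sequence is split into by the first pass of the
/// two-pass SDPA vector kernel; the second pass reduces these partials (stored in the
/// `intermediate`, `sums` and `maxs` buffers) into the final output.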
pub const SDPA_2PASS_BLOCKS: usize = 32;
/// SDPA vector 2pass is supported when:
/// - q head dim == 32, 64, 96, 128 or 256
/// - no mask
/// - q,k,v are contiguous
#[allow(clippy::too_many_arguments)]
pub fn call_sdpa_vector_2pass(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
q_offset: usize,
q_shape: &[usize],
q_buffer: &Buffer,
k_offset: usize,
k_shape: &[usize],
k_stride: &[usize],
k_buffer: &Buffer,
v_offset: usize,
v_stride: &[usize],
v_buffer: &Buffer,
output: &Buffer,
intermediate: &Buffer,
sums: &Buffer,
maxs: &Buffer,
alpha: f32,
softcapping: f32,
itype: SdpaDType,
) -> Result<(), MetalKernelError> {
let bk = q_shape.last().unwrap();
// First pass
{
let name_pass1 = match (bk, itype) {
(32, SdpaDType::F16) => "sdpa_vector_2pass_1_float16_t_32",
(64, SdpaDType::F16) => "sdpa_vector_2pass_1_float16_t_64",
(96, SdpaDType::F16) => "sdpa_vector_2pass_1_float16_t_96",
(128, SdpaDType::F16) => "sdpa_vector_2pass_1_float16_t_128",
(256, SdpaDType::F16) => "sdpa_vector_2pass_1_float16_t_256",
(32, SdpaDType::BF16) => "sdpa_vector_2pass_1_bfloat16_t_32",
(64, SdpaDType::BF16) => "sdpa_vector_2pass_1_bfloat16_t_64",
(96, SdpaDType::BF16) => "sdpa_vector_2pass_1_bfloat16_t_96",
(128, SdpaDType::BF16) => "sdpa_vector_2pass_1_bfloat16_t_128",
(256, SdpaDType::BF16) => "sdpa_vector_2pass_1_bfloat16_t_256",
(32, SdpaDType::F32) => "sdpa_vector_2pass_1_float_32",
(64, SdpaDType::F32) => "sdpa_vector_2pass_1_float_64",
(96, SdpaDType::F32) => "sdpa_vector_2pass_1_float_96",
(128, SdpaDType::F32) => "sdpa_vector_2pass_1_float_128",
(256, SdpaDType::F32) => "sdpa_vector_2pass_1_float_256",
(other, _) => {
return Err(MetalKernelError::SdpaHeadSizeMismatch {
variation: "vector_2pass_1",
got: *other,
expected: vec![32, 64, 96, 128, 256],
})
}
};
let gqa_factor = (q_shape[1] / k_shape[1]) as i32;
let n = k_shape[2] as i32;
let b = (q_shape[0] * q_shape[1]) as i32;
let kstride = k_stride[1];
let vstride = v_stride[1];
let alpha = if softcapping != 1. {
alpha / softcapping
} else {
alpha
};
let constants = Some(ConstantValues::new(vec![(
20,
Value::Bool(/* sdpa_vector_has_mask */ false),
)]));
let pipeline =
kernels.load_pipeline_with_constants(device, Source::Sdpa, name_pass1, constants)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
// q = (bs, qhead, seq, hidden)
// k/v = (bs, kv_head, kv_seq, hidden)
set_params!(
encoder,
(
(q_buffer, q_offset),
(k_buffer, k_offset),
(v_buffer, v_offset),
intermediate,
sums,
maxs,
gqa_factor,
n,
kstride,
vstride,
alpha,
softcapping
)
);
let grid_dims = MTLSize {
width: 1,
height: b as u64,
depth: SDPA_2PASS_BLOCKS as u64,
};
let group_dims = MTLSize {
width: 8 * 32,
height: 1,
depth: 1,
};
encoder.use_resource(q_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(k_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(v_buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(intermediate, metal::MTLResourceUsage::Write);
encoder.use_resource(sums, metal::MTLResourceUsage::Write);
encoder.use_resource(maxs, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(grid_dims, group_dims);
}
// Final pass
{
let name_pass2 = match (bk, itype) {
(32, SdpaDType::F16) => "sdpa_vector_2pass_2_float16_t_32",
(64, SdpaDType::F16) => "sdpa_vector_2pass_2_float16_t_64",
(96, SdpaDType::F16) => "sdpa_vector_2pass_2_float16_t_96",
(128, SdpaDType::F16) => "sdpa_vector_2pass_2_float16_t_128",
(256, SdpaDType::F16) => "sdpa_vector_2pass_2_float16_t_256",
(32, SdpaDType::BF16) => "sdpa_vector_2pass_2_bfloat16_t_32",
(64, SdpaDType::BF16) => "sdpa_vector_2pass_2_bfloat16_t_64",
(96, SdpaDType::BF16) => "sdpa_vector_2pass_2_bfloat16_t_96",
(128, SdpaDType::BF16) => "sdpa_vector_2pass_2_bfloat16_t_128",
(256, SdpaDType::BF16) => "sdpa_vector_2pass_2_bfloat16_t_256",
(32, SdpaDType::F32) => "sdpa_vector_2pass_2_float_32",
(64, SdpaDType::F32) => "sdpa_vector_2pass_2_float_64",
(96, SdpaDType::F32) => "sdpa_vector_2pass_2_float_96",
(128, SdpaDType::F32) => "sdpa_vector_2pass_2_float_128",
(256, SdpaDType::F32) => "sdpa_vector_2pass_2_float_256",
(other, _) => {
return Err(MetalKernelError::SdpaHeadSizeMismatch {
variation: "vector_2pass_2",
got: *other,
expected: vec![32, 64, 96, 128, 256],
})
}
};
let b = (q_shape[0] * q_shape[1]) as i32;
let pipeline = kernels.load_pipeline(device, Source::Sdpa, name_pass2)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
// q = (bs, qhead, seq, hidden)
// k/v = (bs, kv_head, kv_seq, hidden)
set_params!(encoder, (intermediate, sums, maxs, output));
let grid_dims = MTLSize {
width: 1,
height: b as u64,
depth: 1,
};
let group_dims = MTLSize {
width: 1024,
height: 1,
depth: 1,
};
encoder.use_resource(intermediate, metal::MTLResourceUsage::Write);
encoder.use_resource(sums, metal::MTLResourceUsage::Write);
encoder.use_resource(maxs, metal::MTLResourceUsage::Write);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(grid_dims, group_dims);
}
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_im2col1d_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
strides: &[usize],
(k_size, stride, padding, dilation): (usize, usize, usize, usize),
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
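    // Standard 1-D convolution output length:
    //   l_out = (l_in + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1
    // e.g. l_in = 10, k_size = 3, stride = 1, padding = 0, dilation = 1 gives l_out = 8.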
let l_out = (shape[2] + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1;
let dst_el = shape[0] * l_out * shape[1] * k_size;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(dst_el, l_out, k_size, stride, padding, dilation, shape, strides, &input, output)
);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_col2im1d(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
k_size: usize,
stride: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
let l_in = shape[1];
let c_out = shape[2];
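    // Transposed 1-D convolution output length (no padding, output padding or dilation here):
    //   l_out = (l_in - 1) * stride + k_size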
let l_out = (l_in - 1) * stride + k_size;
let dst_el = shape[0] * c_out * l_out;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(dst_el, l_out, l_in, c_out, k_size, stride, &input, output)
);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_im2col_strided(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
strides: &[usize],
(h_k, w_k, stride, padding, dilation): (usize, usize, usize, usize, usize),
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
let h = shape[2];
let w = shape[3];
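    // Standard 2-D convolution output size, computed per spatial dimension (the same
    // stride, padding and dilation are applied to both height and width here):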
let h_out = (h + 2 * padding - dilation * (h_k - 1) - 1) / stride + 1;
let w_out = (w + 2 * padding - dilation * (w_k - 1) - 1) / stride + 1;
let dst_el = shape[0] * h_out * w_out * shape[1] * h_k * w_k;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
dst_el, h_out, w_out, h_k, w_k, stride, padding, dilation, shape, strides, &input,
output
)
);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_upsample_nearest_2d(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
strides: &[usize],
out_w: usize,
out_h: usize,
input: BufferOffset,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Conv, name)?;
let dst_el = out_w * out_h * shape[0] * shape[1];
let scale_w = shape[2] as f32 / out_w as f32;
let scale_h = shape[3] as f32 / out_h as f32;
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(out_w, out_h, scale_w, scale_h, shape, strides, &input, output)
);
encoder.use_resource(input.buffer, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_random_uniform(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
min: f32,
max: f32,
length: usize,
seed: &Buffer,
buffer: &Buffer,
) -> Result<(), MetalKernelError> {
if min >= max {
return Err(MetalKernelError::LoadLibraryError(
"min must be less than max".to_string(),
));
}
let pipeline = kernels.load_pipeline(device, Source::Random, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let odd = (length % 2 != 0) as usize;
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length / 2 + odd);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, min, max, seed, buffer));
encoder.use_resource(
seed,
metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write,
);
encoder.use_resource(buffer, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_random_normal(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
mean: f32,
stddev: f32,
length: usize,
seed: &Buffer,
buffer: &Buffer,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Random, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
let odd = (length % 2 != 0) as usize;
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length / 2 + odd);
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (length, mean, stddev, seed, buffer));
encoder.use_resource(
seed,
metal::MTLResourceUsage::Read | metal::MTLResourceUsage::Write,
);
encoder.use_resource(buffer, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
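/// GGML quantization / storage types supported by the quantized mat-vec kernels below.
/// The variants mirror the `ggml_type` names used by ggml / llama.cpp.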
#[derive(Debug, Clone, Copy)]
pub enum GgmlDType {
Q4_0,
Q4_1,
Q5_0,
Q5_1,
Q8_0,
Q8_1,
Q2K,
Q3K,
Q4K,
Q5K,
Q6K,
Q8K,
F16,
F32,
}
#[allow(clippy::too_many_arguments)]
pub fn call_quantized_matmul_mv_t(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
dtype: GgmlDType,
(b, m, n, k): (usize, usize, usize, usize),
lhs: &Buffer,
lhs_offset: usize,
rhs: &Buffer,
dst_offset: usize,
dst: &Buffer,
) -> Result<(), MetalKernelError> {
    // ggml-style parameters: `ne*` are element counts and `nb*` are strides, with
    // dimensions listed from the innermost one outwards, i.e. everything is in reverse
    // order relative to the (b, m, n, k) layout above.
let ne00 = k as i64;
let ne01 = n as i64;
let ne02 = b as i64;
let ne03 = 1i64;
let nb00 = 0i64;
let nb01 = 0i64;
let nb02 = 0i64;
let ne10 = k as i64;
let ne11 = m as i64;
let ne12 = b as i64;
let ne13 = 1i64;
let nb10 = 0i64;
let nb11 = 0i64;
let nb12 = 0i64;
let ne0 = n as i64;
let ne1 = m as i64;
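    // Broadcast ratios between the src1 (activation) and src0 (weight) batch dimensions,
    // as expected by the ggml `mul_mv` kernels; both are 1 here since the batch sizes match.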
let r2: u32 = (ne12 / ne02) as u32;
let r3: u32 = (ne13 / ne03) as u32;
let (nth0, nth1, align) = match dtype {
GgmlDType::Q4_0
| GgmlDType::Q4_1
| GgmlDType::Q5_0
| GgmlDType::Q5_1
| GgmlDType::Q8_0
| GgmlDType::Q8_1 => {
let nth0 = 8;
let nth1 = 8;
let align = 8;
(nth0, nth1, align)
}
GgmlDType::Q2K => {
// Fixing a bug in Metal for GGML
// https://github.com/ggerganov/llama.cpp/blob/b8109bc0139f15a5b321909f47510b89dca47ffc/ggml-metal.m#L1576
let nth0 = 2;
let nth1 = 32;
let align = 4;
(nth0, nth1, align)
}
GgmlDType::Q4K => {
let nth0 = 4;
let nth1 = 8;
let align = 4;
(nth0, nth1, align)
}
GgmlDType::Q3K | GgmlDType::Q5K => {
let nth0 = 2;
let nth1 = 32;
let align = 4;
(nth0, nth1, align)
}
GgmlDType::Q6K => {
let nth0 = 2;
let nth1 = 32;
let align = 2;
(nth0, nth1, align)
}
GgmlDType::F16 | GgmlDType::Q8K => {
            // The original implementation uses rows
let nth0 = 32;
let nth1 = 1;
let align = 8;
(nth0, nth1, align)
}
GgmlDType::F32 => {
let nth0 = 32;
let nth1 = 1;
let align = 8;
(nth0, nth1, align)
}
};
let thread_groups_count = MTLSize {
width: divide(ne01 as usize, align),
height: ne11 as u64,
depth: (ne12 * ne13) as u64,
};
let threads_per_threadgroup = MTLSize {
width: nth0,
height: nth1,
depth: 1,
};
let name = match dtype {
GgmlDType::Q4_0 => "kernel_mul_mv_q4_0_f32",
GgmlDType::Q4_1 => "kernel_mul_mv_q4_1_f32",
GgmlDType::Q5_0 => "kernel_mul_mv_q5_0_f32",
GgmlDType::Q5_1 => "kernel_mul_mv_q5_1_f32",
GgmlDType::Q8_0 => "kernel_mul_mv_q8_0_f32",
GgmlDType::Q8_1 => "kernel_mul_mv_q8_1_f32",
GgmlDType::Q2K => "kernel_mul_mv_q2_K_f32",
GgmlDType::Q3K => "kernel_mul_mv_q3_K_f32",
GgmlDType::Q4K => "kernel_mul_mv_q4_K_f32",
GgmlDType::Q5K => "kernel_mul_mv_q5_K_f32",
GgmlDType::Q6K => "kernel_mul_mv_q6_K_f32",
GgmlDType::Q8K => "kernel_mul_mv_q8_K_f32",
GgmlDType::F16 => "kernel_mul_mv_f16_f32",
GgmlDType::F32 => "kernel_mul_mv_f32_f32",
};
let pipeline = kernels.load_pipeline(device, Source::Quantized, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
rhs,
(lhs, lhs_offset),
(dst, dst_offset),
ne00,
ne01,
ne02,
nb00,
nb01,
nb02,
ne10,
ne11,
ne12,
nb10,
nb11,
nb12,
ne0,
ne1,
r2,
r3
)
);
encoder.use_resource(lhs, metal::MTLResourceUsage::Read);
encoder.use_resource(rhs, metal::MTLResourceUsage::Read);
encoder.use_resource(dst, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_groups_count, threads_per_threadgroup);
Ok(())
}
fn divide(m: usize, b: usize) -> NSUInteger {
m.div_ceil(b) as NSUInteger
}
#[allow(clippy::too_many_arguments)]
pub fn call_pool2d(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
shape: &[usize],
strides: &[usize],
out_w: usize,
out_h: usize,
w_k: usize,
h_k: usize,
w_stride: usize,
h_stride: usize,
input: &Buffer,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let dst_el = out_w * out_h * shape[0] * shape[1];
let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?;
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(w_k, h_k, w_stride, h_stride, shape, strides, input, output)
);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn call_conv_transpose1d(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
dilation: usize,
stride: usize,
padding: usize,
out_padding: usize,
c_out: usize,
l_out: usize,
b_size: usize,
src_shape: &[usize],
src_strides: &[usize],
kernel_shape: &[usize],
kernel_strides: &[usize],
input: &Buffer,
input_offset: usize,
kernel: &Buffer,
kernel_offset: usize,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let dst_el = c_out * l_out * b_size;
let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?;
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
l_out,
stride,
padding,
out_padding,
dilation,
src_shape,
src_strides,
kernel_shape,
kernel_strides,
(input, input_offset),
(kernel, kernel_offset),
output
)
);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(kernel, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
pub struct CallConvTranspose2dCfg<'a> {
pub dilation: usize,
pub stride: usize,
pub padding: usize,
pub output_padding: usize,
pub c_out: usize,
pub out_w: usize,
pub out_h: usize,
pub b_size: usize,
pub input_dims: &'a [usize],
pub input_stride: &'a [usize],
pub kernel_dims: &'a [usize],
pub kernel_stride: &'a [usize],
pub input_offset: usize,
pub kernel_offset: usize,
}
#[allow(clippy::too_many_arguments)]
pub fn call_conv_transpose2d(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
cfg: CallConvTranspose2dCfg,
input: &Buffer,
kernel: &Buffer,
output: &Buffer,
) -> Result<(), MetalKernelError> {
let dst_el = cfg.c_out * cfg.out_w * cfg.out_h * cfg.b_size;
let pipeline: ComputePipelineState = kernels.load_pipeline(device, Source::Conv, name)?;
let (thread_group_count, thread_group_size) = linear_split(&pipeline, dst_el);
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(
encoder,
(
cfg.out_w,
cfg.out_h,
cfg.stride,
cfg.padding,
cfg.output_padding,
cfg.dilation,
cfg.input_dims,
cfg.input_stride,
cfg.kernel_dims,
cfg.kernel_stride,
(input, cfg.input_offset),
(kernel, cfg.kernel_offset),
output
)
);
encoder.use_resource(input, metal::MTLResourceUsage::Read);
encoder.use_resource(kernel, metal::MTLResourceUsage::Read);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
pub fn call_const_fill(
device: &Device,
ep: impl EncoderProvider,
kernels: &Kernels,
name: &'static str,
length: usize,
output: &Buffer,
v: f32,
) -> Result<(), MetalKernelError> {
let pipeline = kernels.load_pipeline(device, Source::Fill, name)?;
let encoder = ep.encoder();
let encoder: &ComputeCommandEncoderRef = encoder.as_ref();
encoder.set_compute_pipeline_state(&pipeline);
set_params!(encoder, (output, v, length));
let (thread_group_count, thread_group_size) = linear_split(&pipeline, length);
encoder.use_resource(output, metal::MTLResourceUsage::Write);
encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
Ok(())
}
#[cfg(test)]
mod tests;
| candle/candle-metal-kernels/src/lib.rs/0 | {
"file_path": "candle/candle-metal-kernels/src/lib.rs",
"repo_id": "candle",
"token_count": 37608
} |
use candle_metal_kernels::{binary, call_binary_contiguous, call_binary_strided, Kernels};
use half::{bf16, f16};
use metal::objc::rc::autoreleasepool;
use metal::{Device, MTLResourceOptions};
use rand;
use std::any::type_name;
use std::time::Instant;
fn main() {
let device = Device::system_default().unwrap();
let kernels = Kernels::new();
let f32_1k = (0..1000).map(|_| rand::random::<f32>()).collect::<Vec<_>>();
let f32_10k = (0..10000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
let f32_100k = (0..100000)
.map(|_| rand::random::<f32>())
.collect::<Vec<_>>();
let f16_map = |v: &[f32]| v.iter().map(|v| f16::from_f32(*v)).collect::<Vec<_>>();
let f16_1k = f16_map(&f32_1k);
let f16_10k = f16_map(&f32_10k);
let f16_100k = f16_map(&f32_100k);
let bf16_map = |v: &[f32]| v.iter().map(|v| bf16::from_f32(*v)).collect::<Vec<_>>();
let bf16_1k = bf16_map(&f32_1k);
let bf16_10k = bf16_map(&f32_10k);
let bf16_100k = bf16_map(&f32_100k);
let f32_ckernels = [
binary::contiguous::add::FLOAT,
binary::contiguous::sub::FLOAT,
binary::contiguous::mul::FLOAT,
binary::contiguous::div::FLOAT,
];
let f32_skernels = [
binary::strided::add::FLOAT,
binary::strided::sub::FLOAT,
binary::strided::mul::FLOAT,
binary::strided::div::FLOAT,
];
let f16_ckernels = [
binary::contiguous::add::HALF,
binary::contiguous::sub::HALF,
binary::contiguous::mul::HALF,
binary::contiguous::div::HALF,
];
let f16_skernels = [
binary::strided::add::HALF,
binary::strided::sub::HALF,
binary::strided::mul::HALF,
binary::strided::div::HALF,
];
let bf16_ckernels = [
binary::contiguous::add::BFLOAT,
binary::contiguous::sub::BFLOAT,
binary::contiguous::mul::BFLOAT,
binary::contiguous::div::BFLOAT,
];
let bf16_skernels = [
binary::strided::add::BFLOAT,
binary::strided::sub::BFLOAT,
binary::strided::mul::BFLOAT,
binary::strided::div::BFLOAT,
];
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11} | {5: <11}",
"dtype", "kernel", "size", "runs", "total time", "avg time"
);
// f32
run_binary_bench(&device, &kernels, &f32_1k, f32_ckernels, f32_skernels);
run_binary_bench(&device, &kernels, &f32_10k, f32_ckernels, f32_skernels);
run_binary_bench(&device, &kernels, &f32_100k, f32_ckernels, f32_skernels);
// f16
run_binary_bench(&device, &kernels, &f16_1k, f16_ckernels, f16_skernels);
run_binary_bench(&device, &kernels, &f16_10k, f16_ckernels, f16_skernels);
run_binary_bench(&device, &kernels, &f16_100k, f16_ckernels, f16_skernels);
// bf16
run_binary_bench(&device, &kernels, &bf16_1k, bf16_ckernels, bf16_skernels);
run_binary_bench(&device, &kernels, &bf16_10k, bf16_ckernels, bf16_skernels);
run_binary_bench(&device, &kernels, &bf16_100k, bf16_ckernels, bf16_skernels);
}
fn run_binary_bench<T: Clone>(
device: &Device,
kernels: &Kernels,
v: &[T],
contiguous: [binary::contiguous::Kernel; 4],
strided: [binary::strided::Kernel; 4],
) {
let command_queue = device.new_command_queue();
let options = MTLResourceOptions::StorageModeManaged;
let iterations = 1000;
let input = device.new_buffer_with_data(
v.as_ptr() as *const core::ffi::c_void,
core::mem::size_of_val(v) as u64,
options,
);
let mut output = device.new_buffer(core::mem::size_of_val(v) as u64, options);
// Contiguous
for kernel_name in contiguous {
let total_time = autoreleasepool(|| {
let command_buffer = command_queue.new_command_buffer();
let start = Instant::now();
for _ in 0..iterations {
call_binary_contiguous(
device,
&command_buffer,
kernels,
kernel_name,
v.len(),
&input,
&input,
&mut output,
)
.unwrap();
}
command_buffer.commit();
command_buffer.wait_until_completed();
start.elapsed()
});
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
type_name::<T>().split("::").last().unwrap(),
kernel_name.to_string(),
v.len(),
iterations,
total_time,
total_time / iterations
);
}
// Strided
let shape = vec![2, 5_000];
let strides = vec![2, 1];
let offset = 0;
for kernel_name in strided {
let total_time = autoreleasepool(|| {
let command_buffer = command_queue.new_command_buffer();
let start = Instant::now();
for _ in 0..iterations {
call_binary_strided(
device,
command_buffer,
&kernels,
kernel_name,
&shape,
&input,
&strides,
offset,
&input,
&strides,
offset,
&mut output,
)
.unwrap();
}
command_buffer.commit();
command_buffer.wait_until_completed();
start.elapsed()
});
println!(
"{0: <5} | {1: <19} | {2: <6} | {3: <5} | {4: <11?} | {5: <11?}",
type_name::<T>().split("::").last().unwrap(),
kernel_name.to_string(),
v.len(),
iterations,
total_time,
total_time / iterations
);
}
}
| candle/candle-metal-kernels/tmp/binary.rs/0 | {
"file_path": "candle/candle-metal-kernels/tmp/binary.rs",
"repo_id": "candle",
"token_count": 3149
} |
//! Encoding Utilities. (e.g., one-hot/cold encoding)
use candle::{bail, DType, Result, Tensor, WithDType};
/// One-hot/cold encoding.
///
/// Given an input tensor of indices, this function returns a tensor of the same shape as the input
/// tensor with an additional dimension of the given depth size. The values in the returned tensor are
/// all set to the `off_value` except for the positions represented by the indices, which are set to the `on_value`.
///
/// This method returns a tensor whose rank is one larger than that of the input tensor.
///
/// As an example, the following tensor will be encoded to a one-hot matrix:
///
/// `[[0i64, 2], [1, -1]]`
///
/// with a depth of 4 will be encoded to:
///
/// `[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 0, 0]]]`
///
/// When the input tensor index has a value of -1, the corresponding one-hot vector will be ignored,
/// resulting in a vector of values set to the `off_value`.
///
///
/// This method supports one-cold encoding by setting `on_value` to `0` and `off_value` to `1`.
/// The conventional one-hot encoding uses an `on_value` of `1` and an `off_value` of `0`.
///
/// Other encoding values can be used by setting `on_value` and `off_value` to the desired values.
///
/// # Examples
///
/// ## One-hot encoding
///
/// ```rust
/// use candle::{Shape, Tensor, Device};
/// use candle_nn::encoding::one_hot;
///
/// let device = candle::Device::Cpu;
///
/// let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device).unwrap();
/// let depth = 4;
/// let one_hot = one_hot(indices, depth, 1f32, 0f32).unwrap();
///
/// let expected_matrix = [
/// [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],
/// [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
/// ];
///
/// assert_eq!(one_hot.shape(), &Shape::from((2, 2, depth)));
///
/// let matrix = one_hot.to_vec3::<f32>().unwrap();
///
/// assert_eq!(matrix, expected_matrix);
///```
/// ## One-cold Encoding
///
/// ```rust
/// use candle::{Shape, Tensor, Device};
/// use candle_nn::encoding::one_hot;
///
///
/// let device = candle::Device::Cpu;
/// let depth = 4;
/// let indices = Tensor::new(vec![vec![0u8, 2], vec![1, 3]], &device).unwrap();
/// let one_cold = one_hot(indices, depth, 0u8, 1u8).unwrap();
///
/// let expected_matrix = [[[0, 1, 1, 1], [1, 1, 0, 1]], [[1, 0, 1, 1], [1, 1, 1, 0]]];
///
/// assert_eq!(one_cold.shape(), &Shape::from((2, 2, depth)));
///
/// let matrix = one_cold.to_vec3::<u8>().unwrap();
///
/// assert_eq!(matrix, expected_matrix);
/// ```
///
///
/// # Bails
///
/// This method bails if:
/// - One of the index values is less than -1.
/// - One of the index values is greater than or equal to the depth value.
/// - The input data type is not `U8`, `U32`, or `I64`.
///
/// # API Design
///
/// The api design for this method is loosely based on the [TensorFlow One-Hot](https://www.tensorflow.org/api_docs/python/tf/one_hot) method.
pub fn one_hot<D: WithDType>(
indices: Tensor,
depth: usize,
on_value: D,
off_value: D,
) -> Result<Tensor> {
let mut target_shape = indices.dims().to_vec();
target_shape.push(depth);
let indices = indices.flatten_all()?;
let mut out = vec![off_value; depth * indices.elem_count()];
match indices.dtype() {
DType::U8 => {
let indices = indices.to_vec1::<u8>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
DType::U32 => {
let indices = indices.to_vec1::<u32>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
DType::I64 => {
let indices = indices.to_vec1::<i64>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
dtype => {
bail!("one_hot: unsupported data type {dtype:?}, expected U8, U32, or I64")
}
};
Tensor::from_vec(out, target_shape, indices.device())
}
fn set_at_index<D: WithDType, I: Into<i64>>(
value: I,
offset: usize,
depth: usize,
v: &mut [D],
on_value: D,
) -> Result<()> {
let value = value.into();
    // An index of -1 encodes an entire row of off_values, so there is nothing to set.
if value == -1 {
return Ok(());
}
if value < -1 {
bail!(
"one_hot: invalid negative index value {value}, expected a positive index value or -1"
);
}
let value = value as usize;
if value >= depth {
bail!("one_hot: index value {value} exceeds depth {depth}")
}
let idx = offset + value;
if idx >= v.len() {
bail!("one_hot: index out of bounds {idx}, len {}", v.len());
}
v[idx] = on_value;
Ok(())
}
| candle/candle-nn/src/encoding.rs/0 | {
"file_path": "candle/candle-nn/src/encoding.rs",
"repo_id": "candle",
"token_count": 2025
} |
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle::{test_utils, DType, Device, Tensor};
use candle_nn::{batch_norm, BatchNorm, BatchNormConfig, VarBuilder, VarMap};
/* The test below has been generated using the following PyTorch code:
import torch
torch.manual_seed(19551105)
m = torch.nn.BatchNorm2d(5, affine=False)
input = torch.randn(2, 5, 3, 4)
output = m(input)
print(input.flatten())
print(output.flatten())
print(m.running_mean)
print(m.running_var)
*/
#[test]
fn batch_norm_test() -> Result<()> {
let running_mean = Tensor::zeros(5, DType::F32, &Device::Cpu)?;
let running_var = Tensor::ones(5, DType::F32, &Device::Cpu)?;
let bn = BatchNorm::new_no_bias(5, running_mean.clone(), running_var.clone(), 1e-8)?;
let input: [f32; 120] = [
-0.7493, -1.0410, 1.6977, -0.6579, 1.7982, -0.0087, 0.2812, -0.1190, 0.2908, -0.5975,
-0.0278, -0.2138, -1.3130, -1.6048, -2.2028, 0.9452, 0.4002, 0.0831, 1.0004, 0.1860,
0.5004, 0.5539, 0.9991, -0.2540, -0.0703, -0.3752, -0.1096, -0.2374, 1.0258, -2.2208,
-0.0257, 0.6073, -1.1627, -0.0964, -1.9718, 1.6577, 0.1931, -0.3692, -0.8011, 0.9059,
0.4797, 0.6521, -0.0165, -0.6683, -0.4148, 2.0649, -0.8276, 1.7947, -0.2061, 0.5812,
-1.3598, 1.6192, 1.0466, -0.4423, 0.4202, 0.1749, 0.6969, 0.2616, -0.0369, -1.4951,
-0.0814, -0.1877, 0.0267, 0.6150, 0.2402, -1.1440, -2.0068, 0.6032, -2.6639, 0.8260,
0.1085, -0.1693, 1.2805, 0.7654, -0.4930, 0.3770, 1.1309, 0.2303, 0.2949, -0.2634, -0.5225,
0.4269, 0.6341, 1.5736, 0.9827, -1.2499, 0.3509, -1.6243, -0.8123, 0.7634, -0.3047, 0.0143,
-0.4032, 0.0537, 0.7022, 0.8405, -1.2221, -1.6847, -0.0714, -0.1608, 0.5579, -1.5858,
0.4617, -0.6480, 0.1332, 0.0419, -0.9784, 0.4173, 1.2313, -1.9046, -0.1656, 0.1259, 0.0763,
1.4252, -0.9115, -0.1093, -0.3100, -0.6734, -1.4357, 0.9205,
];
let input = Tensor::new(&input, &Device::Cpu)?.reshape((2, 5, 3, 4))?;
let output = bn.forward_train(&input)?;
assert_eq!(output.dims(), &[2, 5, 3, 4]);
let output = output.flatten_all()?;
assert_eq!(
test_utils::to_vec1_round(&output, 4)?,
&[
-0.6391, -0.9414, 1.8965, -0.5444, 2.0007, 0.1283, 0.4287, 0.014, 0.4387, -0.4818,
0.1085, -0.0842, -1.6809, -2.0057, -2.6714, 0.8328, 0.2262, -0.1268, 0.8943, -0.0123,
0.3377, 0.3973, 0.8928, -0.5021, 0.0861, -0.2324, 0.0451, -0.0884, 1.2311, -2.1603,
0.1327, 0.7939, -1.055, 0.0589, -1.9002, 1.8912, 0.2918, -0.3253, -0.7993, 1.0741,
0.6063, 0.7955, 0.0617, -0.6536, -0.3754, 2.3461, -0.8284, 2.0495, -0.201, 0.6476,
-1.4446, 1.7665, 1.1493, -0.4556, 0.4741, 0.2097, 0.7723, 0.3031, -0.0186, -1.5905,
0.053, -0.0572, 0.165, 0.7746, 0.3862, -1.0481, -1.9422, 0.7624, -2.6231, 0.9933,
0.2498, -0.0381, 1.2061, 0.6327, -0.7681, 0.2004, 1.0396, 0.037, 0.109, -0.5125,
-0.8009, 0.2559, 0.4865, 1.5324, 1.1861, -1.1461, 0.5261, -1.5372, -0.689, 0.957,
-0.1587, 0.1745, -0.2616, 0.2156, 0.8931, 1.0375, -1.2614, -1.7691, 0.0015, -0.0966,
0.6921, -1.6605, 0.5866, -0.6313, 0.226, 0.1258, -0.9939, 0.5378, 1.3484, -2.0319,
-0.1574, 0.1568, 0.1034, 1.5574, -0.9614, -0.0967, -0.313, -0.7047, -1.5264, 1.0134
]
);
let bn2 = BatchNorm::new(
5,
running_mean,
running_var,
Tensor::new(&[0.5f32], &Device::Cpu)?.broadcast_as(5)?,
Tensor::new(&[-1.5f32], &Device::Cpu)?.broadcast_as(5)?,
1e-8,
)?;
let output2 = bn2.forward_train(&input)?;
assert_eq!(output2.dims(), &[2, 5, 3, 4]);
let output2 = output2.flatten_all()?;
let diff2 = ((output2 - (output * 0.5)?)? + 1.5)?.sqr()?;
let sum_diff2 = diff2.sum_keepdim(0)?;
assert_eq!(test_utils::to_vec1_round(&sum_diff2, 4)?, &[0f32]);
assert_eq!(
test_utils::to_vec1_round(bn.running_mean(), 4)?,
&[-0.0133, 0.0197, -0.0153, -0.0073, -0.0020]
);
assert_eq!(
test_utils::to_vec1_round(bn.running_var(), 4)?,
&[0.9972, 0.9842, 0.9956, 0.9866, 0.9898]
);
Ok(())
}
// This test makes sure that we can train a batch norm layer using a VarMap.
#[test]
fn train_batch_norm() -> Result<()> {
let vm = VarMap::new();
let vb = VarBuilder::from_varmap(&vm, DType::F32, &Device::Cpu);
let bn = batch_norm(1, BatchNormConfig::default(), vb)?;
    // Keep a copy of the original running mean so we can later check that it gets updated.
let original_mean = bn.running_mean().detach().copy()?;
let var_map_mean = {
vm.data()
.lock()
.unwrap()
.get("running_mean")
.unwrap()
.clone()
};
// Ensure the var map mean is the same as the running mean.
assert_eq!(
test_utils::to_vec1_round(bn.running_mean(), 4)?,
test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?,
);
    // Train with something guaranteed to be different from the running mean.
let mean_plus_one = {
let one = original_mean.ones_like()?;
original_mean.add(&one)?.reshape((1, 1))?
};
bn.forward_train(&mean_plus_one)?;
// Assert that the running mean has been updated.
assert_ne!(
test_utils::to_vec1_round(bn.running_mean(), 4)?,
test_utils::to_vec1_round(&original_mean, 4)?,
);
// Assert that the var map mean has been updated.
assert_eq!(
test_utils::to_vec1_round(bn.running_mean(), 4)?,
test_utils::to_vec1_round(var_map_mean.as_tensor(), 4)?,
);
Ok(())
}
| candle/candle-nn/tests/batch_norm.rs/0 | {
"file_path": "candle/candle-nn/tests/batch_norm.rs",
"repo_id": "candle",
"token_count": 3126
} |
use candle::test_utils::to_vec2_round;
use candle::{DType, Device, NdArray, Result, Tensor};
use candle_onnx::onnx::attribute_proto::AttributeType;
use candle_onnx::onnx::tensor_proto::DataType;
use candle_onnx::onnx::tensor_shape_proto::{dimension, Dimension};
use candle_onnx::onnx::{type_proto, TensorProto, TensorShapeProto, TypeProto};
use candle_onnx::onnx::{AttributeProto, GraphProto, ModelProto, NodeProto, ValueInfoProto};
use candle_onnx::simple_eval;
use std::collections::HashMap;
const INPUT_X: &str = "x";
const INPUT_Y: &str = "y";
const INPUT_A: &str = "a";
const OUTPUT_Z: &str = "z";
fn create_model_proto_with_graph(graph: Option<GraphProto>) -> ModelProto {
ModelProto {
metadata_props: vec![],
training_info: vec![],
functions: vec![],
ir_version: 0,
opset_import: vec![],
producer_name: "".to_string(),
producer_version: "".to_string(),
domain: "".to_string(),
model_version: 0,
doc_string: "".to_string(),
graph,
}
}
#[test]
fn test_evaluation_fails_without_defined_graph() -> Result<()> {
let manual_graph = create_model_proto_with_graph(None);
let inputs: HashMap<String, Tensor> = HashMap::new();
match candle_onnx::simple_eval(&manual_graph, inputs) {
Err(err) => assert_eq!(err.to_string(), "no graph defined in proto"),
Ok(_) => panic!("Expected an error due to undefined graph"),
}
Ok(())
}
// "Add"
#[test]
fn test_add_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Add".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 4.0f64);
Ok(())
}
// "Sub"
#[test]
fn test_sub_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Sub".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 0.0f64);
Ok(())
}
// "Mul"
#[test]
fn test_mul_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Mul".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 4.0f64);
Ok(())
}
// "Div"
#[test]
fn test_div_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Div".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 1.0f64);
Ok(())
}
// "Exp"
#[test]
fn test_exp_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Exp".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![-1.0f32, 0.0f32, 1.0f32, 2.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results[0][0], 0.36787944f32);
assert_eq!(results[0][1], 1.0f32);
assert_eq!(results[1], vec![std::f32::consts::E, 7.389056f32]);
Ok(())
}
// "Equal"
#[test]
fn test_equal_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Equal".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0];
assert_eq!(first, 1);
Ok(())
}
// "Not"
#[test]
fn test_not_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Not".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[0.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0];
assert_eq!(first, 1);
Ok(())
}
// "MatMul"
#[test]
fn test_matmul_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "MatMul".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(
INPUT_X.to_string(),
Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?,
);
inputs.insert(
INPUT_Y.to_string(),
Tensor::from_vec(
//
vec![5.0f32, 6.0f32, 7.0f32, 8.0f32],
&[2, 2],
&Device::Cpu,
)?,
);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![19.0, 22.0], vec![43.0, 50.0]]);
Ok(())
}
// "Reshape"
#[test]
fn test_reshape_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Reshape".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let y = Tensor::from_vec(
//
vec![4i64],
&[1],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
inputs.insert(INPUT_Y.to_string(), y);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec1::<f32>()?;
assert_eq!(results, vec![1.0, 2.0, 3.0, 4.0]);
Ok(())
}
// "LogSoftmax"
#[test]
fn test_logsoftmax_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "LogSoftmax".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]]
);
Ok(())
}
// "Softmax"
#[test]
fn test_softmax_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Softmax".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]]
);
Ok(())
}
// "Transpose"
#[test]
fn test_transpose_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Transpose".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 3.0], vec![2.0, 4.0]]);
Ok(())
}
// "Dropout"
#[test]
fn test_dropout_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Dropout".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 2.0], vec![3.0, 4.0]]);
Ok(())
}
// "Flatten"
#[test]
fn test_flatten_operation() -> Result<()> {
let mut att_axis = AttributeProto {
name: "axis".to_string(),
ref_attr_name: "axis".to_string(),
i: 0,
doc_string: "axis".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Flatten".to_string(),
domain: "".to_string(),
attribute: vec![att_axis.clone()],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![
1.0f32, 2.0f32, 3.0f32, 4.0f32, 5.0f32, 6.0f32, 7.0f32, 8.0f32,
],
&[2, 2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs.clone())?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]]);
att_axis.i = 1;
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Flatten".to_string(),
domain: "".to_string(),
attribute: vec![att_axis.clone()],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![1.0, 2.0, 3.0, 4.0], vec![5.0, 6.0, 7.0, 8.0]]
);
Ok(())
}
// Below are ops that are implemented but not tested yet; hedged, #[ignore]d sketches
// follow some of the markers below as starting points.
// "MaxPool"
// #[test]
// "AveragePool"
// #[test]
// "BatchNormalization"
// #[test]
// "Squeeze"
// #[test]
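// Hedged sketch for one of the untested ops listed above. It assumes "Squeeze" follows the
// opset-13 signature (axes supplied as a second i64 input), mirroring the Unsqueeze test
// further down; it is #[ignore]d because that assumption has not been verified against the
// current simple_eval implementation.
#[test]
#[ignore]
fn test_squeeze_operation_sketch() -> Result<()> {
    let manual_graph = create_model_proto_with_graph(Some(GraphProto {
        node: vec![NodeProto {
            op_type: "Squeeze".to_string(),
            domain: "".to_string(),
            attribute: vec![],
            input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
            output: vec![OUTPUT_Z.to_string()],
            name: "".to_string(),
            doc_string: "".to_string(),
        }],
        name: "".to_string(),
        initializer: vec![],
        input: vec![],
        output: vec![ValueInfoProto {
            name: OUTPUT_Z.to_string(),
            doc_string: "".to_string(),
            r#type: None,
        }],
        value_info: vec![],
        doc_string: "".to_string(),
        sparse_initializer: vec![],
        quantization_annotation: vec![],
    }));
    // A [2, 2, 1] tensor squeezed along axis 2 should come back as [2, 2] with the same
    // values in the same order.
    let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2, 1], &Device::Cpu)?;
    let y = Tensor::from_vec(vec![2i64], &[1], &Device::Cpu)?;
    let inputs = HashMap::from_iter([(INPUT_X.to_string(), x), (INPUT_Y.to_string(), y)]);
    let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
    assert_eq!(eval.len(), 1);
    let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
    assert_eq!(z.dims(), &[2, 2]);
    assert_eq!(z.to_vec2::<f32>()?, vec![vec![1.0, 2.0], vec![3.0, 4.0]]);
    Ok(())
}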
// "ConstantOfShape"
#[test]
fn test_constant_of_shape() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-31
test(&[4i64, 3, 2], Some(1.), &[1., 1., 1.])?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-31
test(&[0.], Some(0i64), &[0i64])?;
// "value" defaults to 0 f32
test(&[1i64, 2, 3, 4], None as Option<i64>, &[0., 0., 0., 0.])?;
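    // Shared helper: each case builds a fresh single-node "ConstantOfShape" graph. When a
    // fill `value` is given it is serialized into a TensorProto attribute as little-endian
    // raw bytes; the case's `input` tensor goes in under INPUT_X and the output is compared
    // against `expected` (both cast to f64) rank by rank.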
fn test(
input: impl NdArray,
value: Option<impl NdArray>,
expected: impl NdArray,
) -> Result<()> {
let mut attribute = vec![];
if let Some(value) = value {
let tensor = Tensor::new(value, &Device::Cpu)?;
let (value, data_type) = match tensor.dtype() {
DType::U8 => (
tensor.to_vec0::<u8>()?.to_le_bytes().to_vec(),
DataType::Uint8,
),
DType::U32 => (
tensor.to_vec0::<u32>()?.to_le_bytes().to_vec(),
DataType::Uint32,
),
DType::I64 => (
tensor.to_vec0::<i64>()?.to_le_bytes().to_vec(),
DataType::Int64,
),
DType::F32 => (
tensor.to_vec0::<f32>()?.to_le_bytes().to_vec(),
DataType::Float,
),
DType::F64 => (
tensor.to_vec0::<f64>()?.to_le_bytes().to_vec(),
DataType::Double,
),
_ => panic!("unsupported DType in test"),
};
let tensor = TensorProto {
data_type: data_type.into(),
dims: tensor.dims().iter().map(|v| *v as i64).collect(),
raw_data: value,
segment: None,
float_data: vec![],
int32_data: vec![],
string_data: vec![],
int64_data: vec![],
name: "".to_string(),
doc_string: "".to_string(),
external_data: vec![],
data_location: 0,
double_data: vec![],
uint64_data: vec![],
};
attribute.push(AttributeProto {
name: "value".to_string(),
ref_attr_name: "value".to_string(),
i: 0,
doc_string: "value".to_string(),
r#type: AttributeType::Tensor.into(),
f: 0.0,
s: vec![],
t: Some(tensor),
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
})
}
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ConstantOfShape".to_string(),
domain: "".to_string(),
attribute,
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(input, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval
.get(OUTPUT_Z)
.expect("Output 'z' not found")
.to_dtype(DType::F64)?;
let expected = Tensor::new(expected, &Device::Cpu)?.to_dtype(DType::F64)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Unsqueeze"
#[test]
fn test_unsqueeze() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Unsqueeze".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![
1.0f32, 2.0f32, //
3.0f32, 4.0f32, //
],
&[2, 2],
&Device::Cpu,
)?;
let y = Tensor::from_vec(vec![-1i64], &[1], &Device::Cpu)?;
let inputs = HashMap::from_iter([(INPUT_X.to_string(), x.clone()), (INPUT_Y.to_string(), y)]);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
assert_eq!(z.dims(), &[2, 2, 1]);
assert_eq!(
z.flatten_all()?.to_vec1::<f32>()?,
x.flatten_all()?.to_vec1::<f32>()?
);
Ok(())
}
// "Clip"
// #[test]
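// Hedged sketch for the untested "Clip" marker above. It assumes the opset-11+ form where
// the optional `min` bound arrives as a second input (the optional `max` input is simply
// omitted here); #[ignore]d because that assumption is not verified against simple_eval.
#[test]
#[ignore]
fn test_clip_operation_sketch() -> Result<()> {
    let manual_graph = create_model_proto_with_graph(Some(GraphProto {
        node: vec![NodeProto {
            op_type: "Clip".to_string(),
            domain: "".to_string(),
            attribute: vec![],
            input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
            output: vec![OUTPUT_Z.to_string()],
            name: "".to_string(),
            doc_string: "".to_string(),
        }],
        name: "".to_string(),
        initializer: vec![],
        input: vec![],
        output: vec![ValueInfoProto {
            name: OUTPUT_Z.to_string(),
            doc_string: "".to_string(),
            r#type: None,
        }],
        value_info: vec![],
        doc_string: "".to_string(),
        sparse_initializer: vec![],
        quantization_annotation: vec![],
    }));
    // Clamp [-1, 0, 1, 2] from below at 0: negative entries become 0, the rest pass through.
    let x = Tensor::from_vec(vec![-1.0f32, 0.0f32, 1.0f32, 2.0f32], &[2, 2], &Device::Cpu)?;
    let min = Tensor::new(0.0f32, &Device::Cpu)?;
    let inputs = HashMap::from_iter([(INPUT_X.to_string(), x), (INPUT_Y.to_string(), min)]);
    let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
    assert_eq!(eval.len(), 1);
    let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
    assert_eq!(z.to_vec2::<f32>()?, vec![vec![0.0, 0.0], vec![1.0, 2.0]]);
    Ok(())
}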
// "Gather"
#[test]
fn test_gather_operation() -> Result<()> {
// test taken from https://onnx.ai/onnx/operators/onnx__Gather.html#summary.
test(
&[[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]],
&[[0i64, 1], [1, 2]],
0,
&[[[1.0, 1.2], [2.3, 3.4]], [[2.3, 3.4], [4.5, 5.7]]],
)?;
// test taken from https://onnx.ai/onnx/operators/onnx__Gather.html#summary.
test(
&[[1.0, 1.2, 1.9], [2.3, 3.4, 3.9], [4.5, 5.7, 5.9]],
&[[0i64, 2]],
1,
&[[[1.0, 1.9]], [[2.3, 3.9]], [[4.5, 5.9]]],
)?;
// all the tests below are generated from numpy.take, which works like
// onnx's Gather operation.
test(&[1.0, 2.0, 3.0, 4.0], 3i64, 0, 4.0)?;
test(&[[1.0, 2.0, 3.0, 4.0]], 3i64, 1, &[4.0])?;
test(
&[[1.0], [2.0], [3.0], [4.0]],
&[3i64, 2],
0,
&[[4.0], [3.0]],
)?;
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]],
[[13.0, 14.0], [15.0, 16.0]],
],
1i64,
0,
&[[5.0, 6.0], [7.0, 8.0]],
)?;
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]],
[[13.0, 14.0], [15.0, 16.0]],
],
&[1i64, 0],
0,
&[[[5.0, 6.0], [7.0, 8.0]], [[1.0, 2.0], [3.0, 4.0]]],
)?;
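    // Shared helper: builds a single-node "Gather" graph with `axis` passed as an integer
    // attribute, feeds `data`/`indices` as INPUT_X/INPUT_Y and compares the result against
    // `expected` rank by rank as f64.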
fn test(
data: impl NdArray,
indices: impl NdArray,
axis: i64,
expected: impl NdArray,
) -> Result<()> {
let att_axis = AttributeProto {
name: "axis".to_string(),
ref_attr_name: "axis".to_string(),
i: axis,
doc_string: "axis".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Gather".to_string(),
domain: "".to_string(),
attribute: vec![att_axis],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(indices, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// GatherElements
#[test]
fn test_gather_elements() -> Result<()> {
// all the tests below are verified against `torch.gather()`
// Rank 1 index
test(&[1.0, 2.0, 3.0, 4.0], &[3i64], 0, &[4.0])?;
// Rank 2 index
test(&[[1.0, 2.0, 3.0, 4.0]], &[[3i64]], 1, &[[4.0]])?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-57 gather_elements_0
test(
&[[1., 2.], [3., 4.]],
&[[0i64, 0], [1, 0]],
1,
&[[1., 1.], [4., 3.]],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-57 gather_elements_1
test(
&[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
&[[1i64, 2, 0], [2, 0, 0]],
0,
&[[4., 8., 3.], [7., 2., 3.]],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-57 gather_elements_negative_indices
test(
&[[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]],
&[[-1_i64, -2, 0], [-2, 0, 0]],
0,
&[[7., 5., 3.], [4., 2., 3.]],
)?;
test(
&[[1.0], [2.0], [3.0], [4.0]],
&[[3i64], [2]],
0,
&[[4.], [3.]],
)?;
// Rank 3
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]],
[[13.0, 14.0], [15.0, 16.0]],
],
&[[[1i64]]],
0,
&[[[5.]]],
)?;
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]],
[[13.0, 14.0], [15.0, 16.0]],
],
&[[[1i64]]],
1,
&[[[3.]]],
)?;
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]],
[[13.0, 14.0], [15.0, 16.0]],
],
&[[[1i64], [0]]],
2,
&[[[2.], [3.]]],
)?;
// Error cases
// Invalid index
assert!(test(&[[1.0, 2.0, 3.0, 4.0]], &[[3i64]], 0, &[[1., 2., 3., 4.]]).is_err());
    // Invalid axis / dim
assert!(test(&[[1.0, 2.0, 3.0, 4.0]], &[[3i64]], 2, &[[1., 2., 3., 4.]]).is_err());
// Invalid rank
assert!(test(&[[1.0, 2.0, 3.0, 4.0]], &[3i64], 0, &[[1.]]).is_err());
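    // Shared helper: same structure as the Gather helper above but with a "GatherElements"
    // node; the negative cases above rely on it propagating the evaluation error.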
fn test(
data: impl NdArray,
indices: impl NdArray,
axis: i64,
expected: impl NdArray,
) -> Result<()> {
let att_axis = AttributeProto {
name: "axis".to_string(),
ref_attr_name: "axis".to_string(),
i: axis,
doc_string: "axis".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "GatherElements".to_string(),
domain: "".to_string(),
attribute: vec![att_axis],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(indices, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Size"
#[test]
fn test_size_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Size".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_scalar::<i64>()?;
assert_eq!(results, 4);
Ok(())
}
// "Shape"
#[test]
fn test_shape_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Shape".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec1::<i64>()?;
assert_eq!(results, vec![2, 2]);
Ok(())
}
// "Conv"
// #[test]
// "Concat"
// #[test]
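// Hedged sketch for the untested "Concat" marker above, assuming the node reads the
// mandatory `axis` integer attribute and concatenates its inputs along it (here two
// [2, 2] matrices stacked along axis 0). #[ignore]d until verified against simple_eval.
#[test]
#[ignore]
fn test_concat_operation_sketch() -> Result<()> {
    let att_axis = AttributeProto {
        name: "axis".to_string(),
        ref_attr_name: "axis".to_string(),
        i: 0,
        doc_string: "axis".to_string(),
        r#type: 2,
        f: 0.0,
        s: vec![],
        t: None,
        g: None,
        sparse_tensor: None,
        tp: None,
        floats: vec![],
        ints: vec![],
        strings: vec![],
        tensors: vec![],
        graphs: vec![],
        sparse_tensors: vec![],
        type_protos: vec![],
    };
    let manual_graph = create_model_proto_with_graph(Some(GraphProto {
        node: vec![NodeProto {
            op_type: "Concat".to_string(),
            domain: "".to_string(),
            attribute: vec![att_axis],
            input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
            output: vec![OUTPUT_Z.to_string()],
            name: "".to_string(),
            doc_string: "".to_string(),
        }],
        name: "".to_string(),
        initializer: vec![],
        input: vec![],
        output: vec![ValueInfoProto {
            name: OUTPUT_Z.to_string(),
            doc_string: "".to_string(),
            r#type: None,
        }],
        value_info: vec![],
        doc_string: "".to_string(),
        sparse_initializer: vec![],
        quantization_annotation: vec![],
    }));
    let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?;
    let y = Tensor::from_vec(vec![5.0f32, 6.0f32, 7.0f32, 8.0f32], &[2, 2], &Device::Cpu)?;
    let inputs = HashMap::from_iter([(INPUT_X.to_string(), x), (INPUT_Y.to_string(), y)]);
    let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
    assert_eq!(eval.len(), 1);
    let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
    assert_eq!(
        z.to_vec2::<f32>()?,
        vec![vec![1.0, 2.0], vec![3.0, 4.0], vec![5.0, 6.0], vec![7.0, 8.0]]
    );
    Ok(())
}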
// "Abs"
#[test]
fn test_abs_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Abs".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![-1.0f32, 2.0f32, -3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 2.0], vec![3.0, 4.0]]);
Ok(())
}
// "Cos"
#[test]
fn test_cos_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Cos".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
assert_eq!(to_vec2_round(z, 4)?, [[1.0, 0.5403], [-0.4161, -0.99]]);
Ok(())
}
// "Sin"
#[test]
fn test_sin_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Sin".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
assert_eq!(to_vec2_round(z, 4)?, [[0.0, 0.8415], [0.9093, 0.1411]]);
Ok(())
}
// "Neg"
#[test]
fn test_neg_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Neg".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![-1.0, -2.0], vec![-3.0, -4.0]]);
Ok(())
}
// "Erf"
// #[test]
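// Hedged sketch for the untested "Erf" marker above: a plain unary node whose expected
// values are erf(0), erf(1), erf(2), erf(3) rounded to four decimals. #[ignore]d because
// the precision of the backend's erf approximation is not pinned down here.
#[test]
#[ignore]
fn test_erf_operation_sketch() -> Result<()> {
    let manual_graph = create_model_proto_with_graph(Some(GraphProto {
        node: vec![NodeProto {
            op_type: "Erf".to_string(),
            domain: "".to_string(),
            attribute: vec![],
            input: vec![INPUT_X.to_string()],
            output: vec![OUTPUT_Z.to_string()],
            name: "".to_string(),
            doc_string: "".to_string(),
        }],
        name: "".to_string(),
        initializer: vec![],
        input: vec![],
        output: vec![ValueInfoProto {
            name: OUTPUT_Z.to_string(),
            doc_string: "".to_string(),
            r#type: None,
        }],
        value_info: vec![],
        doc_string: "".to_string(),
        sparse_initializer: vec![],
        quantization_annotation: vec![],
    }));
    let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?;
    let mut inputs: HashMap<String, Tensor> = HashMap::new();
    inputs.insert(INPUT_X.to_string(), x);
    let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
    assert_eq!(eval.len(), 1);
    let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
    assert_eq!(to_vec2_round(z, 4)?, [[0.0, 0.8427], [0.9953, 1.0]]);
    Ok(())
}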
// "Tanh"
#[test]
fn test_tanh_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Tanh".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.0, 0.7615942], vec![0.9640276, 0.9950548]]
);
Ok(())
}
// "Sigmoid"
#[test]
fn test_sigmoid_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Sigmoid".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.5, 0.7310586], vec![0.880797, 0.95257413]]
);
Ok(())
}
// "Gelu"
#[test]
fn test_gelu_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Gelu".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![0.0f32, 1.0f32, 2.0f32, 3.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.0, 0.8413448], vec![1.9544997, 2.9959502]]
);
Ok(())
}
// "Relu"
#[test]
fn test_relu_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Relu".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![-1.0f32, 1.0f32, -2.0f32, 3.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![0.0, 1.0], vec![0.0, 3.0]]);
Ok(())
}
// "Constant"
// #[test]
// "Cast"
// #[test]
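// Hedged sketch for the untested "Cast" marker above. It assumes the node reads the ONNX
// `to` attribute as an integer data-type code (DataType::Int64 here) and casts the input
// accordingly; whole numbers are used so float-to-int semantics are unambiguous.
// #[ignore]d until verified against simple_eval.
#[test]
#[ignore]
fn test_cast_operation_sketch() -> Result<()> {
    let att_to = AttributeProto {
        name: "to".to_string(),
        ref_attr_name: "to".to_string(),
        i: DataType::Int64 as i64,
        doc_string: "to".to_string(),
        r#type: 2,
        f: 0.0,
        s: vec![],
        t: None,
        g: None,
        sparse_tensor: None,
        tp: None,
        floats: vec![],
        ints: vec![],
        strings: vec![],
        tensors: vec![],
        graphs: vec![],
        sparse_tensors: vec![],
        type_protos: vec![],
    };
    let manual_graph = create_model_proto_with_graph(Some(GraphProto {
        node: vec![NodeProto {
            op_type: "Cast".to_string(),
            domain: "".to_string(),
            attribute: vec![att_to],
            input: vec![INPUT_X.to_string()],
            output: vec![OUTPUT_Z.to_string()],
            name: "".to_string(),
            doc_string: "".to_string(),
        }],
        name: "".to_string(),
        initializer: vec![],
        input: vec![],
        output: vec![ValueInfoProto {
            name: OUTPUT_Z.to_string(),
            doc_string: "".to_string(),
            r#type: None,
        }],
        value_info: vec![],
        doc_string: "".to_string(),
        sparse_initializer: vec![],
        quantization_annotation: vec![],
    }));
    let x = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu)?;
    let mut inputs: HashMap<String, Tensor> = HashMap::new();
    inputs.insert(INPUT_X.to_string(), x);
    let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
    assert_eq!(eval.len(), 1);
    let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
    assert_eq!(z.to_vec2::<i64>()?, vec![vec![1i64, 2], vec![3, 4]]);
    Ok(())
}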
// "ReduceMax"
#[test]
fn test_reduce_max() -> Result<()> {
    // Some of the tests below use random data generated with `np.random.uniform`
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-119 bool_inputs
    // No special treatment required for bool
// `np.maximum.reduce(data, axis=axes, keepdims=True)`
test(
&[[1_u8, 1], [1, 0], [0, 1], [0, 0]],
Some(vec![1]),
1,
None,
&[[1_u8], [1], [1], [0]],
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-119 default_axes_keepdims
// `np.maximum.reduce(data, axis=None, keepdims=True)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
None,
1,
None,
&[[[60.]]],
false,
)?;
// same as above but with random
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
None,
1,
None,
&[[[9.587318]]],
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-119 default_axes_donot_keep_dims
// `np.maximum.reduce(data, axis=None, keepdims=False)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
None,
0,
None,
60.,
false,
)?;
// same as above but with random
// `np.maximum.reduce(data, axis=None, keepdims=False)`
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
None,
0,
None,
9.587318,
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-119 keepdims
// `np.maximum.reduce(data, axis=tuple(axes), keepdims=True)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![1]),
1,
None,
&[[[20., 2.]], [[40., 2.]], [[60., 2.]]],
false,
)?;
// keepdims with random data
// `np.maximum.reduce(data, axis=tuple(axes), keepdims=True)`
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
Some(vec![1]),
1,
None,
&[
[[-7.318765, 7.2374434]],
[[6.304022, 4.939862]],
[[9.587318, 8.008944]],
],
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-119 negative_axes_keepdims
// axes = np.array([-1], dtype=np.int64)
// `np.maximum.reduce(data, axis=tuple(axes), keepdims=True)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1]),
1,
None,
&[[[5.], [20.]], [[30.], [40.]], [[55.], [60.]]],
false,
)?;
// axes = np.array([-2], dtype=np.int64)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-2]),
1,
None,
&[[[20., 2.]], [[40., 2.]], [[60., 2.]]],
false,
)?;
// with random
test(
&[
[[-4.1676497, -2.7603748], [-4.5138783, -0.762791]],
[[-6.3792877, 7.1619177], [-9.958144, 6.3753467]],
[[9.046973, 3.4554052], [-5.4674335, 5.4642754]],
],
Some(vec![-2]),
1,
None,
&[
[[-4.1676497, -0.762791]],
[[-6.3792877, 7.1619177]],
[[9.046973, 5.4642754]],
],
false,
)?;
// Multiple axes - keepdims=1 (true)
// axes = np.array([0, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 1]),
1,
None,
&[[[60., 2.]]],
false,
)?;
// axes = np.array([0, 2], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 2]),
1,
None,
&[[[55.], [60.]]],
false,
)?;
// axes = np.array([2, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 1]),
1,
None,
&[[[20.]], [[40.]], [[60.]]],
false,
)?;
// axes = np.array([2, 0, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 0, 1]),
1,
None,
&[[[60.]]],
false,
)?;
// Multiple axes - keepdims=0 (false)
// axes = np.array([0, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 1]),
0,
None,
&[60., 2.],
false,
)?;
// axes = np.array([0, 2], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 2]),
0,
None,
&[55., 60.],
false,
)?;
// axes = np.array([2, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 1]),
0,
None,
&[20., 40., 60.],
false,
)?;
// axes = np.array([2, 0, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 0, 1]),
0,
None,
60.,
false,
)?;
// Multiple axes - negative `axes` - keepdims=1 (true)
// axes = np.array([-1, 0, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1, 0, 1]),
1,
None,
&[[[60.]]],
false,
)?;
// Multiple axes - negative `axes` - keepdims=0 (false)
// axes = np.array([-1, 0, 1], dtype=np.int64)
    // np.maximum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1, 0, 1]),
0,
None,
60.,
false,
)?;
    // `noop_with_empty_axes = true (1)` should yield a tensor equivalent to the input tensor
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
None,
0,
Some(1),
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
false,
)?;
// Rank-0 arrays are also valid
test(42., None, 0, None, 42., false)?;
test(42., None, 1, None, 42., false)?;
// Negative test - expect error
// axes = np.array([-2, 0, 1], dtype=np.int64)
// np.maximum.reduce(data, axis=tuple(axes), keepdims=True)
// Should error out with `duplicate value in "axes"`
assert!(test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-2, 0, 1]),
1,
None,
&[[[60.]]],
false
)
.is_err());
// Negative test - expect error
// Should error out on empty set
assert!(test(&[[1_u8; 0]], Some(vec![-2, 0, 1]), 1, None, &[0.], false).is_err());
// Backward compatibility
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1, 0, 1]),
0,
None,
60.,
true,
)?;
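    // Shared helper: `keepdims` always goes in as an attribute, `noop_with_empty_axes` only
    // when no axes are supplied, and `axes` are passed either as an attribute (backward
    // compatibility path) or as the INPUT_Y tensor input. Results are compared as u8 or f64
    // depending on the input dtype.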
fn test(
data: impl NdArray,
axes: Option<Vec<i64>>,
keepdims: i64,
noop_with_empty_axes: Option<i64>,
expected: impl NdArray,
backward_comp: bool,
) -> Result<()> {
let has_axes = axes.is_some();
let att_keepdims = AttributeProto {
name: "keepdims".to_string(),
ref_attr_name: "keepdims".to_string(),
i: keepdims,
doc_string: "keepdims".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let mut attribute = vec![att_keepdims];
if let Some(noop) = noop_with_empty_axes {
if !has_axes {
let att_no_op_empty_axes = AttributeProto {
name: "noop_with_empty_axes".to_string(),
ref_attr_name: "noop_with_empty_axes".to_string(),
i: noop,
doc_string: "noop_with_empty_axes".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
attribute.push(att_no_op_empty_axes);
}
}
if has_axes && backward_comp {
attribute.push(AttributeProto {
name: "axes".to_string(),
ref_attr_name: "axes".to_string(),
i: 0,
doc_string: "axes".to_string(),
r#type: 7,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: axes.clone().unwrap_or_default(),
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
});
}
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ReduceMax".to_string(),
domain: "".to_string(),
attribute,
input: if has_axes && !backward_comp {
vec![INPUT_X.to_string(), INPUT_Y.to_string()]
} else {
vec![INPUT_X.to_string()]
},
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
let input_tensor = Tensor::new(data, &Device::Cpu)?;
let input_dtype = input_tensor.dtype();
inputs.insert(INPUT_X.to_string(), input_tensor);
if !backward_comp {
if let Some(a) = axes {
inputs.insert(INPUT_Y.to_string(), Tensor::new(a, &Device::Cpu)?);
}
}
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec0::<u8>()?, expected.to_vec0::<u8>()?)
} else {
assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?)
}
}
1 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec1::<u8>()?, expected.to_vec1::<u8>()?)
} else {
assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?)
}
}
2 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec2::<u8>()?, expected.to_vec2::<u8>()?)
} else {
assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?)
}
}
3 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec3::<u8>()?, expected.to_vec3::<u8>()?)
} else {
assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?)
}
}
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "ReduceMin"
#[test]
fn test_reduce_min() -> Result<()> {
    // Some of the tests below use random data generated with `np.random.uniform`
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-121 bool_inputs
    // No special treatment required for bool
// `np.minimum.reduce(data, axis=axes, keepdims=True)`
test(
&[[1_u8, 1], [1, 0], [0, 1], [0, 0]],
Some(vec![1]),
1,
None,
&[[1_u8], [0], [0], [0]],
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-121 default_axes_keepdims
// `np.minimum.reduce(data, axis=None, keepdims=True)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
None,
1,
None,
&[[[1.]]],
false,
)?;
// same as above but with random
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
None,
1,
None,
&[[[-8.794852]]],
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-121 default_axes_donot_keep_dims
// `np.minimum.reduce(data, axis=None, keepdims=False)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
None,
0,
None,
1.,
false,
)?;
// same as above but with random
// `np.minimum.reduce(data, axis=None, keepdims=False)`
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
None,
0,
None,
-8.794852,
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-121 keepdims
// `np.minimum.reduce(data, axis=tuple(axes), keepdims=True)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![1]),
1,
None,
&[[[5., 1.]], [[30., 1.]], [[55., 1.]]],
false,
)?;
// keepdims with random data
// `np.minimum.reduce(data, axis=tuple(axes), keepdims=True)`
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
Some(vec![1]),
1,
None,
&[
[[-7.648377, -5.4018507]],
[[4.5435624, 3.072864]],
[[-2.5058026, -8.794852]],
],
false,
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-121 negative_axes_keepdims
// axes = np.array([-1], dtype=np.int64)
// `np.minimum.reduce(data, axis=tuple(axes), keepdims=True)`
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1]),
1,
None,
&[[[1.], [2.]], [[1.], [2.]], [[1.], [2.]]],
false,
)?;
// axes = np.array([-2], dtype=np.int64)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-2]),
1,
None,
&[[[5., 1.]], [[30., 1.]], [[55., 1.]]],
false,
)?;
// with random
test(
&[
[[-4.1676497, -2.7603748], [-4.5138783, -0.762791]],
[[-6.3792877, 7.1619177], [-9.958144, 6.3753467]],
[[9.046973, 3.4554052], [-5.4674335, 5.4642754]],
],
Some(vec![-2]),
1,
None,
&[
[[-4.5138783, -2.7603748]],
[[-9.958144, 6.3753467]],
[[-5.4674335, 3.4554052]],
],
false,
)?;
// Multiple axes - keepdims=1 (true)
// axes = np.array([0, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 1]),
1,
None,
&[[[5., 1.]]],
false,
)?;
// axes = np.array([0, 2], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 2]),
1,
None,
&[[[1.], [2.]]],
false,
)?;
// axes = np.array([2, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 1]),
1,
None,
&[[[1.]], [[1.]], [[1.]]],
false,
)?;
// axes = np.array([2, 0, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 0, 1]),
1,
None,
&[[[1.]]],
false,
)?;
// Multiple axes - keepdims=0 (false)
// axes = np.array([0, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 1]),
0,
None,
&[5., 1.],
false,
)?;
// axes = np.array([0, 2], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![0, 2]),
0,
None,
&[1., 2.],
false,
)?;
// axes = np.array([2, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 1]),
0,
None,
&[1., 1., 1.],
false,
)?;
// axes = np.array([2, 0, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![2, 0, 1]),
0,
None,
1.,
false,
)?;
// Multiple axes - negative `axes` - keepdims=1 (true)
// axes = np.array([-1, 0, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=True)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1, 0, 1]),
1,
None,
&[[[1.]]],
false,
)?;
// Multiple axes - negative `axes` - keepdims=0 (false)
// axes = np.array([-1, 0, 1], dtype=np.int64)
    // np.minimum.reduce(data, axis=tuple(axes), keepdims=False)
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1, 0, 1]),
0,
None,
1.,
false,
)?;
    // `noop_with_empty_axes = true (1)` should yield a tensor equivalent to the input tensor
test(
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
None,
0,
Some(1),
&[
[[-7.648377, -5.4018507], [-7.318765, 7.2374434]],
[[6.304022, 4.939862], [4.5435624, 3.072864]],
[[-2.5058026, 8.008944], [9.587318, -8.794852]],
],
false,
)?;
// Rank-0 tensors are also valid
test(42., None, 0, None, 42., false)?;
test(42., None, 1, None, 42., false)?;
// Negative test - expect error
// axes = np.array([-2, 0, 1], dtype=np.int64)
// np.minimum.reduce(data, axis=tuple(axes), keepdims=True)
// Should error out with `duplicate value in "axes"`
assert!(test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-2, 0, 1]),
1,
None,
&[0.],
false
)
.is_err());
// Negative test - expect error
// Should error out on empty set
assert!(test(&[[1_u8; 0]], Some(vec![-2, 0, 1]), 1, None, &[0.], false).is_err());
// Backward compatibility
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-1, 0, 1]),
0,
None,
1.,
true,
)?;
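    // Shared helper: identical in structure to the ReduceMax helper above, but targeting a
    // "ReduceMin" node.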
fn test(
data: impl NdArray,
axes: Option<Vec<i64>>,
keepdims: i64,
noop_with_empty_axes: Option<i64>,
expected: impl NdArray,
backward_comp: bool,
) -> Result<()> {
let has_axes = axes.is_some();
let att_keepdims = AttributeProto {
name: "keepdims".to_string(),
ref_attr_name: "keepdims".to_string(),
i: keepdims,
doc_string: "keepdims".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let mut attribute = vec![att_keepdims];
if let Some(noop) = noop_with_empty_axes {
if !has_axes {
let att_no_op_empty_axes = AttributeProto {
name: "noop_with_empty_axes".to_string(),
ref_attr_name: "noop_with_empty_axes".to_string(),
i: noop,
doc_string: "noop_with_empty_axes".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
attribute.push(att_no_op_empty_axes);
}
}
if has_axes && backward_comp {
attribute.push(AttributeProto {
name: "axes".to_string(),
ref_attr_name: "axes".to_string(),
i: 0,
doc_string: "axes".to_string(),
r#type: 7,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: axes.clone().unwrap_or_default(),
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
});
}
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ReduceMin".to_string(),
domain: "".to_string(),
attribute,
input: if has_axes && !backward_comp {
vec![INPUT_X.to_string(), INPUT_Y.to_string()]
} else {
vec![INPUT_X.to_string()]
},
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
let input_tensor = Tensor::new(data, &Device::Cpu)?;
let input_dtype = input_tensor.dtype();
inputs.insert(INPUT_X.to_string(), input_tensor);
if !backward_comp {
if let Some(a) = axes {
inputs.insert(INPUT_Y.to_string(), Tensor::new(a, &Device::Cpu)?);
}
}
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec0::<u8>()?, expected.to_vec0::<u8>()?)
} else {
assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?)
}
}
1 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec1::<u8>()?, expected.to_vec1::<u8>()?)
} else {
assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?)
}
}
2 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec2::<u8>()?, expected.to_vec2::<u8>()?)
} else {
assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?)
}
}
3 => {
if input_dtype == DType::U8 {
assert_eq!(z.to_vec3::<u8>()?, expected.to_vec3::<u8>()?)
} else {
assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?)
}
}
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "ReduceMean"
#[test]
fn test_reduce_mean() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-120 default_axes_keepdims
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
None,
1,
&[[[18.25]]],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-120 do_no_keepdims
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![1]),
0,
&[[12.5, 1.5], [35.0, 1.5], [57.5, 1.5]],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-120 keepdims
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![1]),
1,
&[[[12.5, 1.5]], [[35.0, 1.5]], [[57.5, 1.5]]],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-120 negative_axes_keepdims
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![-2]),
1,
&[[[12.5, 1.5]], [[35.0, 1.5]], [[57.5, 1.5]]],
)?;
// All the test data below was generated based on numpy's np.mean
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![1, 2]),
0,
&[7.0, 18.25, 29.5],
)?;
test(
&[
[[5., 1.], [20., 2.]],
[[30., 1.], [40., 2.]],
[[55., 1.], [60., 2.]],
],
Some(vec![1, 2]),
1,
&[[[7.0]], [[18.25]], [[29.5]]],
)?;
test(&[1., 2., 3.], None, 1, &[2.0])?;
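// Helper that builds a single-node ReduceMean graph with `axes` (when provided) and
// `keepdims` as attributes, evaluates it, and compares the result against `expected`.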
fn test(
data: impl NdArray,
axes: Option<Vec<i64>>,
keepdims: i64,
expected: impl NdArray,
) -> Result<()> {
let has_axes = axes.is_some();
let att_axes = AttributeProto {
name: "axes".to_string(),
ref_attr_name: "axes".to_string(),
i: 0,
doc_string: "axes".to_string(),
r#type: 7,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: axes.unwrap_or_default(),
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_keepdims = AttributeProto {
name: "keepdims".to_string(),
ref_attr_name: "keepdims".to_string(),
i: keepdims,
doc_string: "keepdims".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ReduceMean".to_string(),
domain: "".to_string(),
attribute: if has_axes {
vec![att_axes, att_keepdims]
} else {
vec![att_keepdims]
},
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Sqrt"
#[test]
fn test_sqrt() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-155
test(&[1., 4., 9.], &[1., 2., 3.])?;
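// Helper that evaluates a single Sqrt node on `data` and compares the result against `expected`.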
fn test(data: impl NdArray, expected: impl NdArray) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Sqrt".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "RandomUniform"
#[test]
fn test_random_uniform() -> Result<()> {
test(vec![3, 2, 1, 4], None, None)?;
test(vec![2, 2, 2, 2], Some(-10.0), None)?;
test(vec![2, 2, 2, 2], None, Some(10.0))?;
test(vec![1, 2, 3, 4], Some(-10.0), Some(10.0))?;
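// Helper that builds a RandomUniform node with the given output `shape` (dtype DOUBLE) and
// optional `low`/`high` attributes, then checks that every sampled value falls inside the
// requested range and that the output is not constant.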
fn test(shape: Vec<i64>, low: Option<f32>, high: Option<f32>) -> Result<()> {
let att_low = AttributeProto {
name: "low".to_string(),
ref_attr_name: "low".to_string(),
i: 0,
doc_string: "low".to_string(),
r#type: 1, // FLOAT
f: low.unwrap_or(0.0),
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_high = AttributeProto {
name: "high".to_string(),
ref_attr_name: "high".to_string(),
i: 0,
doc_string: "high".to_string(),
r#type: 1, // FLOAT
f: high.unwrap_or(1.0),
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_shape = AttributeProto {
name: "shape".to_string(),
ref_attr_name: "shape".to_string(),
i: 0,
doc_string: "shape".to_string(),
r#type: 7, // INTS
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: shape,
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_dtype = AttributeProto {
name: "dtype".to_string(),
ref_attr_name: "dtype".to_string(),
i: 11, // DOUBLE
doc_string: "dtype".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let attrs = {
let mut mut_attrs = vec![att_shape, att_dtype];
if low.is_some() {
mut_attrs.push(att_low);
}
if high.is_some() {
mut_attrs.push(att_high);
}
mut_attrs
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "RandomUniform".to_string(),
domain: "".to_string(),
attribute: attrs,
input: vec![],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let eval = candle_onnx::simple_eval(&manual_graph, HashMap::new())?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let min = z
.flatten_all()?
.to_vec1()?
.into_iter()
.reduce(f64::min)
.unwrap();
let max = z
.flatten_all()?
.to_vec1()?
.into_iter()
.reduce(f64::max)
.unwrap();
assert!(min >= low.unwrap_or(0.0).into());
assert!(max <= high.unwrap_or(1.0).into());
assert_ne!(min, max);
Ok(())
}
Ok(())
}
// "RandomNormal"
#[test]
fn test_random_normal() -> Result<()> {
test(vec![3, 2, 1, 4], None, None)?;
test(vec![2, 2, 2, 2], Some(-10.0), None)?;
test(vec![2, 2, 2, 2], None, Some(10.0))?;
test(vec![1, 2, 3, 4], Some(-10.0), Some(10.0))?;
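// Helper that builds a RandomNormal node with the given output `shape` (dtype DOUBLE) and
// optional `mean`/`scale` attributes, then checks that all sampled values are pairwise distinct.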
fn test(shape: Vec<i64>, mean: Option<f32>, scale: Option<f32>) -> Result<()> {
let att_mean = AttributeProto {
name: "mean".to_string(),
ref_attr_name: "mean".to_string(),
i: 0,
doc_string: "mean".to_string(),
r#type: 1, // FLOAT
f: mean.unwrap_or(0.0),
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_scale = AttributeProto {
name: "scale".to_string(),
ref_attr_name: "scale".to_string(),
i: 0,
doc_string: "scale".to_string(),
r#type: 1, // FLOAT
f: scale.unwrap_or(1.0),
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_shape = AttributeProto {
name: "shape".to_string(),
ref_attr_name: "shape".to_string(),
i: 0,
doc_string: "shape".to_string(),
r#type: 7, // INTS
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: shape,
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_dtype = AttributeProto {
name: "dtype".to_string(),
ref_attr_name: "dtype".to_string(),
i: 11, // DOUBLE
doc_string: "dtype".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let attrs = {
let mut mut_attrs = vec![att_shape, att_dtype];
if mean.is_some() {
mut_attrs.push(att_mean);
}
if scale.is_some() {
mut_attrs.push(att_scale);
}
mut_attrs
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "RandomNormal".to_string(),
domain: "".to_string(),
attribute: attrs,
input: vec![],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let eval = candle_onnx::simple_eval(&manual_graph, HashMap::new())?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let data = z.flatten_all()?.to_vec1::<f64>()?;
// test if values are unique
for (i, a) in data.iter().enumerate() {
for (j, b) in data.iter().enumerate() {
if i == j {
continue;
};
assert_ne!(a, b);
}
}
Ok(())
}
Ok(())
}
// "Range"
#[test]
fn test_range() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-113
test(1., 5., 2., &[1., 3.])?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-113
test(10i64, 6i64, -3i64, &[10i64, 7i64])?;
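// Helper that feeds `start`, `limit` and `delta` as the three Range inputs and compares the
// generated sequence (cast to f64) against `expected`.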
fn test(
start: impl NdArray,
limit: impl NdArray,
delta: impl NdArray,
expected: impl NdArray,
) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Range".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![
INPUT_X.to_string(),
INPUT_Y.to_string(),
INPUT_A.to_string(),
],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(start, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(limit, &Device::Cpu)?);
inputs.insert(INPUT_A.to_string(), Tensor::new(delta, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval
.get(OUTPUT_Z)
.expect("Output 'z' not found")
.to_dtype(DType::F64)?;
let expected = Tensor::new(expected, &Device::Cpu)?.to_dtype(DType::F64)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Greater"
#[test]
fn test_greater() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-63
test(&[1., 2., 3.], &[3., 2., 1.], &[0u8, 0, 1])?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-63
test(&[1., 2., 3.], 2., &[0u8, 0, 1])?;
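// Helper that evaluates a single Greater node on broadcastable inputs `a` and `b` and
// compares the boolean result (cast to f64) against `expected`.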
fn test(a: impl NdArray, b: impl NdArray, expected: impl NdArray) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Greater".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(a, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(b, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval
.get(OUTPUT_Z)
.expect("Output 'z' not found")
.to_dtype(DType::F64)?;
let expected = Tensor::new(expected, &Device::Cpu)?.to_dtype(DType::F64)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Less"
#[test]
fn test_less() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-81
test(&[1., 2., 3.], &[3., 2., 1.], &[1u8, 0, 0])?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-81
test(&[1., 2., 3.], 2., &[1u8, 0, 0])?;
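// Helper that evaluates a single Less node on broadcastable inputs `a` and `b` and
// compares the boolean result (cast to f64) against `expected`.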
fn test(a: impl NdArray, b: impl NdArray, expected: impl NdArray) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Less".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(a, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(b, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval
.get(OUTPUT_Z)
.expect("Output 'z' not found")
.to_dtype(DType::F64)?;
let expected = Tensor::new(expected, &Device::Cpu)?.to_dtype(DType::F64)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Log"
#[test]
fn test_log() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-82
test(&[1., 10.], &[0., std::f64::consts::LN_10])?;
fn test(data: impl NdArray, expected: impl NdArray) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Log".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Min"
#[test]
fn test_min() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-94
test(&[3., 2., 1.], &[1., 4., 4.], &[2., 5., 0.], &[1., 2., 0.])?;
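// Helper that evaluates a single Min node over the three inputs and compares the
// element-wise minimum against `expected`.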
fn test(
a: impl NdArray,
b: impl NdArray,
c: impl NdArray,
expected: impl NdArray,
) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Min".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![
INPUT_X.to_string(),
INPUT_Y.to_string(),
INPUT_A.to_string(),
],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(a, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(b, &Device::Cpu)?);
inputs.insert(INPUT_A.to_string(), Tensor::new(c, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Where"
#[test]
fn test_where() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-173
test(
&[[1u8, 0], [1, 1]],
&[[1i64, 2], [3, 4]],
&[[9i64, 8], [7, 6]],
&[[1i64, 8], [3, 4]],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-173
test(
&[[1u8, 0], [1, 1]],
&[[1., 2.], [3., 4.]],
&[[9., 8.], [7., 6.]],
&[[1., 8.], [3., 4.]],
)?;
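// Helper that evaluates a single Where node: elements of `x` are selected where `condition`
// is non-zero, elements of `y` otherwise.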
fn test(
condition: impl NdArray,
x: impl NdArray,
y: impl NdArray,
expected: impl NdArray,
) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Where".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![
INPUT_X.to_string(),
INPUT_Y.to_string(),
INPUT_A.to_string(),
],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(condition, &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(x, &Device::Cpu)?);
inputs.insert(INPUT_A.to_string(), Tensor::new(y, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval
.get(OUTPUT_Z)
.expect("Output 'z' not found")
.to_dtype(DType::F64)?;
let expected = Tensor::new(expected, &Device::Cpu)?.to_dtype(DType::F64)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
#[test]
fn test_floor() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Floor".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
// some values taken from https://numpy.org/doc/stable/reference/generated/numpy.floor.html
vec![
f64::NAN,
f64::INFINITY,
f64::NEG_INFINITY,
-1.7,
-1.5,
-0.2,
0.2,
1.5,
1.7,
2.0,
],
&[10],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec1::<f64>()?;
assert!(results[0].is_nan());
assert_eq!(
results[1..],
vec![
f64::INFINITY,
f64::NEG_INFINITY,
-2.,
-2.,
-1.,
0.,
1.,
1.,
2.
]
);
Ok(())
}
#[test]
fn test_ceil() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Ceil".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
// some values taken from https://numpy.org/doc/stable/reference/generated/numpy.ceil.html
vec![
f64::NAN,
f64::INFINITY,
f64::NEG_INFINITY,
-1.7,
-1.5,
-0.2,
0.2,
1.5,
1.7,
2.0,
],
&[10],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec1::<f64>()?;
assert!(results[0].is_nan());
assert_eq!(
results[1..],
vec![
f64::INFINITY,
f64::NEG_INFINITY,
-1.,
-1.,
-0.,
1.,
2.,
2.,
2.
]
);
Ok(())
}
// "ArgMin"
#[test]
fn test_argmin() -> Result<()> {
// tests from https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-7
// default_axes_keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
None,
Some(1),
None,
&[[0i64, 0i64]],
)?;
// keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
Some(1),
Some(1),
None,
&[[1i64], [0i64]],
)?;
// negative_axis_keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
Some(-1),
Some(1),
None,
&[[1i64], [0i64]],
)?;
// no_keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
None,
Some(0),
None,
&[0i64, 0i64],
)?;
// tests from https://pytorch.org/docs/stable/generated/torch.argmin.html#torch.argmin
test(
&[
[0.1139, 0.2254, -0.1381, 0.3687],
[1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[1.7809, -1.2960, 0.9384, 0.1438],
],
Some(1),
Some(0),
None,
&[2i64, 1i64, 3i64, 1i64],
)?;
test(
&[
[0.1139, 0.2254, -0.1381, 0.3687],
[1.0100, -1.1975, -0.0102, -0.4732],
[-0.9240, 0.1207, -0.7506, -1.0213],
[1.7809, -1.2960, 0.9384, 0.1438],
],
Some(1),
None,
None,
&[[2i64], [1i64], [3i64], [1i64]],
)?;
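// Helper that builds an ArgMin node, adding the `axis`, `keepdims` and `select_last_index`
// attributes only when the corresponding Option is Some, so the operator defaults are also exercised.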
fn test(
data: impl NdArray,
axis: Option<i64>,
keepdims: Option<i64>,
select_last_index: Option<i64>,
expected: impl NdArray,
) -> Result<()> {
let att_axis = AttributeProto {
name: "axis".to_string(),
ref_attr_name: "axis".to_string(),
i: axis.unwrap_or(0),
doc_string: "axis".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_keepdims = AttributeProto {
name: "keepdims".to_string(),
ref_attr_name: "keepdims".to_string(),
i: keepdims.unwrap_or(1),
doc_string: "keepdims".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_select_last_index = AttributeProto {
name: "select_last_index".to_string(),
ref_attr_name: "select_last_index".to_string(),
i: select_last_index.unwrap_or(0),
doc_string: "select_last_index".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let attrs = {
let mut mut_attrs = vec![];
if axis.is_some() {
mut_attrs.push(att_axis);
}
if keepdims.is_some() {
mut_attrs.push(att_keepdims);
}
if select_last_index.is_some() {
mut_attrs.push(att_select_last_index);
}
mut_attrs
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ArgMin".to_string(),
domain: "".to_string(),
attribute: attrs,
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
1 => assert_eq!(z.to_vec1::<i64>()?, expected.to_vec1::<i64>()?),
2 => assert_eq!(z.to_vec2::<i64>()?, expected.to_vec2::<i64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "ArgMax"
#[test]
fn test_argmax() -> Result<()> {
// tests from https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-6
// default_axes_keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
None,
Some(1),
None,
&[[1i64, 1i64]],
)?;
// keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
Some(1),
Some(1),
None,
&[[0i64], [1i64]],
)?;
// negative_axis_keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
Some(-1),
Some(1),
None,
&[[0i64], [1i64]],
)?;
// no_keepdims
test(
&[[2u32, 1u32], [3u32, 10u32]],
None,
Some(0),
None,
&[1i64, 1i64],
)?;
// tests from https://pytorch.org/docs/stable/generated/torch.argmax.html
test(
&[
[1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195],
],
Some(1),
Some(0),
None,
&[0i64, 2i64, 0i64, 1i64],
)?;
test(
&[
[1.3398, 0.2663, -0.2686, 0.2450],
[-0.7401, -0.8805, -0.3402, -1.1936],
[0.4907, -1.3948, -1.0691, -0.3132],
[-1.6092, 0.5419, -0.2993, 0.3195],
],
Some(1),
None,
None,
&[[0i64], [2i64], [0i64], [1i64]],
)?;
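// Helper that builds an ArgMax node; like the ArgMin helper above, attributes are only
// added when the corresponding Option is Some.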
fn test(
data: impl NdArray,
axis: Option<i64>,
keepdims: Option<i64>,
select_last_index: Option<i64>,
expected: impl NdArray,
) -> Result<()> {
let att_axis = AttributeProto {
name: "axis".to_string(),
ref_attr_name: "axis".to_string(),
i: axis.unwrap_or(0),
doc_string: "axis".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_keepdims = AttributeProto {
name: "keepdims".to_string(),
ref_attr_name: "keepdims".to_string(),
i: keepdims.unwrap_or(1),
doc_string: "keepdims".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let att_select_last_index = AttributeProto {
name: "select_last_index".to_string(),
ref_attr_name: "select_last_index".to_string(),
i: select_last_index.unwrap_or(0),
doc_string: "select_last_index".to_string(),
r#type: 2, // INT
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let attrs = {
let mut mut_attrs = vec![];
if axis.is_some() {
mut_attrs.push(att_axis);
}
if keepdims.is_some() {
mut_attrs.push(att_keepdims);
}
if select_last_index.is_some() {
mut_attrs.push(att_select_last_index);
}
mut_attrs
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ArgMax".to_string(),
domain: "".to_string(),
attribute: attrs,
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
1 => assert_eq!(z.to_vec1::<i64>()?, expected.to_vec1::<i64>()?),
2 => assert_eq!(z.to_vec2::<i64>()?, expected.to_vec2::<i64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "LeakyRelu"
#[test]
fn test_leakyrelu() -> Result<()> {
// tests from https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-80
// leakyrelu
test(&[-1.0, 0.0, 1.0], Some(0.1), &[-0.1, 0.0, 1.0])?;
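// Helper that evaluates a single LeakyRelu node; `alpha` is only added as an attribute when
// provided, and results are compared with an f32-epsilon tolerance.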
fn test(data: impl NdArray, alpha: Option<f32>, expected: impl NdArray) -> Result<()> {
let att_alpha = AttributeProto {
name: "alpha".to_string(),
ref_attr_name: "alpha".to_string(),
i: 0,
doc_string: "alpha".to_string(),
r#type: 1, // FLOAT
f: alpha.unwrap_or(0.01),
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let attrs = {
let mut mut_attrs = vec![];
if alpha.is_some() {
mut_attrs.push(att_alpha);
}
mut_attrs
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "LeakyRelu".to_string(),
domain: "".to_string(),
attribute: attrs,
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(data, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
for both in z
.to_vec1::<f64>()?
.iter()
.zip(expected.to_vec1::<f64>()?.iter())
{
let (act, exp) = both;
assert!(f64::abs(act - exp) < f32::EPSILON.into());
}
Ok(())
}
Ok(())
}
// "If"
#[test]
fn test_if() -> Result<()> {
let x = vec![1.0, 2.0, 3.0, 4.0, 5.0];
let y = vec![5.0, 4.0, 3.0, 2.0, 1.0];
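// The then-branch returns `x` and the else-branch returns `y`, each as a one-node Constant
// subgraph; evaluating the If node with cond = 1 should yield x, and with cond = 0 should yield y.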
let output_type_proto = Some(TypeProto {
value: Some(type_proto::Value::TensorType(type_proto::Tensor {
elem_type: DataType::Float.into(),
shape: Some(TensorShapeProto {
dim: vec![Dimension {
denotation: "".to_string(),
value: Some(dimension::Value::DimValue(5)),
}],
}),
})),
denotation: "".to_string(),
});
let then_branch = GraphProto {
output: vec![ValueInfoProto {
name: "then_out".to_string(),
r#type: output_type_proto.clone(),
doc_string: "".to_string(),
}],
node: vec![NodeProto {
op_type: "Constant".to_string(),
input: vec![],
output: vec!["then_out".to_string()],
attribute: vec![AttributeProto {
name: "value".to_string(),
r#type: AttributeType::Tensor.into(),
t: Some(TensorProto {
dims: vec![x.len() as i64],
float_data: x.clone(),
data_type: DataType::Float.into(),
..TensorProto::default()
}),
..AttributeProto::default()
}],
..NodeProto::default()
}],
..GraphProto::default()
};
let else_branch = GraphProto {
output: vec![ValueInfoProto {
name: "else_out".to_string(),
r#type: output_type_proto.clone(),
doc_string: "".to_string(),
}],
node: vec![NodeProto {
op_type: "Constant".to_string(),
input: vec![],
output: vec!["else_out".to_string()],
attribute: vec![AttributeProto {
name: "value".to_string(),
r#type: AttributeType::Tensor.into(),
t: Some(TensorProto {
dims: vec![y.len() as i64],
float_data: y.clone(),
data_type: DataType::Float.into(),
..TensorProto::default()
}),
..AttributeProto::default()
}],
..NodeProto::default()
}],
..GraphProto::default()
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "If".to_string(),
attribute: vec![
AttributeProto {
name: "then_branch".to_string(),
r#type: AttributeType::Graph.into(),
g: Some(then_branch),
..AttributeProto::default()
},
AttributeProto {
name: "else_branch".to_string(),
r#type: AttributeType::Graph.into(),
g: Some(else_branch),
..AttributeProto::default()
},
],
input: vec!["cond".to_string()],
output: vec!["res".to_string()],
..NodeProto::default()
}],
input: vec![],
output: vec![ValueInfoProto {
name: "res".to_string(),
doc_string: "".to_string(),
r#type: output_type_proto.clone(),
}],
..GraphProto::default()
}));
for cond in [1u8, 0] {
let inputs =
HashMap::from_iter([("cond".to_string(), Tensor::full(cond, (1,), &Device::Cpu)?)]);
let outputs = candle_onnx::simple_eval(&manual_graph, inputs)?;
let expected = if cond != 0 { &x } else { &y };
let Some(res) = outputs.get("res") else {
candle::bail!("outputs didn't contain expected key `res`: {outputs:?}");
};
assert_eq!(&res.to_vec1::<f32>()?, expected);
}
Ok(())
}
#[test]
fn test_pad() -> Result<()> {
let data = Tensor::from_vec(
vec![
1.0, 2.0, 3.0, //
4.0, 5.0, 6.0, //
],
(2, 3),
&Device::Cpu,
)?;
let pads = Tensor::from_vec(vec![0i64, 1, 0, 0], (4,), &Device::Cpu)?;
let mode = "reflect";
let expected = Tensor::from_vec(
vec![
2.0, 1.0, 2.0, 3.0, //
5.0, 4.0, 5.0, 6.0, //
],
(2, 4),
&Device::Cpu,
)?;
let model = create_model_proto_with_graph(Some(GraphProto {
input: vec![
ValueInfoProto {
name: "data".to_string(),
..ValueInfoProto::default()
},
ValueInfoProto {
name: "pads".to_string(),
..ValueInfoProto::default()
},
],
output: vec![ValueInfoProto {
name: "output".to_string(),
..ValueInfoProto::default()
}],
node: vec![NodeProto {
op_type: "Pad".to_string(),
input: vec!["data".to_string(), "pads".to_string()],
output: vec!["output".to_string()],
attribute: vec![AttributeProto {
name: "mode".to_string(),
r#type: AttributeType::String.into(),
s: mode.as_bytes().to_vec(),
..AttributeProto::default()
}],
..NodeProto::default()
}],
..GraphProto::default()
}));
let inputs = HashMap::from_iter([("data".to_string(), data), ("pads".to_string(), pads)]);
let res = candle_onnx::simple_eval(&model, inputs)?;
let Some(actual) = res.get("output") else {
candle::bail!("outputs didn't contain expected key `output`: {res:?}");
};
assert_eq!(actual.to_vec2::<f64>()?, expected.to_vec2::<f64>()?);
Ok(())
}
#[test]
fn test_slice() -> Result<()> {
let model = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Slice".to_string(),
input: vec![
"data".to_string(),
"starts".to_string(),
"ends".to_string(),
"axes".to_string(),
"steps".to_string(),
],
output: vec!["result".to_string()],
..NodeProto::default()
}],
input: ["data", "starts", "ends", "axes", "steps"]
.into_iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
r#type: None,
doc_string: "".to_string(),
})
.collect(),
output: ["result"]
.into_iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
r#type: None,
doc_string: "".to_string(),
})
.collect(),
..GraphProto::default()
}));
/*
data = [
[1, 2, 3, 4],
[5, 6, 7, 8],
]
axes = [0, 1]
starts = [1, 0]
ends = [2, 3]
steps = [1, 2]
result = [
[5, 7],
]
*/
let outputs = candle_onnx::simple_eval(
&model,
HashMap::from_iter([
(
"data".to_string(),
Tensor::from_vec(vec![1i64, 2, 3, 4, 5, 6, 7, 8], (2, 4), &Device::Cpu)?,
),
(
"starts".to_string(),
Tensor::from_vec(vec![1i64, 0], (2,), &Device::Cpu)?,
),
(
"ends".to_string(),
Tensor::from_vec(vec![2i64, 3], (2,), &Device::Cpu)?,
),
(
"axes".to_string(),
Tensor::from_vec(vec![0i64, 1], (2,), &Device::Cpu)?,
),
(
"steps".to_string(),
Tensor::from_vec(vec![1i64, 2], (2,), &Device::Cpu)?,
),
]),
)?;
let actual = outputs.get("result").unwrap().to_vec2::<i64>()?;
assert_eq!(actual, vec![vec![5i64, 7]]);
/*
data = [
[1, 2, 3, 4],
[5, 6, 7, 8],
]
starts = [0, 1]
ends = [-1, 1000]
result = [
[2, 3, 4],
]
*/
let model = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Slice".to_string(),
input: vec!["data".to_string(), "starts".to_string(), "ends".to_string()],
output: vec!["result".to_string()],
..NodeProto::default()
}],
input: ["data", "starts", "ends"]
.into_iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
r#type: None,
doc_string: "".to_string(),
})
.collect(),
output: ["result"]
.into_iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
r#type: None,
doc_string: "".to_string(),
})
.collect(),
..GraphProto::default()
}));
let outputs = candle_onnx::simple_eval(
&model,
HashMap::from_iter([
(
"data".to_string(),
Tensor::from_vec(vec![1i64, 2, 3, 4, 5, 6, 7, 8], (2, 4), &Device::Cpu)?,
),
(
"starts".to_string(),
Tensor::from_vec(vec![0i64, 1], (2,), &Device::Cpu)?,
),
(
"ends".to_string(),
Tensor::from_vec(vec![-1i64, 1000], (2,), &Device::Cpu)?,
),
]),
)?;
let actual = outputs.get("result").unwrap().to_vec2::<i64>()?;
assert_eq!(actual, vec![vec![2i64, 3, 4]]);
Ok(())
}
#[test]
fn test_lstm() -> Result<()> {
// reference values generated with PyTorch, so the results are checked against what PyTorch produces
/*
#!/usr/bin/env python3
# torch.nn.LSTM(input_size, hidden_size, num_layers=1, bias=True, batch_first=False, dropout=0.0, bidirectional=False, proj_size=0, device=None, dtype=None)
import torch
rand_gen = torch.Generator()
rand_gen.manual_seed(1)
input_size = 3
hidden_size = 5
batch_size = 1
sequence_length = 4
number_directions = 1
rnn = torch.nn.LSTM(input_size,hidden_size)
weight_ih_l0 = torch.randn(rnn.weight_ih_l0.shape, generator=rand_gen)
weight_hh_l0 = torch.randn(rnn.weight_hh_l0.shape, generator=rand_gen)
bias_ih_l0 = torch.randn(rnn.bias_ih_l0.shape, generator=rand_gen)
bias_hh_l0 = torch.randn(rnn.bias_hh_l0.shape, generator=rand_gen)
rnn.weight_ih_l0 = torch.nn.Parameter(weight_ih_l0)
rnn.weight_hh_l0 = torch.nn.Parameter(weight_hh_l0)
rnn.bias_ih_l0 = torch.nn.Parameter(bias_ih_l0)
rnn.bias_hh_l0 = torch.nn.Parameter(bias_hh_l0)
input = torch.randn(sequence_length, batch_size, input_size, generator=rand_gen)
h0 = torch.randn(number_directions, batch_size, hidden_size, generator=rand_gen)
c0 = torch.randn(number_directions, batch_size, hidden_size, generator=rand_gen)
output, (hn, cn) = rnn(input, (h0, c0))
def fmt_tensor(t):
return "Tensor::from_vec::<_, f32>(vec!"+ str(t.flatten().tolist()) + ", (" + "".join([str(n)+"," for n in t.shape])+"), &Device::Cpu)?"
print("let input_size = ", input_size, ";")
print("let hidden_size = ", hidden_size, ";")
print("let batch_size = ", batch_size, ";")
print("let sequence_length = ", sequence_length, ";")
print("let number_directions = ", number_directions, ";")
print("let weight_ih_l0 = ", fmt_tensor(rnn.weight_ih_l0), ";")
print("let weight_hh_l0 = ", fmt_tensor(rnn.weight_hh_l0), ";")
print("let bias_ih_l0 = ", fmt_tensor(rnn.bias_ih_l0), ";")
print("let bias_hh_l0 = ", fmt_tensor(rnn.bias_hh_l0), ";")
print("let input = ", fmt_tensor(input), ";")
print("let h0 = ", fmt_tensor(h0), ";")
print("let c0 = ", fmt_tensor(c0), ";")
print("let output = ", fmt_tensor(output), ";")
print("let hn = ", fmt_tensor(hn), ";")
print("let cn = ", fmt_tensor(cn), ";")
*/
let input_size = 3;
let hidden_size = 5;
let batch_size = 1;
let sequence_length = 4;
let number_directions = 1;
let weight_ih_l0 = Tensor::from_vec::<_, f32>(
vec![
-1.525_595_9,
-0.750_231_8,
-0.653_980_9,
-1.609_484_8,
-0.100_167_18,
-0.609_188_9,
-0.979_772_27,
-1.609_096_3,
-0.712_144_6,
0.303_722,
-0.777_314_3,
-0.251_455_25,
-0.222_270_49,
1.687_113_4,
0.228_425_17,
0.467_635_5,
-0.696_972_4,
-1.160_761_5,
0.699_542_4,
0.199_081_63,
0.865_692_4,
0.244_403_9,
-0.662_911_36,
0.807_308_26,
1.101_680_6,
-0.175_936_04,
-2.245_557_8,
-1.446_458,
0.061_155_282,
-0.617_744_45,
-0.798_069_83,
-0.131_623_21,
1.879_345_8,
-0.072_131_78,
0.157_770_6,
-0.773_454_9,
0.199_056_5,
0.045_702_778,
0.152_956_92,
-0.475_678_8,
-0.111_019_83,
0.292_735_25,
-0.157_845_15,
-0.028_787_14,
0.453_254_58,
1.142_161_1,
0.248_610_7,
-1.775_400_8,
-0.025_502_462,
-1.023_330_6,
-0.596_185_15,
-1.005_530_7,
0.428_542_3,
1.476_077_8,
-1.786_867_9,
1.610_317_6,
-0.703_956_66,
-0.185_265_8,
-0.996_235_1,
-0.831_255_26,
],
(20, 3),
&Device::Cpu,
)?;
let weight_hh_l0 = Tensor::from_vec::<_, f32>(
vec![
0.409_972_43,
0.408_450_66,
0.257_865_4,
1.095_021_4,
-0.506_486_6,
0.099_775_404,
-0.653_973_4,
0.731_693_7,
-1.456_733,
1.608_935_4,
0.093_769_975,
-1.259_749,
0.254_633_5,
-0.501_957_3,
-1.041_2,
0.732_267_2,
1.307_535_5,
-1.162_798_8,
0.119_636_11,
-0.163_135_33,
0.661_445_3,
1.189_920_5,
0.816_533_9,
-0.913_523_6,
-0.353_806_53,
0.763_927_04,
-0.588_950_7,
-0.763_597_37,
1.335_205_7,
0.604_273_6,
-0.103_442_08,
-0.151_216_92,
1.246_568_3,
0.505_721_4,
0.950_511_2,
1.296_648_3,
0.873_796_3,
-0.560_259_4,
1.285_784_5,
0.816_823_84,
-1.464_799_4,
-1.262_928_4,
1.122_018_8,
1.566_334_1,
2.558_138_4,
-0.233_363_88,
-0.013_472_13,
1.860_634_8,
1.549_620_5,
0.347_629_25,
0.093_008_03,
0.614_740_3,
0.712_364_55,
-1.776_507_3,
0.353_864_58,
1.199_613_2,
-0.712_258_93,
-0.620_034_4,
-0.228_134_95,
-0.789_274_63,
-1.611_111_8,
-1.871_612_9,
0.543_083_6,
0.660_678_6,
0.270_527_72,
0.559_691_97,
-0.318_396_3,
1.511_720_7,
-1.363_267_2,
-0.983_219_6,
1.511_266_7,
0.641_870_74,
-0.747_445_9,
-0.923_438_55,
0.573_398_4,
-0.109_299_51,
0.518_112_1,
0.106_535_35,
0.269_240_77,
1.324_768,
0.037_456_9,
-0.637_839_3,
-0.814_755_44,
-0.689_506_53,
0.843_654_3,
1.165_701_3,
0.526_932_2,
1.619_253_3,
-0.963_976_26,
0.141_520_38,
-0.163_660_96,
-0.358_222_57,
1.722_279_3,
-0.303_575_6,
0.238_874_2,
1.344_001_2,
0.103_225_69,
1.100_354_2,
-0.341_680_2,
0.947_338_9,
],
(20, 5),
&Device::Cpu,
)?;
let bias_ih_l0 = Tensor::from_vec::<_, f32>(
vec![
-0.568_515_96,
0.837_596_2,
1.783_660_7,
-0.195_424_66,
0.235_193_13,
1.914_243_3,
1.836_411_1,
1.324_532_4,
-0.070_514_58,
0.346_979_4,
-0.653_679_6,
1.558_620_2,
0.218_566_15,
-0.574_307_26,
1.457_125_1,
1.770_955_7,
-2.017_3,
0.423_503_2,
0.573_022,
-1.796_243,
],
(20,),
&Device::Cpu,
)?;
let bias_hh_l0 = Tensor::from_vec::<_, f32>(
vec![
1.247_040_4,
1.273_851_2,
0.390_949_25,
0.387_210_5,
0.144_403_95,
0.777_168_45,
-2.338_112_6,
-0.829_120_4,
1.166_139_1,
1.478_657_5,
0.267_608_73,
0.756_119_85,
-0.587_336_1,
-2.061_920_6,
0.430_473_48,
0.337_656_62,
-0.343_785_35,
-0.617_226_06,
1.252_969_3,
-0.051_417_42,
],
(20,),
&Device::Cpu,
)?;
let input = Tensor::from_vec::<_, f32>(
vec![
0.647_212_8,
-0.041_167_17,
-0.177_493_08,
-0.500_039_3,
0.867_274_94,
-0.273_192_23,
-0.460_768_13,
-0.099_093_71,
0.472_844_8,
1.004_948_5,
-0.287_142_04,
-1.161_862_1,
],
(4, 1, 3),
&Device::Cpu,
)?;
let h0 = Tensor::from_vec::<_, f32>(
vec![
0.027_581_785,
0.565_238_24,
-0.011_487_379,
0.670_640_05,
-0.492_925_05,
],
(1, 1, 5),
&Device::Cpu,
)?;
let c0 = Tensor::from_vec::<_, f32>(
vec![
1.505_028_5,
-2.326_355,
1.616_89,
-0.902_623_8,
0.173_668_24,
],
(1, 1, 5),
&Device::Cpu,
)?;
let output = Tensor::from_vec::<_, f32>(
vec![
0.595_601_7,
-0.017_232_792,
0.110_355_72,
-0.493_231_74,
0.047_632_16,
0.635_845_2,
0.040_328_12,
-0.378_861_16,
-0.746_434,
0.200_809_09,
0.584_026_5,
0.145_328_82,
-0.734_529_85,
-0.521_430_43,
0.219_038_17,
0.742_045_16,
0.319_438_8,
-0.047_266_465,
-0.282_384_96,
0.271_313_4,
],
(4, 1, 5),
&Device::Cpu,
)?;
let hn = Tensor::from_vec::<_, f32>(
vec![
0.742_045_16,
0.319_438_8,
-0.047_266_465,
-0.282_384_96,
0.271_313_4,
],
(1, 1, 5),
&Device::Cpu,
)?;
let cn = Tensor::from_vec::<_, f32>(
vec![
0.963_055_85,
1.003_307,
-1.754_899,
-1.596_712_2,
0.825_292_47,
],
(1, 1, 5),
&Device::Cpu,
)?;
// end of generated values
let model = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "LSTM".to_string(),
name: "LSTM_test".to_string(),
attribute: vec![AttributeProto {
name: "hidden_size".to_string(),
r#type: AttributeType::Int.into(),
i: hidden_size as i64,
..AttributeProto::default()
}],
input: vec![
"input".to_string(),
"w".to_string(),
"r".to_string(),
"b".to_string(), // b
"".to_string(), // seq_lens
"h".to_string(),
"c".to_string(),
],
output: vec!["output".to_string(), "hn".to_string(), "cn".to_string()],
..NodeProto::default()
}],
input: ["input", "w", "r", "b", "h", "c"]
.into_iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
..ValueInfoProto::default()
})
.collect(),
output: ["output", "hn", "cn"]
.into_iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
..ValueInfoProto::default()
})
.collect(),
..GraphProto::default()
}));
// pytorch stores weight and bias as [ifco] but we want it as [iofc]
// so we need to re-arrange the tensors a bit
let idx_iofc = {
let stride = hidden_size as i64;
let dev = weight_ih_l0.device();
let idx_i = Tensor::arange(0, stride, dev)?;
let idx_f = Tensor::arange(stride, 2 * stride, dev)?;
let idx_g = Tensor::arange(2 * stride, 3 * stride, dev)?;
let idx_o = Tensor::arange(3 * stride, 4 * stride, dev)?;
Tensor::cat(&[&idx_i, &idx_o, &idx_f, &idx_g], 0)?
};
let w = weight_ih_l0.index_select(&idx_iofc, 0)?;
let w = w.reshape((number_directions, 4 * hidden_size, input_size))?;
let r = weight_hh_l0.index_select(&idx_iofc, 0)?;
let r = r.reshape((number_directions, 4 * hidden_size, hidden_size))?;
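// ONNX LSTM expects the bias input B with shape [num_directions, 8*hidden_size]: the
// input-weight biases (Wb) concatenated with the recurrent-weight biases (Rb), in iofc order.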
let wb = bias_ih_l0.index_select(&idx_iofc, 0)?;
let rb = bias_hh_l0.index_select(&idx_iofc, 0)?;
let b = Tensor::cat(&[wb, rb], 0)?.reshape((number_directions, 8 * hidden_size))?;
let output = output.reshape((sequence_length, number_directions, batch_size, hidden_size))?;
let result = simple_eval(
&model,
HashMap::from_iter([
("input".to_string(), input),
("w".to_string(), w),
("r".to_string(), r),
("b".to_string(), b),
("h".to_string(), h0),
("c".to_string(), c0),
]),
)?;
let actual_output = result.get("output").unwrap();
assert_eq!(output.dims(), actual_output.dims());
let actual_hn = result.get("hn").unwrap();
assert_eq!(hn.dims(), actual_hn.dims());
let actual_cn = result.get("cn").unwrap();
assert_eq!(cn.dims(), actual_cn.dims());
let diff_close_enough = |a: &Tensor, b| -> Result<_> {
let diffs = a.sub(b)?.flatten_all()?.to_vec1::<f32>()?;
Ok(diffs.iter().all(|f| f.abs() < 0.0001))
};
assert!(
diff_close_enough(&output, actual_output)?,
"output did not match expected\n{actual_output}\n{output}",
);
assert!(
diff_close_enough(&hn, actual_hn)?,
"hn did not match expected\n{actual_hn}\n{hn}",
);
assert!(
diff_close_enough(&cn, actual_cn)?,
"cn did not match expected\n{actual_cn}\n{cn}",
);
Ok(())
}
#[test]
fn test_expand_dim_changed() -> Result<()> {
// Create a manual graph for the Expand operation
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Expand".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec!["data".to_string(), "new_shape".to_string()],
output: vec!["expanded".to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
input: vec![
ValueInfoProto {
name: "data".to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: "new_shape".to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: "expanded".to_string(),
doc_string: "".to_string(),
r#type: None,
}],
..GraphProto::default()
}));
// Input tensor with shape [3, 1]
let data = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32], (3, 1), &Device::Cpu)?;
// New shape tensor: [2, 1, 6]
let new_shape = Tensor::from_vec(vec![2i64, 1, 6], (3,), &Device::Cpu)?;
// Expected output after expansion
let expected = Tensor::from_vec(
vec![
1.0f32, 1.0f32, 1.0f32, 1.0f32, 1.0f32, 1.0f32, 2.0f32, 2.0f32, 2.0f32, 2.0f32, 2.0f32,
2.0f32, 3.0f32, 3.0f32, 3.0f32, 3.0f32, 3.0f32, 3.0f32, 1.0f32, 1.0f32, 1.0f32, 1.0f32,
1.0f32, 1.0f32, 2.0f32, 2.0f32, 2.0f32, 2.0f32, 2.0f32, 2.0f32, 3.0f32, 3.0f32, 3.0f32,
3.0f32, 3.0f32, 3.0f32,
],
(2, 3, 6),
&Device::Cpu,
)?;
// Execute the model evaluation
let inputs = HashMap::from_iter([
("data".to_string(), data),
("new_shape".to_string(), new_shape),
]);
let result = candle_onnx::simple_eval(&manual_graph, inputs)?;
// Retrieve and compare the result
let expanded = result.get("expanded").expect("Output 'expanded' not found");
assert_eq!(expanded.to_vec3::<f32>()?, expected.to_vec3::<f32>()?);
Ok(())
}
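// Builds a single-node model for `op_name`, wiring the given input/output names straight
// through to the graph's inputs and outputs.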
fn make_graph_helper(
op_name: &str,
inputs: &[&str],
outputs: &[&str],
attribs: Vec<AttributeProto>,
) -> ModelProto {
create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: op_name.to_string(),
domain: "".to_string(),
attribute: attribs,
input: inputs.iter().map(|s| s.to_string()).collect(),
output: outputs.iter().map(|s| s.to_string()).collect(),
name: "".to_string(),
doc_string: "".to_string(),
}],
input: inputs
.iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
..ValueInfoProto::default()
})
.collect(),
output: outputs
.iter()
.map(|name| ValueInfoProto {
name: name.to_string(),
..ValueInfoProto::default()
})
.collect(),
..GraphProto::default()
}))
}
#[test]
fn test_expand_dim_unchanged() -> Result<()> {
// Create a manual graph for the Expand operation
let manual_graph = make_graph_helper("Expand", &["data", "new_shape"], &["expanded"], vec![]);
// Input tensor with shape [3, 1] and dtype f32
let data = Tensor::from_vec(vec![1.0f32, 2.0f32, 3.0f32], (3, 1), &Device::Cpu)?;
// New shape tensor: [3, 4]
let new_shape = Tensor::from_vec(vec![3i64, 4], (2,), &Device::Cpu)?;
// Expected output after expansion, dtype f32
let expected = Tensor::from_vec(
vec![
1.0f32, 1.0f32, 1.0f32, 1.0f32, 2.0f32, 2.0f32, 2.0f32, 2.0f32, 3.0f32, 3.0f32, 3.0f32,
3.0f32,
],
(3, 4),
&Device::Cpu,
)?;
// Execute the model evaluation
let inputs = HashMap::from_iter([
("data".to_string(), data),
("new_shape".to_string(), new_shape),
]);
let result = candle_onnx::simple_eval(&manual_graph, inputs)?;
// Retrieve and compare the result
let expanded = result.get("expanded").expect("Output 'expanded' not found");
assert_eq!(expanded.to_vec2::<f32>()?, expected.to_vec2::<f32>()?);
Ok(())
}
fn make_split_graph_helper(inputs: &[&str], outputs: &[&str], axis: i64) -> ModelProto {
let attribs = vec![AttributeProto {
name: "axis".to_string(),
r#type: AttributeType::Int.into(),
i: axis,
..AttributeProto::default()
}];
make_graph_helper("Split", inputs, outputs, attribs)
}
#[test]
fn test_split_equal_parts_1d_opset13() -> Result<()> {
let input = Tensor::from_vec(
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32, 5.0f32, 6.0f32],
(6,),
&Device::Cpu,
)?;
let mut inputs = HashMap::new();
inputs.insert("input".to_string(), input);
{
let manual_graph =
make_split_graph_helper(&["input"], &["output_1", "output_2", "output_3"], 0);
let eval = candle_onnx::simple_eval(&manual_graph, inputs.clone())?;
assert_eq!(eval.len(), 3);
let out1 = eval.get("output_1").expect("Output 'output_1' not found");
let out2 = eval.get("output_2").expect("Output 'output_2' not found");
let out3 = eval.get("output_3").expect("Output 'output_3' not found");
assert_eq!(out1.to_vec1::<f32>()?, vec![1.0f32, 2.0f32]);
assert_eq!(out2.to_vec1::<f32>()?, vec![3.0f32, 4.0f32]);
assert_eq!(out3.to_vec1::<f32>()?, vec![5.0f32, 6.0f32]);
}
{
let splits = Tensor::from_vec(vec![2i64, 4], (2,), &Device::Cpu)?;
inputs.insert("split".to_string(), splits);
let manual_graph =
make_split_graph_helper(&["input", "split"], &["output_1", "output_2"], 0);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 2);
let out1 = eval.get("output_1").expect("Output 'output_1' not found");
let out2 = eval.get("output_2").expect("Output 'output_2' not found");
assert_eq!(out1.to_vec1::<f32>()?, vec![1.0f32, 2.0f32]);
assert_eq!(out2.to_vec1::<f32>()?, vec![3.0f32, 4.0f32, 5.0f32, 6.0f32]);
}
Ok(())
}
fn make_reduce_sum_graph_helper(
inputs: &[&str],
outputs: &[&str],
keepdims: Option<i64>,
noop_with_empty_axes: Option<i64>,
) -> ModelProto {
let mut attribs = vec![];
if let Some(keepdims) = keepdims {
attribs.push(AttributeProto {
name: "keepdims".to_string(),
r#type: AttributeType::Int.into(),
i: keepdims,
..AttributeProto::default()
});
}
if let Some(noop_with_empty_axes) = noop_with_empty_axes {
attribs.push(AttributeProto {
name: "noop_with_empty_axes".to_string(),
r#type: AttributeType::Int.into(), // noop_with_empty_axes is a scalar INT attribute carried in `i`
i: noop_with_empty_axes,
..AttributeProto::default()
});
}
make_graph_helper("ReduceSum", inputs, outputs, attribs)
}
#[test]
fn test_reduce_sum_default_axes_keepdims() -> Result<()> {
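// No `axes` tensor is supplied, so the reduction runs over all axes; with keepdims = 1 the
// result keeps rank 3 with shape (1, 1, 1).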
let manual_graph = make_reduce_sum_graph_helper(&["data", "axes"], &["reduced"], Some(1), None);
// Test with example data
{
let data = Tensor::from_vec(
vec![
1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
],
(3, 2, 2),
&Device::Cpu,
)?;
// let axes = Tensor::from_vec(Vec::<i64>::new(), (0,), &Device::Cpu)?;
let mut inputs = HashMap::new();
inputs.insert("data".to_string(), data);
// inputs.insert("axes".to_string(), axes);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let reduced = eval.get("reduced").expect("Output 'reduced' not found");
let expected = Tensor::from_vec(vec![78.0f32], (1, 1, 1), &Device::Cpu)?;
assert_eq!(reduced.to_vec3::<f32>()?, expected.to_vec3::<f32>()?);
}
{
let data = Tensor::from_vec(
vec![
-5.2f32, 7.8, -3.1, 9.4, 2.6, -8.7, 4.3, -1.9, 6.5, -0.8, -7.2, 3.6,
],
(3, 2, 2),
&Device::Cpu,
)?;
let mut inputs = HashMap::new();
inputs.insert("data".to_string(), data.clone());
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let reduced = eval.get("reduced").expect("Output 'reduced' not found");
let expected = data.sum_all()?.reshape((1, 1, 1))?;
assert_eq!(reduced.to_vec3::<f32>()?, expected.to_vec3::<f32>()?);
}
Ok(())
}
#[test]
fn test_reduce_sum_do_not_keep_dims() -> Result<()> {
let manual_graph = make_reduce_sum_graph_helper(&["data", "axes"], &["reduced"], Some(0), None);
// Test with example data
{
let data = Tensor::from_vec(
vec![
1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
],
(3, 2, 2),
&Device::Cpu,
)?;
let axes = Tensor::from_vec(vec![1i64], (1,), &Device::Cpu)?;
let mut inputs = HashMap::new();
inputs.insert("data".to_string(), data);
inputs.insert("axes".to_string(), axes);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let reduced = eval.get("reduced").expect("Output 'reduced' not found");
let expected = Tensor::from_vec(
vec![4.0f32, 6.0, 12.0, 14.0, 20.0, 22.0],
(3, 2),
&Device::Cpu,
)?;
assert_eq!(reduced.to_vec2::<f32>()?, expected.to_vec2::<f32>()?);
}
// Test with random data
{
let _shape = (3, 2, 2);
let data = Tensor::from_vec(
vec![
-5.2f32, 7.8, -3.1, 9.4, 2.6, -8.7, 4.3, -1.9, 6.5, -0.8, -7.2, 3.6,
],
(3, 2, 2),
&Device::Cpu,
)?;
let axes = Tensor::from_vec(vec![1i64], (1,), &Device::Cpu)?;
let mut inputs = HashMap::new();
inputs.insert("data".to_string(), data.clone());
inputs.insert("axes".to_string(), axes);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let reduced = eval.get("reduced").expect("Output 'reduced' not found");
// Calculate expected result
let expected = data.sum(1)?;
assert_eq!(reduced.to_vec2::<f32>()?, expected.to_vec2::<f32>()?);
}
Ok(())
}
// Xor
#[test]
fn test_xor() -> Result<()> {
// tests based on: https://github.com/onnx/onnx/blob/main/docs/Operators.md#Xor xor
// 2d
test(
&[[0_u8, 1, 0, 0], [0, 0, 1, 1], [0, 1, 1, 1]],
&[[1_u8, 1, 0, 0], [1, 0, 0, 1], [1, 1, 1, 0]],
&[[1_u8, 0, 0, 0], [1, 0, 1, 0], [1, 0, 0, 1]],
)?;
// 3d
test(
&[
[
[0_u8, 1, 1, 1, 1],
[0, 1, 1, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 1],
],
[
[0, 0, 1, 1, 1],
[1, 0, 1, 1, 1],
[1, 1, 0, 0, 1],
[1, 0, 0, 1, 0],
],
[
[1, 0, 0, 1, 1],
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 1],
[1, 0, 0, 0, 1],
],
],
&[
[
[1_u8, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[1, 0, 0, 1, 0],
[0, 0, 0, 0, 0],
],
[
[1, 0, 0, 1, 1],
[1, 0, 1, 1, 1],
[0, 1, 0, 1, 1],
[1, 1, 1, 0, 0],
],
[
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
],
],
&[
[
[1_u8, 1, 1, 0, 0],
[0, 1, 0, 0, 1],
[0, 1, 1, 0, 1],
[0, 0, 0, 0, 1],
],
[
[1, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 0, 1, 0],
[0, 1, 1, 1, 0],
],
[
[1, 1, 1, 0, 1],
[0, 0, 1, 1, 0],
[1, 0, 1, 1, 1],
[0, 1, 0, 1, 1],
],
],
)?;
// 4d
test(
&[
[
[[0_u8, 1, 1, 0], [1, 0, 0, 0], [1, 1, 0, 1]],
[[1, 1, 0, 1], [0, 0, 0, 1], [0, 0, 0, 1]],
],
[
[[1, 1, 0, 0], [1, 0, 1, 0], [1, 0, 0, 0]],
[[1, 0, 0, 1], [1, 0, 1, 1], [1, 1, 0, 1]],
],
],
&[
[
[[1_u8, 0, 1, 0], [0, 0, 1, 1], [1, 0, 1, 0]],
[[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]],
],
[
[[1, 1, 1, 0], [0, 0, 0, 1], [0, 0, 1, 0]],
[[0, 0, 0, 0], [1, 0, 0, 0], [1, 1, 1, 1]],
],
],
&[
[
[[1_u8, 1, 0, 0], [1, 0, 1, 1], [0, 1, 1, 1]],
[[1, 0, 0, 1], [1, 0, 0, 1], [0, 0, 0, 0]],
],
[
[[0, 0, 1, 0], [1, 0, 1, 1], [1, 0, 1, 0]],
[[1, 0, 0, 1], [0, 0, 1, 1], [0, 0, 1, 0]],
],
],
)?;
// tests based on: https://github.com/onnx/onnx/blob/main/docs/Operators.md#Xor xor_broadcast
// 3d vs 1d
test(
// Shape (3, 4, 5)
&[
[
[0_u8, 0, 0, 0, 1],
[0, 1, 0, 1, 1],
[1, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
],
[
[0, 1, 0, 1, 1],
[1, 1, 0, 0, 1],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1],
],
[
[1, 1, 0, 1, 1],
[0, 0, 0, 1, 1],
[0, 1, 1, 0, 1],
[1, 1, 0, 1, 1],
],
],
// shape (5)
&[1_u8, 0, 0, 1, 1],
// shape (3, 4, 5)
&[
[
[1_u8, 0, 0, 1, 0],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 1, 1, 0],
],
[
[1, 1, 0, 0, 0],
[0, 1, 0, 1, 0],
[1, 1, 1, 0, 1],
[1, 0, 0, 1, 0],
],
[
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 1, 1, 1, 0],
[0, 1, 0, 0, 0],
],
],
)?;
// 3d vs 2d
test(
// Shape (3, 4, 5)
&[
[
[0_u8, 0, 0, 0, 1],
[0, 1, 0, 1, 1],
[1, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
],
[
[0, 1, 0, 1, 1],
[1, 1, 0, 0, 1],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1],
],
[
[1, 1, 0, 1, 1],
[0, 0, 0, 1, 1],
[0, 1, 1, 0, 1],
[1, 1, 0, 1, 1],
],
],
// shape (4, 5)
&[
[0_u8, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[1, 1, 0, 1, 1],
[1, 1, 0, 1, 0],
],
// shape (3, 4, 5)
&[
[
[0_u8, 1, 0, 1, 1],
[0, 1, 1, 1, 1],
[0, 1, 0, 0, 0],
[1, 1, 1, 1, 1],
],
[
[0, 0, 0, 0, 1],
[1, 1, 1, 0, 1],
[1, 0, 1, 0, 1],
[1, 1, 0, 1, 1],
],
[
[1, 0, 0, 0, 1],
[0, 0, 1, 1, 1],
[1, 0, 1, 1, 0],
[0, 0, 0, 0, 1],
],
],
)?;
// 4d vs 2d
test(
// Shape (2, 3, 3, 4)
&[
[
[[1_u8, 0, 0, 1], [1, 1, 0, 0], [0, 1, 0, 0]],
[[1, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 1]],
[[1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 1, 1]],
],
[
[[0, 1, 0, 1], [1, 1, 0, 1], [1, 0, 1, 1]],
[[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1]],
[[1, 0, 0, 0], [1, 1, 0, 0], [0, 1, 0, 1]],
],
],
// shape (3, 4)
&[[0_u8, 0, 1, 1], [1, 1, 1, 1], [0, 1, 0, 1]],
// shape (2, 3, 3, 4)
&[
[
[[1_u8, 0, 1, 0], [0, 0, 1, 1], [0, 0, 0, 1]],
[[1, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 0]],
[[1, 0, 1, 1], [0, 0, 0, 1], [0, 1, 1, 0]],
],
[
[[0, 1, 1, 0], [0, 0, 1, 0], [1, 1, 1, 0]],
[[1, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 0]],
[[1, 0, 1, 1], [0, 0, 1, 1], [0, 0, 0, 0]],
],
],
)?;
// 4d vs 3d
test(
// Shape (2, 3, 3, 4)
&[
[
[[1_u8, 0, 0, 1], [1, 1, 0, 0], [0, 1, 0, 0]],
[[1, 1, 0, 0], [0, 1, 0, 0], [1, 0, 0, 1]],
[[1, 0, 0, 0], [1, 1, 1, 0], [0, 0, 1, 1]],
],
[
[[0, 1, 0, 1], [1, 1, 0, 1], [1, 0, 1, 1]],
[[1, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 1]],
[[1, 0, 0, 0], [1, 1, 0, 0], [0, 1, 0, 1]],
],
],
// shape (3, 3, 4)
&[
[[1_u8, 1, 0, 0], [0, 0, 1, 1], [0, 1, 0, 0]],
[[0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 0, 1]],
[[0, 1, 1, 0], [1, 0, 1, 1], [1, 1, 0, 1]],
],
// shape (2, 3, 3, 4)
&[
[
[[0_u8, 1, 0, 1], [1, 1, 1, 1], [0, 0, 0, 0]],
[[1, 0, 0, 1], [0, 1, 0, 0], [1, 1, 0, 0]],
[[1, 1, 1, 0], [0, 1, 0, 1], [1, 1, 1, 0]],
],
[
[[1, 0, 0, 1], [1, 1, 1, 0], [1, 1, 1, 1]],
[[1, 0, 0, 1], [1, 0, 0, 0], [0, 1, 1, 0]],
[[1, 1, 1, 0], [0, 1, 1, 1], [1, 0, 0, 0]],
],
],
)?;
// 4d vs 4d
test(
// Shape (1, 4, 1, 2)
&[[[[1_u8, 0]], [[1, 0]], [[1, 0]], [[1, 1]]]],
// shape (2, 1, 4, 2)
&[
[[[0_u8, 0], [1, 1], [1, 1], [1, 1]]],
[[[0, 1], [1, 0], [0, 1], [0, 0]]],
],
// shape (2, 4, 4, 2)
&[
[
[[1_u8, 0], [0, 1], [0, 1], [0, 1]],
[[1, 0], [0, 1], [0, 1], [0, 1]],
[[1, 0], [0, 1], [0, 1], [0, 1]],
[[1, 1], [0, 0], [0, 0], [0, 0]],
],
[
[[1, 1], [0, 0], [1, 1], [1, 0]],
[[1, 1], [0, 0], [1, 1], [1, 0]],
[[1, 1], [0, 0], [1, 1], [1, 0]],
[[1, 0], [0, 1], [1, 0], [1, 1]],
],
],
)?;
fn test(input: impl NdArray, other: impl NdArray, expected: impl NdArray) -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Xor".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let inputs: HashMap<String, Tensor> = HashMap::from([
(INPUT_X.to_string(), Tensor::new(input, &Device::Cpu)?),
(INPUT_Y.to_string(), Tensor::new(other, &Device::Cpu)?),
]);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let expected = Tensor::new(expected, &Device::Cpu)?;
match expected.dims().len() {
0 => {
assert_eq!(z.to_vec0::<u8>()?, expected.to_vec0::<u8>()?)
}
1 => {
assert_eq!(z.to_vec1::<u8>()?, expected.to_vec1::<u8>()?)
}
2 => {
assert_eq!(z.to_vec2::<u8>()?, expected.to_vec2::<u8>()?)
}
3 => {
assert_eq!(z.to_vec3::<u8>()?, expected.to_vec3::<u8>()?)
}
4 => {
// Candle has no method equivalent to `to_vec4()`
// So, as a hack, we flatten it to a single dim vec to test the results
assert_eq!(
z.flatten_all()?.to_vec1::<u8>()?,
expected.flatten_all()?.to_vec1::<u8>()?
)
}
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
#[test]
fn test_sign_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Sign".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(
INPUT_X.to_string(),
Tensor::new(vec![-2f32, -1., 0., 1., 2.], &Device::Cpu)?,
);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
assert_eq!(
z.to_dtype(candle::DType::I64)?.to_vec1::<i64>()?.to_vec(),
vec![-1, -1, 0, 1, 1]
);
Ok(())
}
| candle/candle-onnx/tests/ops.rs/0 | {
"file_path": "candle/candle-onnx/tests/ops.rs",
"repo_id": "candle",
"token_count": 105590
} |
# see https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/container.py
from .module import Module
from typing import (
Any,
Dict,
Iterable,
Iterator,
Mapping,
Optional,
overload,
Tuple,
TypeVar,
Union,
)
from collections import OrderedDict, abc as container_abcs
import operator
from itertools import chain, islice
__all__ = ["Sequential", "ModuleList", "ModuleDict"]
T = TypeVar("T", bound=Module)
def _addindent(s_: str, numSpaces: int):
s = s_.split("\n")
# don't do anything for single-line stuff
if len(s) == 1:
return s_
first = s.pop(0)
s = [(numSpaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
class Sequential(Module):
r"""A sequential container.
Modules will be added to it in the order they are passed in the
constructor. Alternatively, an ``OrderedDict`` of modules can be
passed in. The ``forward()`` method of ``Sequential`` accepts any
input and forwards it to the first module it contains. It then
"chains" outputs to inputs sequentially for each subsequent module,
finally returning the output of the last module.
The value a ``Sequential`` provides over manually calling a sequence
of modules is that it allows treating the whole container as a
single module, such that performing a transformation on the
``Sequential`` applies to each of the modules it stores (which are
each a registered submodule of the ``Sequential``).
What's the difference between a ``Sequential`` and a
:class:`candle.nn.ModuleList`? A ``ModuleList`` is exactly what it
sounds like--a list for storing ``Module`` s! On the other hand,
the layers in a ``Sequential`` are connected in a cascading way.
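Example (an illustrative sketch; like the ``ModuleList`` example below, it assumes
``nn.Linear`` is available)::
model = nn.Sequential(nn.Linear(10, 20), nn.Linear(20, 5))
# Given an input tensor ``x``, calling ``model(x)`` runs ``x`` through both
# layers in insertion order.
output = model(x)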
"""
_modules: Dict[str, Module] # type: ignore[assignment]
@overload
def __init__(self, *args: Module) -> None: ...
@overload
def __init__(self, arg: "OrderedDict[str, Module]") -> None: ...
def __init__(self, *args):
super().__init__()
if len(args) == 1 and isinstance(args[0], OrderedDict):
for key, module in args[0].items():
self.add_module(key, module)
else:
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def _get_item_by_idx(self, iterator, idx) -> T:
"""Get the idx-th item of the iterator"""
size = len(self)
idx = operator.index(idx)
if not -size <= idx < size:
raise IndexError("index {} is out of range".format(idx))
idx %= size
return next(islice(iterator, idx, None))
def __getitem__(self, idx: Union[slice, int]) -> Union["Sequential", T]:
if isinstance(idx, slice):
return self.__class__(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def __setitem__(self, idx: int, module: Module) -> None:
key: str = self._get_item_by_idx(self._modules.keys(), idx)
return setattr(self, key, module)
def __delitem__(self, idx: Union[slice, int]) -> None:
if isinstance(idx, slice):
for key in list(self._modules.keys())[idx]:
delattr(self, key)
else:
key = self._get_item_by_idx(self._modules.keys(), idx)
delattr(self, key)
# To preserve numbering
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
def __len__(self) -> int:
return len(self._modules)
def __add__(self, other) -> "Sequential":
if isinstance(other, Sequential):
ret = Sequential()
for layer in self:
ret.append(layer)
for layer in other:
ret.append(layer)
return ret
else:
raise ValueError(
"add operator supports only objects " "of Sequential class, but {} is given.".format(str(type(other)))
)
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
del self[key]
return v
def __iadd__(self, other) -> "Sequential":
if isinstance(other, Sequential):
offset = len(self)
for i, module in enumerate(other):
self.add_module(str(i + offset), module)
return self
else:
raise ValueError(
"add operator supports only objects " "of Sequential class, but {} is given.".format(str(type(other)))
)
def __mul__(self, other: int) -> "Sequential":
if not isinstance(other, int):
raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}")
elif other <= 0:
raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}")
else:
combined = Sequential()
offset = 0
for _ in range(other):
for module in self:
combined.add_module(str(offset), module)
offset += 1
return combined
def __rmul__(self, other: int) -> "Sequential":
return self.__mul__(other)
def __imul__(self, other: int) -> "Sequential":
if not isinstance(other, int):
raise TypeError(f"unsupported operand type(s) for *: {type(self)} and {type(other)}")
elif other <= 0:
raise ValueError(f"Non-positive multiplication factor {other} for {type(self)}")
else:
len_original = len(self)
offset = len(self)
for _ in range(other - 1):
for i in range(len_original):
self.add_module(str(i + offset), self._modules[str(i)])
offset += len_original
return self
def __dir__(self):
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
# NB: We can't really type check this function as the type of input
# may change dynamically (as is tested in
# TestScript.test_sequential_intermediary_types). Cannot annotate
# with Any as TorchScript expects a more precise type
def forward(self, input):
for module in self:
input = module(input)
return input
def append(self, module: Module) -> "Sequential":
r"""Appends a given module to the end.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def insert(self, index: int, module: Module) -> "Sequential":
if not isinstance(module, Module):
raise AssertionError("module should be of type: {}".format(Module))
n = len(self._modules)
if not (-n <= index <= n):
raise IndexError("Index out of range: {}".format(index))
if index < 0:
index += n
for i in range(n, index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
return self
def extend(self, sequential) -> "Sequential":
for layer in sequential:
self.append(layer)
return self
class ModuleList(Module):
r"""Holds submodules in a list.
:class:`~candle.nn.ModuleList` can be indexed like a regular Python list, but
modules it contains are properly registered, and will be visible by all
:class:`~candle.nn.Module` methods.
Args:
modules (iterable, optional): an iterable of modules to add
Example::
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
def forward(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return x
"""
_modules: Dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Iterable[Module]] = None) -> None:
super().__init__()
if modules is not None:
self += modules
def _get_abs_string_index(self, idx):
"""Get the absolute index for the list of modules"""
idx = operator.index(idx)
if not (-len(self) <= idx < len(self)):
raise IndexError("index {} is out of range".format(idx))
if idx < 0:
idx += len(self)
return str(idx)
def __getitem__(self, idx: Union[int, slice]) -> Union[Module, "ModuleList"]:
if isinstance(idx, slice):
return self.__class__(list(self._modules.values())[idx])
else:
return self._modules[self._get_abs_string_index(idx)]
def __setitem__(self, idx: int, module: Module) -> None:
idx = self._get_abs_string_index(idx)
return setattr(self, str(idx), module)
def __delitem__(self, idx: Union[int, slice]) -> None:
if isinstance(idx, slice):
for k in range(len(self._modules))[idx]:
delattr(self, str(k))
else:
delattr(self, self._get_abs_string_index(idx))
# To preserve numbering, self._modules is being reconstructed with modules after deletion
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
def __len__(self) -> int:
return len(self._modules)
def __iter__(self) -> Iterator[Module]:
return iter(self._modules.values())
def __iadd__(self, modules: Iterable[Module]) -> "ModuleList":
return self.extend(modules)
def __add__(self, other: Iterable[Module]) -> "ModuleList":
combined = ModuleList()
for i, module in enumerate(chain(self, other)):
combined.add_module(str(i), module)
return combined
def __repr__(self):
"""A custom repr for ModuleList that compresses repeated module representations"""
list_of_reprs = [repr(item) for item in self]
if len(list_of_reprs) == 0:
return self._get_name() + "()"
start_end_indices = [[0, 0]]
repeated_blocks = [list_of_reprs[0]]
for i, r in enumerate(list_of_reprs[1:], 1):
if r == repeated_blocks[-1]:
start_end_indices[-1][1] += 1
continue
start_end_indices.append([i, i])
repeated_blocks.append(r)
lines = []
main_str = self._get_name() + "("
for (start_id, end_id), b in zip(start_end_indices, repeated_blocks):
local_repr = f"({start_id}): {b}" # default repr
if start_id != end_id:
n = end_id - start_id + 1
local_repr = f"({start_id}-{end_id}): {n} x {b}"
local_repr = _addindent(local_repr, 2)
lines.append(local_repr)
main_str += "\n " + "\n ".join(lines) + "\n"
main_str += ")"
return main_str
def __dir__(self):
keys = super().__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def insert(self, index: int, module: Module) -> None:
r"""Insert a given module before a given index in the list.
Args:
index (int): index to insert.
module (nn.Module): module to insert
"""
for i in range(len(self._modules), index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
self._modules[str(index)] = module
def append(self, module: Module) -> "ModuleList":
r"""Appends a given module to the end of the list.
Args:
module (nn.Module): module to append
"""
self.add_module(str(len(self)), module)
return self
def pop(self, key: Union[int, slice]) -> Module:
v = self[key]
del self[key]
return v
def extend(self, modules: Iterable[Module]) -> "ModuleList":
r"""Appends modules from a Python iterable to the end of the list.
Args:
modules (iterable): iterable of modules to append
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError(
"ModuleList.extend should be called with an " "iterable, but got " + type(modules).__name__
)
offset = len(self)
for i, module in enumerate(modules):
self.add_module(str(offset + i), module)
return self
# remove forward altogether to fallback on Module's _forward_unimplemented
class ModuleDict(Module):
r"""Holds submodules in a dictionary.
:class:`~candle.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~candle.nn.Module` methods.
:class:`~candle.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~candle.nn.ModuleDict.update`, the order of the merged
``OrderedDict``, ``dict`` (starting from Python 3.6) or another
:class:`~candle.nn.ModuleDict` (the argument to
:meth:`~candle.nn.ModuleDict.update`).
Note that :meth:`~candle.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict`` before Python version 3.6) does not
preserve the order of the merged mapping.
Args:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module)
"""
_modules: Dict[str, Module] # type: ignore[assignment]
def __init__(self, modules: Optional[Mapping[str, Module]] = None) -> None:
super().__init__()
if modules is not None:
self.update(modules)
def __getitem__(self, key: str) -> Module:
return self._modules[key]
def __setitem__(self, key: str, module: Module) -> None:
self.add_module(key, module)
def __delitem__(self, key: str) -> None:
del self._modules[key]
def __len__(self) -> int:
return len(self._modules)
def __iter__(self) -> Iterator[str]:
return iter(self._modules)
def __contains__(self, key: str) -> bool:
return key in self._modules
def clear(self) -> None:
"""Remove all items from the ModuleDict."""
self._modules.clear()
def pop(self, key: str) -> Module:
r"""Remove key from the ModuleDict and return its module.
Args:
key (str): key to pop from the ModuleDict
"""
v = self[key]
del self[key]
return v
def keys(self) -> Iterable[str]:
r"""Return an iterable of the ModuleDict keys."""
return self._modules.keys()
def items(self) -> Iterable[Tuple[str, Module]]:
r"""Return an iterable of the ModuleDict key/value pairs."""
return self._modules.items()
def values(self) -> Iterable[Module]:
r"""Return an iterable of the ModuleDict values."""
return self._modules.values()
def update(self, modules: Mapping[str, Module]) -> None:
r"""Update the :class:`~candle.nn.ModuleDict` with the key-value pairs from a
mapping or an iterable, overwriting existing keys.
.. note::
If :attr:`modules` is an ``OrderedDict``, a :class:`~candle.nn.ModuleDict`, or
an iterable of key-value pairs, the order of new elements in it is preserved.
Args:
modules (iterable): a mapping (dictionary) from string to :class:`~candle.nn.Module`,
or an iterable of key-value pairs of type (string, :class:`~candle.nn.Module`)
"""
if not isinstance(modules, container_abcs.Iterable):
raise TypeError(
"ModuleDict.update should be called with an "
"iterable of key/value pairs, but got " + type(modules).__name__
)
if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)):
for key, module in modules.items():
self[key] = module
else:
# modules here can be a list with two items
for j, m in enumerate(modules):
if not isinstance(m, container_abcs.Iterable):
raise TypeError(
"ModuleDict update sequence element "
"#" + str(j) + " should be Iterable; is" + type(m).__name__
)
if not len(m) == 2:
raise ValueError(
"ModuleDict update sequence element "
"#" + str(j) + " has length " + str(len(m)) + "; 2 is required"
)
# modules can be Mapping (what it's typed at), or a list: [(name1, module1), (name2, module2)]
# that's too cumbersome to type correctly with overloads, so we add an ignore here
self[m[0]] = m[1] # type: ignore[assignment]
# remove forward altogether to fallback on Module's _forward_unimplemented
| candle/candle-pyo3/py_src/candle/nn/container.py/0 | {
"file_path": "candle/candle-pyo3/py_src/candle/nn/container.py",
"repo_id": "candle",
"token_count": 7602
} |
//! Based on the BEIT vision-language model.
//!
//! See "BEIT: BERT Pre-Training of Image Transformers", Bao et al. 2021
//! - [Arxiv](https://arxiv.org/abs/2106.08254)
//! - [Github](https://github.com/microsoft/unilm/tree/master/beit)
//!
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder};
const IMG_SIZE: usize = 384;
const PATCH_SIZE: usize = 16;
const NUM_CLASSES: usize = 1000;
const WINDOW_SIZE: usize = IMG_SIZE / PATCH_SIZE; // 384 / 16 = 24
const NB_TOKENS: usize = WINDOW_SIZE * WINDOW_SIZE + 1; // 24 * 24 + 1 = 577
fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> {
if bias {
candle_nn::linear(in_dim, out_dim, vb)
} else {
candle_nn::linear_no_bias(in_dim, out_dim, vb)
}
}
#[derive(Debug)]
struct Attention {
qkv: Linear,
proj: Linear,
relative_position_bias_table: Tensor,
relative_position_index: Tensor,
num_heads: usize,
scale: f64,
}
impl Attention {
fn new(
vb: VarBuilder,
dim: usize,
num_heads: usize,
qkv_bias: bool,
proj_bias: bool,
) -> Result<Self> {
let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?;
let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?;
// num_relative_distance = token-token(47x47) + token-CLS(1) + CLS-token(1) + CLS-CLS(1) = 2212
let num_relative_distance = (2 * WINDOW_SIZE - 1) * (2 * WINDOW_SIZE - 1) + 3;
let relative_position_bias_table = vb.get(
(num_relative_distance, num_heads),
"relative_position_bias_table",
)?;
let relative_position_index =
Self::gen_relative_position_index(relative_position_bias_table.device())?;
let scale = 1. / ((dim / num_heads) as f64).sqrt();
Ok(Self {
qkv,
proj,
relative_position_bias_table,
relative_position_index,
num_heads,
scale,
})
}
}
impl Attention {
// See: https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/beit.py#L61
fn gen_relative_position_index(device: &Device) -> Result<Tensor> {
let num_relative_distance = (2 * WINDOW_SIZE - 1) * (2 * WINDOW_SIZE - 1) + 3;
let w_area = WINDOW_SIZE * WINDOW_SIZE;
let t_arange: Tensor = Tensor::arange(0, WINDOW_SIZE as u32, device)?;
let t_ndgrid = Tensor::meshgrid(&[&t_arange, &t_arange], false)?;
let coords_flatten = Tensor::stack(&t_ndgrid, 0)?.flatten(1, 2)?;
let tmp1 = coords_flatten
.unsqueeze(2)?
.broadcast_as((2, w_area, w_area))?
.to_dtype(DType::I64)?;
let tmp2 = coords_flatten
.unsqueeze(1)?
.broadcast_as((2, w_area, w_area))?
.to_dtype(DType::I64)?;
let relative_coords = (tmp1 - tmp2)?
.transpose(0, 1)? // 102
.transpose(1, 2)? // 120
.contiguous()?;
let relative_coords = relative_coords.slice_assign(
&[0..w_area, 0..w_area, 0..1],
&(relative_coords.i((0..w_area, 0..w_area, 0..1))? + (WINDOW_SIZE - 1) as f64)?,
)?;
let relative_coords = relative_coords.slice_assign(
&[0..w_area, 0..w_area, 1..2],
&(relative_coords.i((0..w_area, 0..w_area, 1..2))? + (WINDOW_SIZE - 1) as f64)?,
)?;
let relative_coords = relative_coords.slice_assign(
&[0..w_area, 0..w_area, 0..1],
&(relative_coords.i((.., .., 0..1))? * (2. * (WINDOW_SIZE as f64) - 1.))?,
)?;
Tensor::zeros((w_area + 1, w_area + 1), DType::I64, device)?
.slice_assign(&[1.., 1..], &relative_coords.sum(2)?)?
.slice_assign(
&[0..1, 0..(w_area + 1)],
&(Tensor::ones((1, w_area + 1), DType::I64, device)?
* ((num_relative_distance - 3) as f64))?
.to_dtype(DType::I64)?,
)?
.slice_assign(
&[0..(w_area + 1), 0..1],
&(Tensor::ones((w_area + 1, 1), DType::I64, device)?
* ((num_relative_distance - 2) as f64))?
.to_dtype(DType::I64)?,
)?
.slice_assign(
&[0..1, 0..1],
&(Tensor::ones((1, 1), DType::I64, device)?
* ((num_relative_distance - 1) as f64))?
.to_dtype(DType::I64)?,
)
}
fn _get_rel_pos_bias(&self) -> Result<Tensor> {
self.relative_position_bias_table
.index_select(
&self
.relative_position_index
.flatten_all()?
.to_dtype(DType::U32)?,
0,
)?
.reshape((NB_TOKENS, NB_TOKENS, ()))?
.transpose(0, 1)? // 102
.transpose(0, 2)? // 201
.contiguous()?
.unsqueeze(0)
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, n, c) = xs.dims3()?;
let qkv = self
.qkv
.forward(xs)?
.reshape((b, n, 3, self.num_heads, c / self.num_heads))?
.transpose(1, 2)? // 02134
.transpose(0, 1)? // 20134
.transpose(2, 3)?; // 20314
let q = (qkv.i(0)? * self.scale)?;
let k = qkv.i(1)?.contiguous()?;
let v = qkv.i(2)?.contiguous()?;
let attn = (&q.matmul(&k.t()?)? + self._get_rel_pos_bias())?;
let attn = candle_nn::ops::softmax(&attn, D::Minus1)?;
let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?;
self.proj.forward(&attn)
}
}
#[derive(Debug)]
struct LayerScale {
gamma: Tensor,
}
impl LayerScale {
fn new(vb: VarBuilder, dim: usize) -> Result<Self> {
let gamma = vb.get(dim, "gamma")?;
Ok(Self { gamma })
}
}
impl Module for LayerScale {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.broadcast_mul(&self.gamma)
}
}
#[derive(Debug)]
struct Mlp {
fc1: Linear,
fc2: Linear,
}
impl Mlp {
fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> {
let out_features = in_features;
let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?;
let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?;
Ok(Self { fc1, fc2 })
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.fc1.forward(xs)?.gelu()?;
self.fc2.forward(&xs)
}
}
#[derive(Debug)]
struct Block {
norm1: LayerNorm,
attn: Attention,
ls1: LayerScale,
norm2: LayerNorm,
mlp: Mlp,
ls2: LayerScale,
}
impl Block {
fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
let norm1 = layer_norm(dim, 1e-6, vb.pp("norm1"))?;
let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?;
let ls1 = LayerScale::new(vb.pp("ls1"), dim)?;
let norm2 = layer_norm(dim, 1e-6, vb.pp("norm2"))?;
let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?;
let ls2 = LayerScale::new(vb.pp("ls2"), dim)?;
Ok(Self {
norm1,
attn,
ls1,
norm2,
mlp,
ls2,
})
}
}
impl Module for Block {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
let xs = self
.ls1
.forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = self
.ls2
.forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?;
xs + residual
}
}
#[derive(Debug)]
struct PatchEmbed {
proj: candle_nn::Conv2d,
patch_size: (usize, usize),
}
impl PatchEmbed {
fn new(vb: VarBuilder, patch_size: usize, in_chans: usize, embed_dim: usize) -> Result<Self> {
let config = candle_nn::Conv2dConfig {
stride: patch_size,
..Default::default()
};
let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?;
Ok(Self {
proj,
patch_size: (patch_size, patch_size),
})
}
}
impl Module for PatchEmbed {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (_b, _c, h, w) = xs.dims4()?;
let (patch_h, patch_w) = self.patch_size;
if (h % patch_h) != 0 {
candle::bail!("image height {h} is not a multiple of patch height {patch_h}")
}
if (w % patch_w) != 0 {
candle::bail!("image width {w} is not a multiple of patch width {patch_w}")
}
let xs = self.proj.forward(xs)?;
let (b, c, h, w) = xs.dims4()?;
// flatten embeddings.
xs.reshape((b, c, h * w))?.transpose(1, 2)
}
}
#[derive(Debug)]
pub struct BeitVisionTransformer {
patch_embed: PatchEmbed,
cls_token: Tensor,
blocks: Vec<Block>,
norm: LayerNorm,
head: Linear,
}
impl BeitVisionTransformer {
pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> {
let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), PATCH_SIZE, 3, embed_dim)?;
let cls_token = vb.get((1, 1, embed_dim), "cls_token")?;
let head = linear(vb.pp("head"), embed_dim, NUM_CLASSES, true)?;
let norm = layer_norm(embed_dim, 1e-6, vb.pp("norm"))?;
let vb_b = vb.pp("blocks");
let blocks = (0..depth)
.map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads))
.collect::<Result<Vec<_>>>()?;
Ok(Self {
patch_embed,
cls_token,
blocks,
norm,
head,
})
}
fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> {
let xs = self.patch_embed.forward(xs)?;
Tensor::cat(&[&self.cls_token, &xs], 1)
}
fn get_intermediate_layers_not_chunked(
&self,
xs: &Tensor,
blocks_to_take: &[usize],
) -> Result<Vec<Tensor>> {
let mut xs = self.prepare_tokens_with_mask(xs)?;
let mut output = Vec::new();
for (i, blk) in self.blocks.iter().enumerate() {
xs = blk.forward(&xs)?;
if blocks_to_take.contains(&i) {
output.push(xs.clone());
}
}
if output.len() != blocks_to_take.len() {
candle::bail!(
"only {} / {} blocks found",
output.len(),
blocks_to_take.len()
);
}
Ok(output)
}
pub fn get_intermediate_layers(
&self,
xs: &Tensor,
blocks_to_take: &[usize],
reshape: bool,
return_class_token: bool,
norm: bool,
) -> Result<Tensor> {
let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?;
let outputs = if norm {
outputs
.iter()
.map(|out| self.norm.forward(out))
.collect::<Result<Vec<_>>>()?
} else {
outputs
};
let class_tokens = outputs
.iter()
.map(|out| out.i((.., 0)))
.collect::<Result<Vec<_>>>()?;
let outputs = outputs
.iter()
.map(|out| out.i((.., 1..)))
.collect::<Result<Vec<_>>>()?;
let outputs = if reshape {
let (b, _c, w, h) = xs.dims4()?;
let patch_size = self.patch_embed.patch_size.0;
let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size));
outputs
.iter()
.map(|out| {
out.reshape((b, w / patch_size, h / patch_size, num_channels))?
.transpose(2, 3)?
.transpose(1, 2)
})
.collect::<Result<Vec<_>>>()?
} else {
outputs
};
let outputs = if return_class_token {
outputs
.iter()
.zip(class_tokens.iter())
.map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1))
.collect::<Result<Vec<_>>>()?
} else {
outputs
};
Tensor::stack(&outputs[..], 0)
}
}
impl Module for BeitVisionTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = self.prepare_tokens_with_mask(xs)?;
for blk in self.blocks.iter() {
xs = blk.forward(&xs)?
}
let xs_moy_local_tokens = xs.i((.., 1..))?.mean(1)?;
let xs_norm = self.norm.forward(&xs_moy_local_tokens)?;
self.head.forward(&xs_norm)
}
}
pub fn vit_base(vb: VarBuilder) -> Result<BeitVisionTransformer> {
BeitVisionTransformer::new(vb, 12, 768, 12)
}
pub fn vit_large(vb: VarBuilder) -> Result<BeitVisionTransformer> {
BeitVisionTransformer::new(vb, 24, 1024, 16)
}
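// Usage sketch (hedged): assuming `vb` is a `VarBuilder` over pretrained BEiT weights and
// `img` is a (batch, 3, 384, 384) float tensor preprocessed as in the upstream repo, the
// classifier can be run as:
//
// let model = vit_base(vb)?;
// let logits = model.forward(&img)?; // (batch, NUM_CLASSES)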
| candle/candle-transformers/src/models/beit.rs/0 | {
"file_path": "candle/candle-transformers/src/models/beit.rs",
"repo_id": "candle",
"token_count": 7083
} |
//! Implementation of the Descript Audio Codec (DAC) model
//!
//! See: [Descript Audio Codec](https://github.com/descriptinc/descript-audio-codec)
//!
//! An efficient neural codec for compressing/decompressing audio
//!
use crate::models::encodec;
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, VarBuilder};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub num_codebooks: usize,
pub model_bitrate: u32,
pub codebook_size: usize,
pub latent_dim: usize,
pub frame_rate: u32,
pub sampling_rate: u32,
}
#[derive(Debug, Clone)]
pub struct Snake1d {
alpha: Tensor,
}
impl Snake1d {
pub fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
let alpha = vb.get((1, channels, 1), "alpha")?;
Ok(Self { alpha })
}
}
impl candle::Module for Snake1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
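// Snake activation: x + (1 / alpha) * sin^2(alpha * x); the 1e-9 added to alpha below
// guards against a division by zero.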
let xs_shape = xs.shape();
let xs = xs.flatten_from(2)?;
let sin = self.alpha.broadcast_mul(&xs)?.sin()?;
let sin = (&sin * &sin)?;
(xs + (&self.alpha + 1e-9)?.recip()?.broadcast_mul(&sin)?)?.reshape(xs_shape)
}
}
#[derive(Debug, Clone)]
pub struct ResidualUnit {
snake1: Snake1d,
conv1: Conv1d,
snake2: Snake1d,
conv2: Conv1d,
}
impl ResidualUnit {
pub fn new(dim: usize, dilation: usize, vb: VarBuilder) -> Result<Self> {
let pad = ((7 - 1) * dilation) / 2;
let vb = vb.pp("block");
let snake1 = Snake1d::new(dim, vb.pp(0))?;
let cfg1 = Conv1dConfig {
dilation,
padding: pad,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(dim, dim, 7, cfg1, vb.pp(1))?;
let snake2 = Snake1d::new(dim, vb.pp(2))?;
let conv2 = encodec::conv1d_weight_norm(dim, dim, 1, Default::default(), vb.pp(3))?;
Ok(Self {
snake1,
conv1,
snake2,
conv2,
})
}
}
impl candle::Module for ResidualUnit {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = xs
.apply(&self.snake1)?
.apply(&self.conv1)?
.apply(&self.snake2)?
.apply(&self.conv2)?;
let pad = (xs.dim(D::Minus1)? - ys.dim(D::Minus1)?) / 2;
if pad > 0 {
&ys + xs.narrow(D::Minus1, pad, ys.dim(D::Minus1)?)
} else {
ys + xs
}
}
}
#[derive(Debug, Clone)]
pub struct EncoderBlock {
res1: ResidualUnit,
res2: ResidualUnit,
res3: ResidualUnit,
snake1: Snake1d,
conv1: Conv1d,
}
impl EncoderBlock {
pub fn new(dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("block");
let res1 = ResidualUnit::new(dim / 2, 1, vb.pp(0))?;
let res2 = ResidualUnit::new(dim / 2, 3, vb.pp(1))?;
let res3 = ResidualUnit::new(dim / 2, 9, vb.pp(2))?;
let snake1 = Snake1d::new(dim / 2, vb.pp(3))?;
let cfg1 = Conv1dConfig {
stride,
padding: (stride + 1) / 2,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(dim / 2, dim, 2 * stride, cfg1, vb.pp(4))?;
Ok(Self {
res1,
res2,
res3,
snake1,
conv1,
})
}
}
impl candle::Module for EncoderBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.res1)?
.apply(&self.res2)?
.apply(&self.res3)?
.apply(&self.snake1)?
.apply(&self.conv1)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
conv1: Conv1d,
blocks: Vec<EncoderBlock>,
snake1: Snake1d,
conv2: Conv1d,
}
impl candle::Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.conv1)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?
}
xs.apply(&self.snake1)?.apply(&self.conv2)
}
}
impl Encoder {
pub fn new(
mut d_model: usize,
strides: &[usize],
d_latent: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("block");
let cfg1 = Conv1dConfig {
padding: 3,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(1, d_model, 7, cfg1, vb.pp(0))?;
let mut blocks = Vec::with_capacity(strides.len());
for (block_idx, stride) in strides.iter().enumerate() {
d_model *= 2;
let block = EncoderBlock::new(d_model, *stride, vb.pp(block_idx + 1))?;
blocks.push(block)
}
let snake1 = Snake1d::new(d_model, vb.pp(strides.len() + 1))?;
let cfg2 = Conv1dConfig {
padding: 1,
..Default::default()
};
let conv2 =
encodec::conv1d_weight_norm(d_model, d_latent, 3, cfg2, vb.pp(strides.len() + 2))?;
Ok(Self {
conv1,
blocks,
snake1,
conv2,
})
}
}
#[derive(Debug, Clone)]
pub struct DecoderBlock {
snake1: Snake1d,
conv_tr1: ConvTranspose1d,
res1: ResidualUnit,
res2: ResidualUnit,
res3: ResidualUnit,
}
impl DecoderBlock {
pub fn new(in_dim: usize, out_dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("block");
let snake1 = Snake1d::new(in_dim, vb.pp(0))?;
let cfg = ConvTranspose1dConfig {
stride,
padding: (stride + 1) / 2,
..Default::default()
};
let conv_tr1 = encodec::conv_transpose1d_weight_norm(
in_dim,
out_dim,
2 * stride,
true,
cfg,
vb.pp(1),
)?;
let res1 = ResidualUnit::new(out_dim, 1, vb.pp(2))?;
let res2 = ResidualUnit::new(out_dim, 3, vb.pp(3))?;
let res3 = ResidualUnit::new(out_dim, 9, vb.pp(4))?;
Ok(Self {
snake1,
conv_tr1,
res1,
res2,
res3,
})
}
}
impl candle_nn::Module for DecoderBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.snake1)?
.apply(&self.conv_tr1)?
.apply(&self.res1)?
.apply(&self.res2)?
.apply(&self.res3)
}
}
#[derive(Debug, Clone)]
pub struct Decoder {
conv1: Conv1d,
blocks: Vec<DecoderBlock>,
snake1: Snake1d,
conv2: Conv1d,
}
impl Decoder {
pub fn new(
in_c: usize,
mut channels: usize,
rates: &[usize],
d_out: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("model");
let cfg1 = Conv1dConfig {
padding: 3,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(in_c, channels, 7, cfg1, vb.pp(0))?;
let mut blocks = Vec::with_capacity(rates.len());
for (idx, stride) in rates.iter().enumerate() {
let block = DecoderBlock::new(channels, channels / 2, *stride, vb.pp(idx + 1))?;
channels /= 2;
blocks.push(block)
}
let snake1 = Snake1d::new(channels, vb.pp(rates.len() + 1))?;
let conv2 = encodec::conv1d_weight_norm(channels, d_out, 7, cfg1, vb.pp(rates.len() + 2))?;
Ok(Self {
conv1,
blocks,
snake1,
conv2,
})
}
}
impl candle::Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.conv1)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?
}
xs.apply(&self.snake1)?.apply(&self.conv2)
}
}
#[allow(unused)]
#[derive(Clone, Debug)]
pub struct VectorQuantizer {
in_proj: Conv1d,
out_proj: Conv1d,
codebook: candle_nn::Embedding,
}
impl VectorQuantizer {
pub fn new(in_dim: usize, cb_size: usize, cb_dim: usize, vb: VarBuilder) -> Result<Self> {
let in_proj =
encodec::conv1d_weight_norm(in_dim, cb_dim, 1, Default::default(), vb.pp("in_proj"))?;
let out_proj =
encodec::conv1d_weight_norm(cb_dim, in_dim, 1, Default::default(), vb.pp("out_proj"))?;
let codebook = candle_nn::embedding(cb_size, cb_dim, vb.pp("codebook"))?;
Ok(Self {
in_proj,
out_proj,
codebook,
})
}
pub fn embed_code(&self, embed_id: &Tensor) -> Result<Tensor> {
embed_id.apply(&self.codebook)
}
pub fn decode_code(&self, embed_id: &Tensor) -> Result<Tensor> {
self.embed_code(embed_id)?.transpose(1, 2)
}
}
#[derive(Clone, Debug)]
pub struct ResidualVectorQuantizer {
quantizers: Vec<VectorQuantizer>,
}
impl ResidualVectorQuantizer {
pub fn new(
input_dim: usize,
n_codebooks: usize,
cb_size: usize,
cb_dim: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = &vb.pp("quantizers");
let quantizers = (0..n_codebooks)
.map(|i| VectorQuantizer::new(input_dim, cb_size, cb_dim, vb.pp(i)))
.collect::<Result<Vec<_>>>()?;
Ok(Self { quantizers })
}
pub fn from_codes(&self, codes: &Tensor) -> Result<Tensor> {
let mut sum = None;
for (idx, quantizer) in self.quantizers.iter().enumerate() {
let z_p_i = quantizer.decode_code(&codes.i((.., idx))?)?;
let z_q_i = z_p_i.apply(&quantizer.out_proj)?;
let s = match sum {
None => z_q_i,
Some(s) => (s + z_q_i)?,
};
sum = Some(s)
}
match sum {
Some(s) => Ok(s),
None => candle::bail!("empty codebooks"),
}
}
}
#[derive(Debug, Clone)]
pub struct Model {
pub encoder: Encoder,
pub quantizer: ResidualVectorQuantizer,
pub decoder: Decoder,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("model");
let encoder = Encoder::new(64, &[2, 4, 8, 8], cfg.latent_dim, vb.pp("encoder"))?;
let quantizer = ResidualVectorQuantizer::new(
cfg.latent_dim,
cfg.num_codebooks,
cfg.codebook_size,
8,
vb.pp("quantizer"),
)?;
let decoder = Decoder::new(cfg.latent_dim, 1536, &[8, 8, 4, 2], 1, vb.pp("decoder"))?;
Ok(Self {
encoder,
decoder,
quantizer,
})
}
pub fn decode_codes(&self, audio_codes: &Tensor) -> Result<Tensor> {
let audio_values = self.quantizer.from_codes(audio_codes)?;
audio_values.apply(&self.decoder)
}
}
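// Usage sketch (hedged): assuming `cfg` is a parsed `Config`, `vb` a `VarBuilder` over DAC
// weights, and `codes` an integer tensor of shape (batch, num_codebooks, frames):
//
// let model = Model::new(&cfg, vb)?;
// let audio = model.decode_codes(&codes)?; // (batch, 1, samples)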
| candle/candle-transformers/src/models/dac.rs/0 | {
"file_path": "candle/candle-transformers/src/models/dac.rs",
"repo_id": "candle",
"token_count": 5695
} |
use candle::{Device, Result, Tensor};
pub fn get_noise(
num_samples: usize,
height: usize,
width: usize,
device: &Device,
) -> Result<Tensor> {
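// Round the requested resolution up to a multiple of 16, then map it to the latent grid
// (1/8th of the pixel resolution): ceil(dim / 16) * 2.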
let height = (height + 15) / 16 * 2;
let width = (width + 15) / 16 * 2;
Tensor::randn(0f32, 1., (num_samples, 16, height, width), device)
}
#[derive(Debug, Clone)]
pub struct State {
pub img: Tensor,
pub img_ids: Tensor,
pub txt: Tensor,
pub txt_ids: Tensor,
pub vec: Tensor,
}
impl State {
pub fn new(t5_emb: &Tensor, clip_emb: &Tensor, img: &Tensor) -> Result<Self> {
let dtype = img.dtype();
let (bs, c, h, w) = img.dims4()?;
let dev = img.device();
let img = img.reshape((bs, c, h / 2, 2, w / 2, 2))?; // (b, c, h, ph, w, pw)
let img = img.permute((0, 2, 4, 1, 3, 5))?; // (b, h, w, c, ph, pw)
let img = img.reshape((bs, h / 2 * w / 2, c * 4))?;
let img_ids = Tensor::stack(
&[
Tensor::full(0u32, (h / 2, w / 2), dev)?,
Tensor::arange(0u32, h as u32 / 2, dev)?
.reshape(((), 1))?
.broadcast_as((h / 2, w / 2))?,
Tensor::arange(0u32, w as u32 / 2, dev)?
.reshape((1, ()))?
.broadcast_as((h / 2, w / 2))?,
],
2,
)?
.to_dtype(dtype)?;
let img_ids = img_ids.reshape((1, h / 2 * w / 2, 3))?;
let img_ids = img_ids.repeat((bs, 1, 1))?;
let txt = t5_emb.repeat(bs)?;
let txt_ids = Tensor::zeros((bs, txt.dim(1)?, 3), dtype, dev)?;
let vec = clip_emb.repeat(bs)?;
Ok(Self {
img,
img_ids,
txt,
txt_ids,
vec,
})
}
}
fn time_shift(mu: f64, sigma: f64, t: f64) -> f64 {
let e = mu.exp();
e / (e + (1. / t - 1.).powf(sigma))
}
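// `time_shift` warps a timestep t to e^mu / (e^mu + (1 / t - 1)^sigma); `get_schedule` below
// derives `mu` from the image sequence length when a shift triple is provided.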
/// `shift` is a triple `(image_seq_len, base_shift, max_shift)`.
pub fn get_schedule(num_steps: usize, shift: Option<(usize, f64, f64)>) -> Vec<f64> {
let timesteps: Vec<f64> = (0..=num_steps)
.map(|v| v as f64 / num_steps as f64)
.rev()
.collect();
match shift {
None => timesteps,
Some((image_seq_len, y1, y2)) => {
let (x1, x2) = (256., 4096.);
let m = (y2 - y1) / (x2 - x1);
let b = y1 - m * x1;
let mu = m * image_seq_len as f64 + b;
timesteps
.into_iter()
.map(|v| time_shift(mu, 1., v))
.collect()
}
}
}
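// For example, `get_schedule(4, None)` yields the plain linear schedule
// [1.0, 0.75, 0.5, 0.25, 0.0], while `Some((image_seq_len, base_shift, max_shift))` linearly
// interpolates `mu` between `base_shift` (at 256 tokens) and `max_shift` (at 4096 tokens)
// and warps every timestep through `time_shift`.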
pub fn unpack(xs: &Tensor, height: usize, width: usize) -> Result<Tensor> {
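// Inverse of the patch packing done in `State::new`: unfold the packed (c * 4) channels back
// into 2x2 spatial patches, giving a (b, c, h, w) latent image.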
let (b, _h_w, c_ph_pw) = xs.dims3()?;
let height = (height + 15) / 16;
let width = (width + 15) / 16;
xs.reshape((b, height, width, c_ph_pw / 4, 2, 2))? // (b, h, w, c, ph, pw)
.permute((0, 3, 1, 4, 2, 5))? // (b, c, h, ph, w, pw)
.reshape((b, c_ph_pw / 4, height * 2, width * 2))
}
#[allow(clippy::too_many_arguments)]
pub fn denoise<M: super::WithForward>(
model: &M,
img: &Tensor,
img_ids: &Tensor,
txt: &Tensor,
txt_ids: &Tensor,
vec_: &Tensor,
timesteps: &[f64],
guidance: f64,
) -> Result<Tensor> {
let b_sz = img.dim(0)?;
let dev = img.device();
let guidance = Tensor::full(guidance as f32, b_sz, dev)?;
let mut img = img.clone();
for window in timesteps.windows(2) {
let (t_curr, t_prev) = match window {
[a, b] => (a, b),
_ => continue,
};
let t_vec = Tensor::full(*t_curr as f32, b_sz, dev)?;
let pred = model.forward(&img, img_ids, txt, txt_ids, &t_vec, vec_, Some(&guidance))?;
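// Euler step of the flow ODE: move the latent along the predicted velocity by the
// (negative) timestep delta.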
img = (img + pred * (t_prev - t_curr))?
}
Ok(img)
}
| candle/candle-transformers/src/models/flux/sampling.rs/0 | {
"file_path": "candle/candle-transformers/src/models/flux/sampling.rs",
"repo_id": "candle",
"token_count": 2063
} |
//! MetaVoice Studio ML Models
//!
//! See MetaVoice's TTS and voice cloning models:
//! - [Github](https://github.com/metavoiceio/metavoice-src)
//! - [Website](https://studio.metavoice.ai/)
use candle::{DType, Device, Error as E, IndexOp, Module, Result, Tensor, D};
use candle_nn::{embedding, linear_b, rms_norm, Embedding, Linear, RmsNorm, VarBuilder};
// Equivalent to torch.repeat_interleave
pub(crate) fn repeat_interleave(img: &Tensor, repeats: usize, dim: usize) -> Result<Tensor> {
let img = img.unsqueeze(dim + 1)?;
let mut dims = img.dims().to_vec();
dims[dim + 1] = repeats;
img.broadcast_as(dims)?.flatten(dim, dim + 1)
}
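// E.g. with `repeats = 2` and `dim = 1`, a (2, 3) tensor [[a, b, c], [d, e, f]] becomes the
// (2, 6) tensor [[a, a, b, b, c, c], [d, d, e, e, f, f]], matching torch.repeat_interleave.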
pub mod speaker_encoder {
use super::*;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub sampling_rate: usize,
pub partial_n_frames: usize,
pub model_hidden_size: usize,
pub model_embedding_size: usize,
pub model_num_layers: usize,
pub mel_window_length: usize,
pub mel_window_step: usize,
pub mel_n_channels: usize,
}
impl Config {
pub fn cfg() -> Self {
Self {
sampling_rate: 16_000,
partial_n_frames: 160,
model_hidden_size: 256,
model_embedding_size: 256,
model_num_layers: 3,
mel_window_length: 25,
mel_window_step: 10,
mel_n_channels: 40,
}
}
}
pub struct Model {
lstms: Vec<candle_nn::LSTM>,
linear: Linear,
cfg: Config,
}
type Slice = (usize, usize);
impl Model {
pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> {
let mut lstms = Vec::with_capacity(cfg.model_num_layers);
let vb_l = vb.pp("lstm");
for layer_idx in 0..cfg.model_num_layers {
let c = candle_nn::LSTMConfig {
layer_idx,
..Default::default()
};
let lstm = candle_nn::lstm(
cfg.mel_n_channels,
cfg.model_hidden_size,
c,
vb_l.pp(layer_idx),
)?;
lstms.push(lstm)
}
let linear = linear_b(
cfg.model_hidden_size,
cfg.model_embedding_size,
true,
vb.pp("linear"),
)?;
Ok(Self { lstms, linear, cfg })
}
fn compute_partial_slices(
&self,
n_samples: usize,
rate: f64,
min_coverage: f64,
) -> (Vec<Slice>, Vec<Slice>) {
let c = &self.cfg;
// Compute how many frames separate two partial utterances
let samples_per_frame = c.sampling_rate * c.mel_window_step / 1000;
let n_frames = n_samples / samples_per_frame + 1;
let frame_step =
(c.sampling_rate as f64 / rate / samples_per_frame as f64).round() as usize;
let steps = (n_frames + frame_step).saturating_sub(c.partial_n_frames) + 1;
// Compute the slices.
let mut wav_slices = vec![];
let mut mel_slices = vec![];
for i in (0..steps).step_by(frame_step) {
let mel_range = (i, i + c.partial_n_frames);
let wav_range = (
i * samples_per_frame,
(i + c.partial_n_frames) * samples_per_frame,
);
mel_slices.push(mel_range);
wav_slices.push(wav_range);
}
// Evaluate whether extra padding is warranted or not.
let last_wav_range = match wav_slices.last() {
None => return (wav_slices, mel_slices),
Some(l) => *l,
};
let coverage = (n_samples - last_wav_range.0) as f64
/ (last_wav_range.1 - last_wav_range.0) as f64;
// Drop the last partial if it covers too little real audio.
if coverage < min_coverage && mel_slices.len() > 1 {
mel_slices.pop();
wav_slices.pop();
}
(wav_slices, mel_slices)
}
pub fn embed_utterance(
&self,
wav: &[f32],
mel_filters: &[f32],
rate: f64,
min_c: f64,
device: &Device,
) -> Result<Tensor> {
let (wav_slices, mel_slices) = self.compute_partial_slices(wav.len(), rate, min_c);
let max_wave_length = match wav_slices.last() {
Some(v) => v.1,
None => candle::bail!("empty wav slices"),
};
let wav = if max_wave_length > wav.len() {
let mut wav = wav.to_vec();
// Pad with zeros up to the end of the last partial utterance.
wav.resize(max_wave_length, 0.0);
std::borrow::Cow::Owned(wav)
} else {
std::borrow::Cow::Borrowed(wav)
};
let mel = crate::models::whisper::audio::log_mel_spectrogram_(
wav.as_ref(),
mel_filters,
/* fft_size */ self.cfg.mel_window_length,
/* fft_step */ self.cfg.mel_window_step,
self.cfg.mel_n_channels,
false,
);
let mels = mel_slices
.iter()
.flat_map(|s| [mel[s.0], mel[s.1]])
.collect::<Vec<_>>();
let mels = Tensor::from_vec(mels, (mel_slices.len(), 2), device)?;
let partial_embeds = self.forward(&mels)?;
let raw_embed = partial_embeds.mean(0)?;
let norm = raw_embed.sqr()?.sum_all()?.sqrt()?;
raw_embed.broadcast_div(&norm)
}
}
impl Module for Model {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
use candle_nn::RNN;
// This is different from the Python transformers version as candle LSTM is batch first.
let xs = xs.t()?;
let mut xs = xs.clone();
for layer in self.lstms.iter() {
let states = layer.seq(&xs)?;
xs = layer.states_to_tensor(&states)?;
}
let xs = xs.t()?;
let embeds_raw = xs.apply(&self.linear)?.relu()?;
let norm = embeds_raw.sqr()?.sum_keepdim(1)?.sqrt()?;
embeds_raw.broadcast_div(&norm)
}
}
}
type Rank = u32;
pub mod tokenizers {
use super::*;
use std::collections::HashMap;
pub struct BPE {
pub re: fancy_regex::Regex,
pub end_of_text: usize,
pub offset: usize,
pub ranks: HashMap<Vec<u8>, Rank>,
span: tracing::Span,
}
impl BPE {
pub fn from_json(json: &serde_json::Value, end_of_text: usize) -> Result<Self> {
let json = match json.as_object() {
None => candle::bail!("json value is not an object"),
Some(json) => json,
};
let re = match json.get("pat_str") {
None => candle::bail!("json object has no pat_str field"),
Some(pat_str) => match pat_str.as_str() {
None => candle::bail!("pat_str field is not a string"),
Some(pat_str) => fancy_regex::Regex::new(pat_str).map_err(E::wrap)?,
},
};
let offset = match json.get("offset") {
None => candle::bail!("json object has no offset field"),
Some(offset) => match offset.as_u64() {
None => candle::bail!("offset field is not a positive int"),
Some(offset) => offset as usize,
},
};
let mut ranks = HashMap::new();
for id in 0u8..=255 {
ranks.insert(vec![id], id as u32);
}
let mergeable_ranks = match json.get("mergeable_ranks") {
None => candle::bail!("json object has no mergeable_ranks field"),
Some(mr) => match mr.as_object() {
None => candle::bail!("mergeable_ranks is not an object"),
Some(mr) => mr,
},
};
for (key, value) in mergeable_ranks.iter() {
let value = match value.as_u64() {
None => candle::bail!("mergeable_ranks '{key}' is not a u64"),
Some(value) => value as u32,
};
if value < 256 {
continue;
}
// No escaping for other keys.
let key = key.as_bytes().to_vec();
ranks.insert(key, value);
}
Ok(Self {
re,
end_of_text,
offset,
ranks,
span: tracing::span!(tracing::Level::TRACE, "bpe"),
})
}
// Taken from:
// https://github.com/openai/tiktoken/blob/1b9faf2779855124f05174adf1383e53689ed94b/src/lib.rs#L16C1-L82C2
fn _byte_pair_merge(&self, piece: &[u8]) -> Vec<(usize, Rank)> {
// This is a vector of (start, rank).
// The rank is of the pair starting at position start.
let mut parts = Vec::with_capacity(piece.len() + 1);
// Note that we hash bytes when indexing into `ranks`, not token pairs. As long as we train BPE
// the way we currently do, this is equivalent. An easy way to break this would be to decouple
// merge priority from token index or to prevent specific token merges.
let mut min_rank: (Rank, usize) = (Rank::MAX, usize::MAX);
for i in 0..piece.len() - 1 {
let rank = *self.ranks.get(&piece[i..i + 2]).unwrap_or(&Rank::MAX);
if rank < min_rank.0 {
min_rank = (rank, i);
}
parts.push((i, rank));
}
parts.push((piece.len() - 1, Rank::MAX));
parts.push((piece.len(), Rank::MAX));
let get_rank = {
#[inline(always)]
|parts: &Vec<(usize, Rank)>, i: usize| {
if (i + 3) < parts.len() {
// Similar to `piece[i..i + 2]` above. The +3 is because we haven't yet deleted
// parts[i + 1], see comment in the main loop.
*self
.ranks
.get(&piece[parts[i].0..parts[i + 3].0])
.unwrap_or(&Rank::MAX)
} else {
Rank::MAX
}
}
};
// If you have n parts and m merges, this does O(mn) work.
// We could do something with a heap and do O(m log n) work.
// n is often very small so considerations like cache-locality outweigh the algorithmic
// complexity downsides of the `parts` vector.
while min_rank.0 != Rank::MAX {
let i = min_rank.1;
// Update parts[i] and parts[i - 1] before removing parts[i + 1], since
// `parts.remove(i + 1)` will thrash the cache.
if i > 0 {
parts[i - 1].1 = get_rank(&parts, i - 1);
}
parts[i].1 = get_rank(&parts, i);
parts.remove(i + 1);
min_rank = (Rank::MAX, usize::MAX);
for (i, &(_, rank)) in parts[..parts.len() - 1].iter().enumerate() {
if rank < min_rank.0 {
min_rank = (rank, i);
}
}
}
parts
}
pub fn byte_pair_encode(&self, piece: &[u8]) -> Vec<Rank> {
if piece.is_empty() {
return Vec::new();
}
if piece.len() == 1 {
return vec![self.ranks[piece]];
}
assert!(piece.len() > 1);
self._byte_pair_merge(piece)
.windows(2)
.map(|part| self.ranks[&piece[part[0].0..part[1].0]])
.collect()
}
pub fn encode(&self, text: &str) -> Result<Vec<u32>> {
let _enter = self.span.enter();
let mut bpe_tokens: Vec<u32> = Vec::new();
for word in self.re.find_iter(text) {
let word = word.map_err(E::wrap)?;
let word_tokens = self.byte_pair_encode(word.as_str().as_bytes());
for &token in word_tokens.iter() {
bpe_tokens.push(token + self.offset as u32)
}
}
bpe_tokens.push((self.end_of_text + self.offset) as u32);
Ok(bpe_tokens)
}
}
}
pub mod gpt {
use super::*;
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum NormType {
LayerNorm,
RMSNorm,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum AttnKernelType {
Fa2,
TorchAttn,
Hand,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
pub enum NonLinearityType {
Gelu,
Swiglu,
}
enum Norm {
RMSNorm(candle_nn::RmsNorm),
LayerNorm(candle_nn::LayerNorm),
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L27
#[derive(Debug, Clone)]
pub struct Config {
pub block_size: usize,
pub vocab_sizes: Vec<usize>,
pub target_vocab_sizes: Vec<usize>,
pub n_layer: usize,
pub n_head: usize,
pub n_embd: usize,
pub bias: bool,
pub causal: bool,
pub spk_emb_on_text: bool,
pub norm_type: NormType,
pub rmsnorm_eps: f64,
pub nonlinearity_type: NonLinearityType,
pub swiglu_multiple_of: Option<usize>,
pub attn_kernel_type: AttnKernelType,
pub kv_cache_enabled: bool,
}
impl Config {
pub fn cfg1b_v0_1() -> Self {
Self {
n_layer: 6,
n_head: 6,
n_embd: 384,
block_size: 1024,
bias: false,
vocab_sizes: vec![1538, 1025],
causal: false,
target_vocab_sizes: vec![1025, 1025, 1025, 1025, 1025, 1025],
swiglu_multiple_of: Some(256),
norm_type: NormType::LayerNorm,
kv_cache_enabled: false,
attn_kernel_type: AttnKernelType::TorchAttn,
spk_emb_on_text: true,
nonlinearity_type: NonLinearityType::Gelu,
rmsnorm_eps: 1e-5,
}
}
}
impl Norm {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
match cfg.norm_type {
NormType::RMSNorm => {
let rms_norm = candle_nn::rms_norm(cfg.n_embd, cfg.rmsnorm_eps, vb)?;
Ok(Self::RMSNorm(rms_norm))
}
NormType::LayerNorm => {
let ln_cfg = candle_nn::LayerNormConfig {
affine: cfg.bias,
..Default::default()
};
let layer_norm = candle_nn::layer_norm(cfg.n_embd, ln_cfg, vb)?;
Ok(Self::LayerNorm(layer_norm))
}
}
}
}
impl Module for Norm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::RMSNorm(m) => m.forward(xs),
Self::LayerNorm(m) => m.forward(xs),
}
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/attn.py#L18
struct SelfAttention {
c_attn: Linear,
c_proj: Linear,
n_head: usize,
span: tracing::Span,
}
impl SelfAttention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
// The different attention variants are likely to be equivalent, but for now we only
// accept TorchAttn.
if cfg.attn_kernel_type != AttnKernelType::TorchAttn {
candle::bail!("only TorchAttn is supported")
}
if cfg.kv_cache_enabled {
candle::bail!("kv_cache_enabled=true is not supported")
}
let c_attn = linear_b(cfg.n_embd, cfg.n_embd * 3, cfg.bias, vb.pp("c_attn"))?;
let c_proj = linear_b(cfg.n_embd, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Ok(Self {
c_attn,
c_proj,
n_head: cfg.n_head,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
})
}
}
impl Module for SelfAttention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b, t, c) = xs.dims3()?;
let c_x = xs
.apply(&self.c_attn)?
.reshape((b, t, 3, self.n_head, c / self.n_head))?;
let q = c_x.i((.., .., 0))?;
let k = c_x.i((.., .., 1))?;
let v = c_x.i((.., .., 2))?;
let q = q.transpose(1, 2)?.contiguous()?;
let k = k.transpose(1, 2)?.contiguous()?;
let v = v.transpose(1, 2)?.contiguous()?;
let att = (q.matmul(&k.t()?)? / (k.dim(D::Minus1)? as f64).sqrt())?;
// TODO: causal mask
let att = candle_nn::ops::softmax_last_dim(&att)?;
let att = att.matmul(&v)?.transpose(1, 2)?;
att.reshape((b, t, c))?.apply(&self.c_proj)
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/layers.py#L43
#[allow(clippy::upper_case_acronyms)]
enum MLP {
Gelu {
c_fc: Linear,
c_proj: Linear,
span: tracing::Span,
},
Swiglu {
w1: Linear,
w3: Linear,
c_proj: Linear,
span: tracing::Span,
},
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_dim = 4 * cfg.n_embd;
let slf = match cfg.nonlinearity_type {
NonLinearityType::Gelu => {
let c_fc = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("c_fc"))?;
let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Self::Gelu {
c_fc,
c_proj,
span: tracing::span!(tracing::Level::TRACE, "mlp-gelu"),
}
}
NonLinearityType::Swiglu => {
let hidden_dim = (2 * hidden_dim) / 3;
let swiglu_multiple_of = match cfg.swiglu_multiple_of {
None => candle::bail!("swiglu-multiple-of has to be set"),
Some(smo) => smo,
};
// Round the hidden dimension up to the next multiple of `swiglu_multiple_of`.
let hidden_dim =
(hidden_dim + swiglu_multiple_of - 1) / swiglu_multiple_of * swiglu_multiple_of;
let w1 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w1"))?;
let w3 = linear_b(cfg.n_embd, hidden_dim, cfg.bias, vb.pp("w3"))?;
let c_proj = linear_b(hidden_dim, cfg.n_embd, cfg.bias, vb.pp("c_proj"))?;
Self::Swiglu {
w1,
w3,
c_proj,
span: tracing::span!(tracing::Level::TRACE, "mlp-swiglu"),
}
}
};
Ok(slf)
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Gelu { c_fc, c_proj, span } => {
let _enter = span.enter();
xs.apply(c_fc)?.gelu()?.apply(c_proj)
}
Self::Swiglu {
w1,
w3,
c_proj,
span,
} => {
let _enter = span.enter();
let w1 = xs.apply(w1)?;
let w3 = xs.apply(w3)?;
(w1.silu()? * w3)?.apply(c_proj)
}
}
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/layers/combined.py#L7
struct Block {
ln_1: Norm,
ln_2: Norm,
attn: SelfAttention,
mlp: MLP,
span: tracing::Span,
}
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let ln_1 = Norm::new(cfg, vb.pp("ln_1"))?;
let ln_2 = Norm::new(cfg, vb.pp("ln_2"))?;
let attn = SelfAttention::new(cfg, vb.pp("attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
Ok(Block {
ln_1,
ln_2,
attn,
mlp,
span: tracing::span!(tracing::Level::TRACE, "gpt-block"),
})
}
}
impl Module for Block {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = (xs + xs.apply(&self.ln_1)?.apply(&self.attn))?;
let xs = (&xs + xs.apply(&self.ln_2)?.apply(&self.mlp))?;
Ok(xs)
}
}
// https://github.com/metavoiceio/metavoice-src/blob/11550bb4e8a1ad032cc1556cc924f7a4e767cbfa/fam/llm/model.py#L79
#[allow(clippy::upper_case_acronyms)]
pub struct Model {
wtes: Vec<candle_nn::Embedding>,
wpe: candle_nn::Embedding,
h: Vec<Block>,
ln_f: Norm,
lm_heads: Vec<Linear>,
cfg: Config,
dtype: DType,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: Config, vb: VarBuilder) -> Result<Self> {
let vb_t = vb.pp("transformer");
let ln_f = Norm::new(&cfg, vb_t.pp("ln_f"))?;
let mut wtes = Vec::with_capacity(cfg.vocab_sizes.len());
let vb_w = vb_t.pp("wtes");
for (idx, vocab_size) in cfg.vocab_sizes.iter().enumerate() {
let wte = candle_nn::embedding(*vocab_size, cfg.n_embd, vb_w.pp(idx))?;
wtes.push(wte)
}
let wpe = candle_nn::embedding(cfg.block_size, cfg.n_embd, vb_t.pp("wpe"))?;
let mut h = Vec::with_capacity(cfg.n_layer);
let vb_h = vb_t.pp("h");
for idx in 0..cfg.n_layer {
let block = Block::new(&cfg, vb_h.pp(idx))?;
h.push(block)
}
let mut lm_heads = Vec::with_capacity(cfg.target_vocab_sizes.len());
let vb_l = vb.pp("lm_heads");
for (idx, vocab_size) in cfg.target_vocab_sizes.iter().enumerate() {
let head = linear_b(cfg.n_embd, *vocab_size, false, vb_l.pp(idx))?;
lm_heads.push(head)
}
Ok(Self {
wtes,
wpe,
h,
ln_f,
lm_heads,
cfg,
dtype: vb.dtype(),
span: tracing::span!(tracing::Level::TRACE, "gpt"),
})
}
pub fn config(&self) -> &Config {
&self.cfg
}
pub fn forward(&self, idx: &Tensor) -> Result<Vec<Tensor>> {
let _enter = self.span.enter();
let device = idx.device();
let (b, _num_hierarchies, t) = idx.dims3()?;
let pos = Tensor::arange(0u32, t as u32, device)?;
let pos_emb = pos.apply(&self.wpe)?;
let mut tok_emb = Tensor::zeros((b, t, self.cfg.n_embd), self.dtype, device)?;
for (wte_idx, wte) in self.wtes.iter().enumerate() {
let emb = idx.i((.., wte_idx, ..))?.apply(wte)?;
tok_emb = (tok_emb + emb)?;
}
// TODO: speaker embs.
let spk_emb = 0f64;
let mut xs = (pos_emb.broadcast_add(&tok_emb)? + spk_emb)?;
for block in self.h.iter() {
xs = xs.apply(block)?
}
let xs = xs.apply(&self.ln_f)?;
let mut logits = Vec::with_capacity(self.lm_heads.len());
for lm_head in self.lm_heads.iter() {
// non-causal mode only.
let ys = xs.apply(lm_head)?;
logits.push(ys)
}
Ok(logits)
}
}
}
pub mod transformer {
use super::*;
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub block_size: usize,
pub vocab_size: usize,
pub n_layer: usize,
pub n_head: usize,
pub dim: usize,
pub speaker_emb_dim: usize,
pub intermediate_size: Option<usize>,
pub n_local_heads: Option<usize>,
pub norm_eps: f64,
}
impl Config {
pub fn cfg1b_v0_1() -> Self {
Self {
n_layer: 24,
n_head: 16,
dim: 2048,
vocab_size: 2562,
speaker_emb_dim: 256,
block_size: 2048,
intermediate_size: None,
n_local_heads: None,
norm_eps: 1e-5,
}
}
pub(crate) fn n_local_heads(&self) -> usize {
self.n_local_heads.unwrap_or(self.n_head)
}
pub(crate) fn head_dim(&self) -> usize {
self.dim / self.n_head
}
pub(crate) fn intermediate_size(&self) -> usize {
match self.intermediate_size {
Some(intermediate_size) => intermediate_size,
None => {
let hidden_dim = self.dim * 4;
let n_hidden = ((2 * hidden_dim) as f64 / 3.) as usize;
(n_hidden + 255) / 256 * 256
}
}
}
}
#[derive(Debug, Clone)]
struct FeedForward {
w1: Linear,
w2: Linear,
w3: Linear,
span: tracing::Span,
}
impl FeedForward {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let i_size = cfg.intermediate_size();
let w1 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w1"))?;
let w2 = linear_b(i_size, cfg.dim, false, vb.pp("w2"))?;
let w3 = linear_b(cfg.dim, i_size, false, vb.pp("swiglu.w3"))?;
Ok(Self {
w1,
w2,
w3,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
}
impl Module for FeedForward {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let swiglu = (candle_nn::ops::silu(&xs.apply(&self.w1)?)? * xs.apply(&self.w3))?;
swiglu.apply(&self.w2)
}
}
#[derive(Debug, Clone)]
struct Attention {
wqkv: Linear,
wo: Linear,
dim: usize,
kv_size: usize,
n_local_heads: usize,
head_dim: usize,
n_head: usize,
kv_cache: Option<(Tensor, Tensor)>,
span: tracing::Span,
}
impl Attention {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let n_local_heads = cfg.n_local_heads();
let head_dim = cfg.head_dim();
let total_head_dim = (cfg.n_head + 2 * n_local_heads) * head_dim;
let wqkv = linear_b(cfg.dim, total_head_dim, false, vb.pp("wqkv"))?;
let wo = linear_b(cfg.dim, cfg.dim, false, vb.pp("wo"))?;
Ok(Self {
wqkv,
wo,
dim: cfg.dim,
kv_size: n_local_heads * head_dim,
n_local_heads,
head_dim,
n_head: cfg.n_head,
kv_cache: None,
span: tracing::span!(tracing::Level::TRACE, "feed-forward"),
})
}
fn forward(&mut self, xs: &Tensor, _pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_sz, seqlen, _) = xs.dims3()?;
let qkv = xs.apply(&self.wqkv)?;
let q = qkv.narrow(D::Minus1, 0, self.dim)?;
let k = qkv.narrow(D::Minus1, self.dim, self.kv_size)?;
let v = qkv.narrow(D::Minus1, self.dim + self.kv_size, self.kv_size)?;
let q = q
.reshape((b_sz, seqlen, self.n_head, self.head_dim))?
.transpose(1, 2)?
.contiguous()?;
let k = k
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let v = v
.reshape((b_sz, seqlen, self.n_local_heads, self.head_dim))?
.transpose(1, 2)?;
let (k, v) = match &self.kv_cache {
None => (k, v),
Some((prev_k, prev_v)) => {
let k = Tensor::cat(&[prev_k, &k], 2)?;
let v = Tensor::cat(&[prev_v, &v], 2)?;
(k, v)
}
};
self.kv_cache = Some((k.clone(), v.clone()));
let k = repeat_interleave(&k, self.n_head / self.n_local_heads, 1)?;
let v = repeat_interleave(&v, self.n_head / self.n_local_heads, 1)?;
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (q.matmul(&k.transpose(2, 3)?)? * scale)?;
let attn_weights = attn_weights.broadcast_add(mask)?;
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
let attn_output = attn_weights.matmul(&v)?;
attn_output
.transpose(1, 2)?
.reshape((b_sz, seqlen, self.dim))?
.apply(&self.wo)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Block {
attention: Attention,
feed_forward: FeedForward,
ffn_norm: RmsNorm,
attention_norm: RmsNorm,
span: tracing::Span,
}
impl Block {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let attention = Attention::new(cfg, vb.pp("attention"))?;
let feed_forward = FeedForward::new(cfg, vb.pp("feed_forward"))?;
let ffn_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("ffn_norm"))?;
let attention_norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("attention_norm"))?;
Ok(Self {
attention,
feed_forward,
ffn_norm,
attention_norm,
span: tracing::span!(tracing::Level::TRACE, "block"),
})
}
fn forward(&mut self, xs: &Tensor, pos: usize, mask: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let hs = xs.apply(&self.attention_norm)?;
let hs = (xs + self.attention.forward(&hs, pos, mask))?;
&hs + hs.apply(&self.ffn_norm)?.apply(&self.feed_forward)
}
fn clear_kv_cache(&mut self) {
self.attention.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
tok_embeddings: Embedding,
pos_embeddings: Embedding,
speaker_cond_pos: Linear,
layers: Vec<Block>,
norm: RmsNorm,
output: Linear,
spk_cond_mask: Tensor,
span: tracing::Span,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let tok_embeddings = embedding(cfg.vocab_size, cfg.dim, vb.pp("tok_embeddings"))?;
let pos_embeddings = embedding(cfg.block_size, cfg.dim, vb.pp("pos_embeddings"))?;
let speaker_cond_pos = linear_b(
cfg.speaker_emb_dim,
cfg.dim,
false,
vb.pp("speaker_cond_pos"),
)?;
let mut layers = Vec::with_capacity(cfg.n_layer);
let vb_l = vb.pp("layers");
for layer_idx in 0..cfg.n_layer {
let layer = Block::new(cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("norm"))?;
let output = linear_b(cfg.dim, cfg.vocab_size, false, vb.pp("output"))?;
let dtype = vb.dtype();
let spk_cond_mask = Tensor::cat(
&[
Tensor::ones((1, 1, cfg.dim), dtype, vb.device())?,
Tensor::zeros((1, 1, cfg.dim), dtype, vb.device())?,
],
0,
)?;
Ok(Self {
tok_embeddings,
pos_embeddings,
speaker_cond_pos,
layers,
norm,
output,
spk_cond_mask,
span: tracing::span!(tracing::Level::TRACE, "transformer"),
})
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
pub fn forward(&mut self, xs: &Tensor, spk_emb: &Tensor, pos: usize) -> Result<Tensor> {
let _enter = self.span.enter();
let (_b_sz, seqlen) = xs.dims2()?;
let mask: Vec<_> = (0..seqlen)
.flat_map(|i| (0..seqlen).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (1, 1, seqlen, seqlen), xs.device())?;
let input_pos = Tensor::arange(pos as u32, (pos + seqlen) as u32, xs.device())?;
let tok_embeddings = xs.apply(&self.tok_embeddings)?;
let pos_embeddings = input_pos.apply(&self.pos_embeddings)?;
let mut xs = tok_embeddings
.broadcast_add(&pos_embeddings)?
.broadcast_add(
&spk_emb
.apply(&self.speaker_cond_pos)?
.broadcast_mul(&self.spk_cond_mask)?,
)?;
let mask = mask.to_dtype(xs.dtype())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, pos, &mask)?
}
xs.narrow(1, seqlen - 1, 1)?
.apply(&self.norm)?
.apply(&self.output)
}
}
}
pub mod adapters {
// https://github.com/metavoiceio/metavoice-src/blob/9078234c496d76adbec06df789b6b04b1875f129/fam/llm/adapters/tilted_encodec.py
pub struct TiltedEncodec {
end_of_audio_token: u32,
span: tracing::Span,
}
impl TiltedEncodec {
pub fn new(end_of_audio_token: u32) -> Self {
Self {
end_of_audio_token,
span: tracing::span!(tracing::Level::TRACE, "tilted-encodec"),
}
}
pub fn decode(&self, tokens: &[Vec<u32>]) -> (Vec<u32>, Vec<Vec<u32>>) {
let _enter = self.span.enter();
let mut text_ids = vec![];
let mut extracted_audio_ids = vec![];
let mut min_audio_ids_len = usize::MAX;
for (book_id, tokens) in tokens.iter().enumerate() {
let mut audio_ids = vec![];
for &t in tokens.iter() {
#[allow(clippy::comparison_chain)]
if t > self.end_of_audio_token {
if book_id == 0 {
text_ids.push(t)
}
} else if t < self.end_of_audio_token {
audio_ids.push(t)
}
}
min_audio_ids_len = usize::min(min_audio_ids_len, audio_ids.len());
extracted_audio_ids.push(audio_ids)
}
for audio_ids in extracted_audio_ids.iter_mut() {
audio_ids.truncate(min_audio_ids_len)
}
(text_ids, extracted_audio_ids)
}
}
// https://github.com/metavoiceio/metavoice-src/blob/9078234c496d76adbec06df789b6b04b1875f129/fam/llm/adapters/flattened_encodec.py#L4
pub struct FlattenedInterleavedEncodec2Codebook {
end_of_audio_token: u32,
span: tracing::Span,
}
impl FlattenedInterleavedEncodec2Codebook {
pub fn new(end_of_audio_token: u32) -> Self {
Self {
end_of_audio_token,
span: tracing::span!(tracing::Level::TRACE, "encodec2codebook"),
}
}
pub fn decode(&self, tokens: &[u32]) -> (Vec<u32>, Vec<u32>, Vec<u32>) {
let _enter = self.span.enter();
let mut text_ids = vec![];
let mut audio_ids1 = vec![];
let mut audio_ids2 = vec![];
for &t in tokens.iter() {
#[allow(clippy::comparison_chain)]
if t < self.end_of_audio_token {
audio_ids1.push(t)
} else if t < 2 * self.end_of_audio_token {
audio_ids2.push(t - self.end_of_audio_token)
} else {
text_ids.push(t)
}
}
(text_ids, audio_ids1, audio_ids2)
}
}
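// Hedged usage sketch, not part of the upstream source: a small test illustrating how
// `FlattenedInterleavedEncodec2Codebook::decode` partitions a flattened token stream.
// The `end_of_audio_token` value of 1024 is an illustrative assumption: tokens below it
// go to the first codebook, tokens in [1024, 2048) are shifted into the second codebook,
// and anything at or above 2048 is treated as text.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn flattened_decode_splits_streams() {
let adapter = FlattenedInterleavedEncodec2Codebook::new(1024);
let (text, audio1, audio2) = adapter.decode(&[5, 1030, 2050, 7, 1100]);
assert_eq!(text, vec![2050]);
assert_eq!(audio1, vec![5, 7]);
assert_eq!(audio2, vec![6, 76]);
}
}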
}
| candle/candle-transformers/src/models/metavoice.rs/0 | {
"file_path": "candle/candle-transformers/src/models/metavoice.rs",
"repo_id": "candle",
"token_count": 21763
} |
//! # MobileNet-v4
//!
//! MobileNet-v4 inference implementation based on timm.
//!
//! ## Paper
//!
//! ["MobileNetV4 - Universal Models for the Mobile Ecosystem"](https://arxiv.org/abs/2404.10518)
//!
//! ## References
//!
//! - [PyTorch Implementation](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/mobilenetv3.py)
use candle::{Result, Tensor, D};
use candle_nn::{
batch_norm, conv2d_no_bias, linear, ops::softmax, Activation, Conv2dConfig, Func, VarBuilder,
};
#[derive(Clone, Debug)]
enum BlockType {
Convolutional {
out_channels: usize,
kernel: usize,
stride: usize,
},
UniversalBottleneck {
out_channels: usize,
start_kernel: usize,
mid_kernel: usize,
stride: usize,
expand: usize,
},
EdgeResidual {
out_channels: usize,
kernel: usize,
stride: usize,
expand: usize,
},
Attention {
out_channels: usize,
heads: usize,
kernel: usize,
stride: usize,
kv_dim: usize,
kv_stride: usize,
},
}
#[derive(Clone, Debug)]
pub struct Config {
stem_dim: usize,
activation: Activation,
stages: [Vec<BlockType>; 5],
}
#[rustfmt::skip]
impl Config {
pub fn small() -> Self {
Self {
stem_dim: 32,
activation: Activation::Relu,
stages: [
vec![
BlockType::Convolutional { out_channels: 32, kernel: 3, stride: 2},
BlockType::Convolutional { out_channels: 32, kernel: 1, stride: 1},
],
vec![
BlockType::Convolutional { out_channels: 96, kernel: 3, stride: 2},
BlockType::Convolutional { out_channels: 64, kernel: 1, stride: 1},
],
vec![
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 3},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 3, mid_kernel: 3, stride: 2, expand: 6},
BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 5, stride: 1, expand: 3},
BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 128, start_kernel: 0, mid_kernel: 3, stride: 1, expand: 4},
],
vec![
BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
],
],
}
}
pub fn medium() -> Self {
Self {
stem_dim: 32,
activation: Activation::Relu,
stages: [
vec![
BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 2},
],
vec![
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 6},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 6},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 2},
],
vec![
BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
],
],
}
}
pub fn hybrid_medium() -> Self {
Self {
stem_dim: 32,
activation: Activation::Relu,
stages: [
vec![
BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 80, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 2},
],
vec![
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 6},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::Attention { out_channels: 160, heads: 4, kernel: 3, stride: 1, kv_stride:2, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 160, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 6},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 2},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 0, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 256, heads: 4, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 256, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
],
],
}
}
pub fn large() -> Self {
Self {
stem_dim: 24,
activation: Activation::Relu,
stages: [
vec![
BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
],
],
}
}
pub fn hybrid_large() -> Self {
Self {
stem_dim: 24,
activation: Activation::Gelu,
stages: [
vec![
BlockType::EdgeResidual { out_channels: 48, kernel: 3, stride: 2, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 96, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::Attention { out_channels: 192, heads: 8, kernel: 3, stride: 1, kv_stride:2, kv_dim: 48},
BlockType::UniversalBottleneck { out_channels: 192, start_kernel: 3, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 2, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 3, stride: 1, expand: 4},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 5, stride: 1, expand: 4},
BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
BlockType::Attention { out_channels: 512, heads: 8, kernel: 3, stride: 1, kv_stride:1, kv_dim: 64},
BlockType::UniversalBottleneck { out_channels: 512, start_kernel: 5, mid_kernel: 0, stride: 1, expand: 4},
],
vec![
BlockType::Convolutional { out_channels: 960, kernel: 1, stride: 1},
],
],
}
}
}
fn depthwise_conv(
channels: usize,
kernel: usize,
stride: usize,
padding: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride,
padding,
groups: channels,
..Default::default()
};
let bn = batch_norm(channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(channels, channels, kernel, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
}
fn pointwise_conv(
in_channels: usize,
out_channels: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn"))?;
let conv = conv2d_no_bias(in_channels, out_channels, 1, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
}
// Universal inverted bottleneck block: two pointwise convolutions combined with any
// combination of a "start" and a "mid" depthwise convolution. A kernel size of 0 in the
// config means the corresponding depthwise weights are absent, so that conv is skipped.
#[allow(clippy::too_many_arguments)]
fn universal_inverted_bottleneck_block(
cfg: &Config,
in_channels: usize,
out_channels: usize,
expand: usize,
start_kernel: usize,
mid_kernel: usize,
stride: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let act = cfg.activation;
let skip_connection = (in_channels == out_channels) && (stride == 1);
let dw_start_stride = if mid_kernel > 0 { 1 } else { stride };
let dw_start = depthwise_conv(
in_channels,
start_kernel,
dw_start_stride,
start_kernel / 2,
vb.pp("dw_start"),
);
let pw_exp = pointwise_conv(in_channels, in_channels * expand, vb.pp("pw_exp"))?;
let dw_mid = depthwise_conv(
in_channels * expand,
mid_kernel,
stride,
mid_kernel / 2,
vb.pp("dw_mid"),
);
let pw_proj = pointwise_conv(in_channels * expand, out_channels, vb.pp("pw_proj"))?;
let gamma = vb.get(out_channels, "layer_scale.gamma");
Ok(Func::new(move |xs| {
let residual = xs.clone();
let mut xs = xs.clone();
if let Ok(f) = &dw_start {
xs = xs.apply(f)?;
}
xs = xs.apply(&pw_exp)?.apply(&act)?;
if let Ok(f) = &dw_mid {
xs = xs.apply(f)?.apply(&act)?;
}
xs = xs.apply(&pw_proj)?;
if let Ok(g) = &gamma {
xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?;
};
if skip_connection {
xs = (xs + residual)?;
}
Ok(xs)
}))
}
// Convolutional block including norm and activation.
fn conv_block(
cfg: &Config,
in_channels: usize,
out_channels: usize,
kernel: usize,
stride: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride,
padding: kernel / 2,
..Default::default()
};
let act = cfg.activation;
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn1"))?;
let conv = conv2d_no_bias(in_channels, out_channels, kernel, conv2d_cfg, vb.pp("conv"))?;
Ok(Func::new(move |xs| {
xs.apply(&conv)?.apply_t(&bn, false)?.apply(&act)
}))
}
fn edge_residual_block(
cfg: &Config,
in_channels: usize,
out_channels: usize,
kernel: usize,
stride: usize,
expand: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv_exp_cfg = Conv2dConfig {
stride,
padding: kernel / 2,
..Default::default()
};
let conv_pwl_cfg = Conv2dConfig {
..Default::default()
};
let act = cfg.activation;
let mid_channels = in_channels * expand;
let conv_exp = conv2d_no_bias(
in_channels,
mid_channels,
kernel,
conv_exp_cfg,
vb.pp("conv_exp"),
)?;
let bn1 = batch_norm(mid_channels, 1e-5, vb.pp("bn1"))?;
let conv_pwl = conv2d_no_bias(
mid_channels,
out_channels,
1,
conv_pwl_cfg,
vb.pp("conv_pwl"),
)?;
let bn2 = batch_norm(out_channels, 1e-5, vb.pp("bn2"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv_exp)?
.apply_t(&bn1, false)?
.apply(&act)?
.apply(&conv_pwl)?
.apply_t(&bn2, false)?;
Ok(xs)
}))
}
fn reshape_kv(t: &Tensor) -> Result<Tensor> {
let d = t.dims4()?;
let t = t
.reshape((d.0, d.1, ()))?
.transpose(1, 2)?
.unsqueeze(1)?
.contiguous()?;
Ok(t)
}
fn reshape_query(t: &Tensor, heads: usize, kv_dim: usize) -> Result<Tensor> {
let d = t.dims4()?;
let t = t
.reshape((d.0, heads, kv_dim, ()))?
.transpose(D::Minus1, D::Minus2)?
.contiguous()?;
Ok(t)
}
fn reshape_output(t: &Tensor, heads: usize, h: usize, w: usize) -> Result<Tensor> {
let d = t.dims4()?;
let t = t.transpose(1, 2)?;
let t = t
.reshape((d.0, h, w, d.3 * heads))?
.permute((0, 3, 1, 2))?
.contiguous()?;
Ok(t)
}
// Mobile multi-query attention
#[allow(clippy::too_many_arguments)]
fn mqa_block(
in_channels: usize,
out_channels: usize,
heads: usize,
kernel: usize,
stride: usize,
kv_dim: usize,
kv_stride: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let down_conv2d_cfg = Conv2dConfig {
stride: kv_stride,
padding: kernel / 2,
groups: in_channels,
..Default::default()
};
let proj_conv2d_cfg = Conv2dConfig {
stride,
..Default::default()
};
let skip_connection = (in_channels == out_channels) && (stride == 1);
let gamma = vb.get(out_channels, "layer_scale.gamma");
let norm = batch_norm(out_channels, 1e-5, vb.pp("norm"))?;
let scale = (kv_dim as f64).powf(-0.5);
let vb = vb.pp("attn");
let query_proj = conv2d_no_bias(
out_channels,
kv_dim * heads,
1,
proj_conv2d_cfg,
vb.pp("query.proj"),
)?;
let key_down_conv = conv2d_no_bias(
in_channels,
out_channels,
kernel,
down_conv2d_cfg,
vb.pp("key.down_conv"),
);
let key_norm = batch_norm(out_channels, 1e-5, vb.pp("key.norm"));
let key_proj = conv2d_no_bias(out_channels, kv_dim, 1, proj_conv2d_cfg, vb.pp("key.proj"))?;
let value_down_conv = conv2d_no_bias(
in_channels,
out_channels,
kernel,
down_conv2d_cfg,
vb.pp("value.down_conv"),
);
let value_norm = batch_norm(out_channels, 1e-5, vb.pp("value.norm"));
let value_proj = conv2d_no_bias(
out_channels,
kv_dim,
1,
proj_conv2d_cfg,
vb.pp("value.proj"),
)?;
let output_proj = conv2d_no_bias(
kv_dim * heads,
out_channels,
1,
proj_conv2d_cfg,
vb.pp("output.proj"),
)?;
Ok(Func::new(move |xs| {
let (_, _, h, w) = xs.dims4()?;
let residual = xs.clone();
let xs = xs.apply_t(&norm, false)?;
// Query
let q = xs.apply(&query_proj)?;
let q = reshape_query(&q, heads, kv_dim)?;
let q = (q * scale)?;
// Keys
let mut k = xs.clone();
if let (Ok(kd), Ok(n)) = (&key_down_conv, &key_norm) {
k = k.apply(kd)?.apply_t(n, false)?;
}
let k = k.apply(&key_proj)?;
let k = reshape_kv(&k)?;
// Value
let mut v = xs.clone();
if let (Ok(vd), Ok(n)) = (&value_down_conv, &value_norm) {
v = v.apply(vd)?;
v = v.apply_t(n, false)?;
}
let v = v.apply(&value_proj)?;
let v = reshape_kv(&v)?;
let attn = q.broadcast_matmul(&(k.transpose(D::Minus2, D::Minus1)?))?;
let attn = softmax(&attn, D::Minus1)?;
let o = attn.broadcast_matmul(&v)?;
let o = reshape_output(&o, heads, h, w)?;
let mut xs = o.apply(&output_proj)?;
// Layer scale
if let Ok(g) = &gamma {
xs = xs.broadcast_mul(&g.reshape((1, (), 1, 1))?)?;
};
if skip_connection {
xs = (xs + residual)?;
}
Ok(xs)
}))
}
// Stem.
fn mobilenetv4_stem(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
stride: 2,
padding: 1,
..Default::default()
};
let act = cfg.activation;
let out_channels = cfg.stem_dim;
let bn = batch_norm(out_channels, 1e-5, vb.pp("bn1"))?;
let conv = conv2d_no_bias(3, out_channels, 3, conv2d_cfg, vb.pp("conv_stem"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&conv)?.apply_t(&bn, false)?.apply(&act)?;
Ok(xs)
}))
}
// The blocks in all five stages of the model.
fn mobilenetv4_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
let mut in_channels = cfg.stem_dim;
let mut blocks = Vec::new();
for stage in 0..5 {
let nblocks = cfg.stages[stage].len();
for block in 0..nblocks {
match cfg.stages[stage][block] {
BlockType::Convolutional {
out_channels,
kernel,
stride,
} => {
blocks.push(conv_block(
cfg,
in_channels,
out_channels,
kernel,
stride,
vb.pp(format!("{stage}.{block}")),
)?);
in_channels = out_channels;
}
BlockType::EdgeResidual {
out_channels,
kernel,
stride,
expand,
} => {
blocks.push(edge_residual_block(
cfg,
in_channels,
out_channels,
kernel,
stride,
expand,
vb.pp(format!("{stage}.{block}")),
)?);
in_channels = out_channels;
}
BlockType::UniversalBottleneck {
out_channels,
start_kernel,
mid_kernel,
stride,
expand,
} => {
blocks.push(universal_inverted_bottleneck_block(
cfg,
in_channels,
out_channels,
expand,
start_kernel,
mid_kernel,
stride,
vb.pp(format!("{stage}.{block}")),
)?);
in_channels = out_channels;
}
BlockType::Attention {
out_channels,
heads,
kernel,
stride,
kv_dim,
kv_stride,
} => {
blocks.push(mqa_block(
in_channels,
out_channels,
heads,
kernel,
stride,
kv_dim,
kv_stride,
vb.pp(format!("{stage}.{block}")),
)?);
in_channels = out_channels;
}
}
}
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
// Classification head.
fn mobilenetv4_head(
cfg: &Config,
outputs: usize,
nclasses: usize,
vb: VarBuilder,
) -> Result<Func<'static>> {
let conv2d_cfg = Conv2dConfig {
..Default::default()
};
let act = cfg.activation;
let conv = conv2d_no_bias(960, outputs, 1, conv2d_cfg, vb.pp("conv_head"))?;
let norm = batch_norm(outputs, 1e-5, vb.pp("norm_head"))?;
let cls = linear(outputs, nclasses, vb.pp("classifier"))?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
xs = xs.apply(&conv)?;
xs = xs.apply_t(&norm, false)?.apply(&act)?;
xs = xs.flatten_from(1)?;
xs = xs.apply(&cls)?;
Ok(xs)
}))
}
// Build a mobilenetv4 model for a given configuration.
fn mobilenetv4_model(
cfg: &Config,
nclasses: Option<usize>,
vb: VarBuilder,
) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = 1280;
let head = mobilenetv4_head(cfg, outputs, nclasses, vb.clone())?;
Some(head)
}
};
let stem = mobilenetv4_stem(cfg, vb.clone())?;
let blocks = mobilenetv4_blocks(cfg, vb.pp("blocks"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&stem)?.apply(&blocks)?;
let xs = xs.mean_keepdim(D::Minus1)?.mean_keepdim(D::Minus2)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn mobilenetv4(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
mobilenetv4_model(cfg, Some(nclasses), vb)
}
pub fn mobilenetv4_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
mobilenetv4_model(cfg, None, vb)
}
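// Hedged usage sketch, not part of the upstream source: shows how the public constructors
// above might be used. The safetensors path and the 1000-class head are illustrative
// assumptions; any `VarBuilder` over timm-compatible weights works the same way.
#[allow(dead_code)]
fn demo_mobilenetv4(weights: &str, device: &candle::Device) -> Result<Tensor> {
// Memory-map the checkpoint and build a medium-sized ImageNet classifier.
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], candle::DType::F32, device)? };
let model = mobilenetv4(&Config::medium(), 1000, vb)?;
// A dummy 224x224 RGB batch; real inputs should be resized and normalized as in timm.
let image = Tensor::zeros((1, 3, 224, 224), candle::DType::F32, device)?;
image.apply(&model)
}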
| candle/candle-transformers/src/models/mobilenetv4.rs/0 | {
"file_path": "candle/candle-transformers/src/models/mobilenetv4.rs",
"repo_id": "candle",
"token_count": 16908
} |
//! Microsoft Phi-3 model implementation
//!
//! See Phi model details at:
//! - [Phi-3 Model](https://huggingface.co/microsoft/phi-3)
//!
//! The Phi series is a family of decoder-only transformers designed for code and language tasks.
//! Key characteristics of this implementation:
//! - Decoder-only transformer architecture
//! - Rotary positional embeddings (RoPE) applied to the fused QKV projection
//! - Grouped-query attention with a KV cache for incremental decoding
//! - RMSNorm for layer normalization
//! - Gated MLP with a configurable hidden activation
//!
//! References:
//! - [Phi-3 on Hugging Face](https://huggingface.co/microsoft/phi-3)
//!
// This implementation is based on:
// https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/modeling_phi3.py
use crate::models::with_tracing::{linear_no_bias as linear, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
use std::sync::Arc;
// https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/config.json
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_act: candle_nn::Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub bos_token_id: Option<u32>,
pub eos_token_id: Option<u32>,
pub rope_scaling: Option<String>,
pub max_position_embeddings: usize,
}
impl Config {
pub fn head_dim(&self) -> usize {
self.hidden_size / self.num_attention_heads
}
}
#[derive(Debug, Clone)]
pub struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
pub fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim();
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
pub fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
struct Attention {
qkv_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let head_dim = cfg.head_dim();
let op_size = num_heads * head_dim + 2 * num_kv_heads * head_dim;
let qkv_proj = linear(cfg.hidden_size, op_size, vb.pp("qkv_proj"))?;
let o_proj = linear(num_heads * head_dim, cfg.hidden_size, vb.pp("o_proj"))?;
Ok(Self {
qkv_proj,
o_proj,
rotary_emb,
kv_cache: None,
num_heads,
num_kv_heads,
num_kv_groups: num_heads / num_kv_heads,
head_dim,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let qkv = self.qkv_proj.forward(xs)?;
let query_pos = self.num_heads * self.head_dim;
let query_states = qkv.narrow(D::Minus1, 0, query_pos)?;
let key_states = qkv.narrow(D::Minus1, query_pos, self.num_kv_heads * self.head_dim)?;
let value_states = qkv.narrow(
D::Minus1,
query_pos + self.num_kv_heads * self.head_dim,
self.num_kv_heads * self.head_dim,
)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[derive(Debug, Clone)]
struct Mlp {
gate_up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
i_size: usize,
}
impl Mlp {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_size = cfg.hidden_size;
let i_size = cfg.intermediate_size;
let gate_up_proj = linear(hidden_size, 2 * i_size, vb.pp("gate_up_proj"))?;
let down_proj = linear(i_size, hidden_size, vb.pp("down_proj"))?;
Ok(Self {
gate_up_proj,
down_proj,
act_fn: cfg.hidden_act,
i_size,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let up_states = xs.apply(&self.gate_up_proj)?;
let gate = up_states.narrow(D::Minus1, 0, self.i_size)?;
let up_states = up_states.narrow(D::Minus1, self.i_size, self.i_size)?;
let up_states = (up_states * gate.apply(&self.act_fn))?;
up_states.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: Mlp,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = Mlp::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
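// Hedged usage sketch, not part of the upstream source: a minimal greedy decoding loop
// showing how `seqlen_offset` and the per-layer KV cache interact. Prompt token ids are
// assumed to come from an external tokenizer; plain argmax sampling keeps the example
// self-contained. Call `model.clear_kv_cache()` before reusing the model on a new prompt.
#[allow(dead_code)]
fn greedy_decode(model: &mut Model, prompt: &[u32], steps: usize, device: &Device) -> Result<Vec<u32>> {
let mut tokens = prompt.to_vec();
let mut pos = 0usize;
// The first pass processes the whole prompt; later passes feed one token at a time and
// rely on the cached keys/values plus the growing `seqlen_offset`.
let mut input = Tensor::new(prompt, device)?.unsqueeze(0)?;
for _ in 0..steps {
let logits = model.forward(&input, pos)?;
// `forward` already narrows to the last position: (1, 1, vocab) -> (vocab).
let logits = logits.squeeze(0)?.squeeze(0)?;
let next = logits.argmax(D::Minus1)?.to_scalar::<u32>()?;
pos += input.dim(1)?;
tokens.push(next);
input = Tensor::new(&[next][..], device)?.unsqueeze(0)?;
}
Ok(tokens)
}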
| candle/candle-transformers/src/models/phi3.rs/0 | {
"file_path": "candle/candle-transformers/src/models/phi3.rs",
"repo_id": "candle",
"token_count": 5916
} |
//! Recurrent Gemma model implementation with quantization support.
//!
//! RecurrentGemma interleaves recurrent (RG-LRU) blocks with attention blocks for
//! efficient language modeling. This implementation loads quantized weights to reduce
//! memory use and compute.
//!
//! Key characteristics:
//! - Recurrent blocks built around a real-gated linear recurrent unit (RG-LRU)
//! - Short temporal 1D convolutions and grouped/multi-query attention blocks
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Quantized (GGUF) weights loaded through the quantized `VarBuilder`
//!
//! References:
//! - [Gemma Paper](https://arxiv.org/abs/2401.06751)
//! - [Model Card](https://ai.google.dev/gemma)
//!
use crate::quantized_nn::{linear_b as linear, Embedding, Linear};
pub use crate::quantized_var_builder::VarBuilder;
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use std::sync::Arc;
use crate::models::recurrent_gemma::{Config, Rglru, RmsNorm, RotaryEmbedding, TemporalBlockType};
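// Hedged usage sketch, not part of the upstream source: loading a quantized GGUF
// checkpoint and instantiating the model. The path argument is an illustrative assumption
// and the `Config` is expected to be parsed elsewhere (e.g. from the repo's config.json).
#[allow(dead_code)]
fn load_quantized(cfg: &Config, gguf_path: &str, device: &Device) -> Result<Model> {
let vb = VarBuilder::from_gguf(gguf_path, device)?;
Model::new(cfg, vb)
}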
fn rms_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<RmsNorm> {
let weight = vb.get(size, "weight")?.dequantize(vb.device())?;
Ok(RmsNorm::from_weight(weight, eps))
}
#[derive(Debug, Clone)]
struct Mlp {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl Mlp {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let intermediate_size = cfg.intermediate_size / 2;
let gate_proj = linear(h, intermediate_size, true, vb.pp("gate_proj"))?;
let up_proj = linear(h, intermediate_size, true, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_size, h, true, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_activation,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let gate = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
(gate * xs.apply(&self.up_proj))?.apply(&self.down_proj)
}
}
fn rglru(cfg: &Config, vb: VarBuilder) -> Result<Rglru> {
let h = cfg.hidden_size;
let lru_width = cfg.lru_width.unwrap_or(h);
let n_heads = cfg.num_attention_heads;
let block_width = lru_width / n_heads;
let recurrent_param = vb.get((lru_width,), "recurrent_param")?;
let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?;
let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?;
let recurrent_gate_weight =
vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?;
let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?;
Ok(Rglru {
recurrent_param: recurrent_param.dequantize(vb.device())?,
input_gate_bias: input_gate_bias.dequantize(vb.device())?,
input_gate_weight: input_gate_weight.dequantize(vb.device())?,
recurrent_gate_bias: recurrent_gate_bias.dequantize(vb.device())?,
recurrent_gate_weight: recurrent_gate_weight.dequantize(vb.device())?,
block_width,
n_heads,
recurrent_states: None,
})
}
#[derive(Debug, Clone)]
struct RecurrentBlock {
linear_y: Linear,
linear_x: Linear,
linear_out: Linear,
conv_1d: candle_nn::Conv1d,
conv1d_state: Option<Tensor>,
conv1d_width: usize,
rg_lru: Rglru,
act_fn: candle_nn::Activation,
}
impl RecurrentBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let lru_width = cfg.lru_width.unwrap_or(h);
let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?;
let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?;
let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?;
let conv_1d = {
let ws = vb
.get((lru_width, 1, cfg.conv1d_width), "conv_1d.weight")?
.dequantize(vb.device())?;
let bs = vb.get(lru_width, "conv_1d.bias")?.dequantize(vb.device())?;
let config = candle_nn::Conv1dConfig {
groups: lru_width,
padding: cfg.conv1d_width - 1,
..Default::default()
};
candle_nn::Conv1d::new(ws, Some(bs), config)
};
let rg_lru = rglru(cfg, vb.pp("rg_lru"))?;
Ok(Self {
linear_y,
linear_x,
linear_out,
conv_1d,
conv1d_state: None,
conv1d_width: cfg.conv1d_width,
rg_lru,
act_fn: cfg.hidden_activation,
})
}
pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len, _) = xs.dims3()?;
let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?;
let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?;
let x_branch = if pos == 0 {
let x_len = x_branch.dim(D::Minus1)?;
let pad = self.conv1d_width as i64 - x_len as i64 - 1;
let padded = match pad.cmp(&0) {
std::cmp::Ordering::Equal => x_branch.clone(),
std::cmp::Ordering::Less => {
let rev_pad = (-pad) as usize;
x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)?
}
std::cmp::Ordering::Greater => {
x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)?
}
};
self.conv1d_state = Some(padded);
x_branch
.apply(&self.conv_1d)?
.narrow(D::Minus1, 0, seq_len)?
} else {
let conv_state = match self.conv1d_state.as_ref() {
None => candle::bail!("empty cache despite pos > 0"),
Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?,
};
let w = self.conv_1d.weight().i((.., 0, ..))?;
let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?;
let x_branch = match self.conv_1d.bias() {
None => x_branch,
Some(b) => x_branch.broadcast_add(b)?,
};
let x_branch = x_branch.unsqueeze(D::Minus1)?;
self.conv1d_state = Some(conv_state.i((.., .., 1..))?);
x_branch
};
let x_branch = x_branch.transpose(1, 2)?;
let x_branch = self.rg_lru.forward(&x_branch, pos)?;
(x_branch * y_branch)?.apply(&self.linear_out)
}
}
#[derive(Debug, Clone)]
struct SdpaAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
n_heads: usize,
n_kv_heads: usize,
head_dim: usize,
hidden_size: usize,
kv_cache: Option<(Tensor, Tensor)>,
rotary_emb: Arc<RotaryEmbedding>,
}
impl SdpaAttention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let n_heads = cfg.num_attention_heads;
let n_kv_heads = cfg.num_key_value_heads;
let hd = cfg.head_dim;
let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?;
let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?;
let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?;
let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
n_heads,
n_kv_heads,
head_dim: hd,
hidden_size: h,
kv_cache: None,
rotary_emb,
})
}
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
let n_rep = self.n_heads / self.n_kv_heads;
crate::utils::repeat_kv(x, n_rep)
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
let (bsz, q_len, _) = xs.dims3()?;
let query_states = xs.apply(&self.q_proj)?;
let key_states = xs.apply(&self.k_proj)?;
let value_states = xs.apply(&self.v_proj)?;
let query_states = query_states
.reshape((bsz, q_len, self.n_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
.transpose(1, 2)?;
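        // Partial rotary embeddings: only the first half of the head dimension is
        // rotated; the second half is passed through unchanged and concatenated
        // back afterwards.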
let query_states = query_states.chunk(2, D::Minus1)?;
let key_states = key_states.chunk(2, D::Minus1)?;
let (query_rot, key_rot) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?;
let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?;
let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = self.repeat_kv(key_states)?;
let value_states = self.repeat_kv(value_states)?;
let xs = {
let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if q_len == 1 {
att
} else {
match attention_mask {
None => att,
Some(mask) => att.broadcast_add(mask)?,
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
att.matmul(&value_states.contiguous()?)?
};
let xs = xs
.transpose(1, 2)?
.reshape((bsz, q_len, self.hidden_size))?;
self.o_proj.forward(&xs)
}
}
#[derive(Debug, Clone)]
enum TemporalBlock {
Recurrent(RecurrentBlock),
Attention(SdpaAttention),
}
impl TemporalBlock {
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
match self {
Self::Recurrent(b) => b.forward(xs, pos),
Self::Attention(b) => b.forward(xs, attention_mask, pos),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
temporal_pre_norm: RmsNorm,
channel_pre_norm: RmsNorm,
temporal_block: TemporalBlock,
mlp_block: Mlp,
}
impl DecoderLayer {
fn new(
block_idx: usize,
rotary_emb: Arc<RotaryEmbedding>,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let h = cfg.hidden_size;
let temporal_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?;
let channel_pre_norm = rms_norm(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?;
let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] {
TemporalBlockType::Recurrent => {
let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?;
TemporalBlock::Recurrent(block)
}
TemporalBlockType::Attention => {
let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?;
TemporalBlock::Attention(block)
}
};
let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?;
Ok(Self {
temporal_pre_norm,
channel_pre_norm,
temporal_block,
mlp_block,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.temporal_pre_norm)?;
let xs = self.temporal_block.forward(&xs, attention_mask, pos)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?;
xs + residual
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: Embedding,
layers: Vec<DecoderLayer>,
final_norm: RmsNorm,
lm_head: Linear,
hidden_size: usize,
logits_soft_cap: f64,
device: Device,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb.device())?);
let vb_b = vb.pp("layers");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, vb_b.pp(idx))?;
layers.push(layer)
}
let final_norm = rms_norm(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?;
let lm_head = linear(
cfg.hidden_size,
cfg.vocab_size,
false,
vb.pp("embed_tokens"),
)?;
Ok(Self {
embed_tokens,
layers,
final_norm,
lm_head,
hidden_size: cfg.hidden_size,
logits_soft_cap: cfg.logits_soft_cap,
device: vb.device().clone(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
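        // Standard causal mask: 0 on and below the diagonal, -inf above, left-padded
        // with zeros for the positions already covered by the KV cache.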
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(DType::F32)
}
pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (b_size, seq_len) = xs.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?;
Some(mask)
};
let xs = xs.apply(&self.embed_tokens)?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), pos)?;
}
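        // Only the last position is projected to logits, which are then soft-capped
        // as cap * tanh(logits / cap).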
let logits = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.final_norm)?
.apply(&self.lm_head)?;
let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?;
Ok(logits)
}
}
| candle/candle-transformers/src/models/quantized_recurrent_gemma.rs/0 | {
"file_path": "candle/candle-transformers/src/models/quantized_recurrent_gemma.rs",
"repo_id": "candle",
"token_count": 7858
} |
use candle::{DType, IndexOp, Result, Tensor, D};
use candle_nn::VarBuilder;
#[derive(Debug)]
struct PositionEmbeddingRandom {
positional_encoding_gaussian_matrix: Tensor,
}
impl PositionEmbeddingRandom {
fn new(num_pos_feats: usize, vb: VarBuilder) -> Result<Self> {
let positional_encoding_gaussian_matrix =
vb.get((2, num_pos_feats), "positional_encoding_gaussian_matrix")?;
Ok(Self {
positional_encoding_gaussian_matrix,
})
}
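    // Random-Fourier-feature positional encoding: coordinates in [0, 1] are mapped
    // to [-1, 1], projected through a fixed Gaussian matrix, and expanded into
    // concatenated [sin, cos] features.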
fn pe_encoding(&self, coords: &Tensor) -> Result<Tensor> {
let coords = coords.affine(2., -1.)?;
let coords = coords.broadcast_matmul(&self.positional_encoding_gaussian_matrix)?;
let coords = (coords * (2. * std::f64::consts::PI))?;
Tensor::cat(&[coords.sin()?, coords.cos()?], D::Minus1)
}
fn forward(&self, h: usize, w: usize) -> Result<Tensor> {
let device = self.positional_encoding_gaussian_matrix.device();
let x_embed = (Tensor::arange(0u32, w as u32, device)?.to_dtype(DType::F32)? + 0.5)?;
let y_embed = (Tensor::arange(0u32, h as u32, device)?.to_dtype(DType::F32)? + 0.5)?;
let x_embed = (x_embed / w as f64)?
.reshape((1, ()))?
.broadcast_as((h, w))?;
let y_embed = (y_embed / h as f64)?
.reshape(((), 1))?
.broadcast_as((h, w))?;
let coords = Tensor::stack(&[&x_embed, &y_embed], D::Minus1)?;
self.pe_encoding(&coords)?.permute((2, 0, 1))
}
fn forward_with_coords(
&self,
coords_input: &Tensor,
image_size: (usize, usize),
) -> Result<Tensor> {
let coords0 = (coords_input.narrow(D::Minus1, 0, 1)? / image_size.1 as f64)?;
let coords1 = (coords_input.narrow(D::Minus1, 1, 1)? / image_size.0 as f64)?;
let c = coords_input.dim(D::Minus1)?;
let coords_rest = coords_input.narrow(D::Minus1, 2, c - 2)?;
let coords = Tensor::cat(&[&coords0, &coords1, &coords_rest], D::Minus1)?;
self.pe_encoding(&coords)
}
}
#[derive(Debug)]
pub struct PromptEncoder {
pe_layer: PositionEmbeddingRandom,
point_embeddings: Vec<candle_nn::Embedding>,
not_a_point_embed: candle_nn::Embedding,
mask_downscaling_conv1: candle_nn::Conv2d,
mask_downscaling_ln1: super::LayerNorm2d,
mask_downscaling_conv2: candle_nn::Conv2d,
mask_downscaling_ln2: super::LayerNorm2d,
mask_downscaling_conv3: candle_nn::Conv2d,
no_mask_embed: candle_nn::Embedding,
image_embedding_size: (usize, usize),
input_image_size: (usize, usize),
embed_dim: usize,
span: tracing::Span,
}
impl PromptEncoder {
pub fn new(
embed_dim: usize,
image_embedding_size: (usize, usize),
input_image_size: (usize, usize),
mask_in_chans: usize,
vb: VarBuilder,
) -> Result<Self> {
let num_points_embeddings = 4;
let pe_layer = PositionEmbeddingRandom::new(embed_dim / 2, vb.pp("pe_layer"))?;
let not_a_point_embed = candle_nn::embedding(1, embed_dim, vb.pp("not_a_point_embed"))?;
let no_mask_embed = candle_nn::embedding(1, embed_dim, vb.pp("no_mask_embed"))?;
let cfg = candle_nn::Conv2dConfig {
stride: 2,
..Default::default()
};
let mask_downscaling_conv1 =
candle_nn::conv2d(1, mask_in_chans / 4, 2, cfg, vb.pp("mask_downscaling.0"))?;
let mask_downscaling_conv2 = candle_nn::conv2d(
mask_in_chans / 4,
mask_in_chans,
2,
cfg,
vb.pp("mask_downscaling.3"),
)?;
let mask_downscaling_conv3 = candle_nn::conv2d(
mask_in_chans,
embed_dim,
1,
Default::default(),
vb.pp("mask_downscaling.6"),
)?;
let mask_downscaling_ln1 =
super::LayerNorm2d::new(mask_in_chans / 4, 1e-6, vb.pp("mask_downscaling.1"))?;
let mask_downscaling_ln2 =
super::LayerNorm2d::new(mask_in_chans, 1e-6, vb.pp("mask_downscaling.4"))?;
let mut point_embeddings = Vec::with_capacity(num_points_embeddings);
let vb_e = vb.pp("point_embeddings");
for i in 0..num_points_embeddings {
let emb = candle_nn::embedding(1, embed_dim, vb_e.pp(i))?;
point_embeddings.push(emb)
}
let span = tracing::span!(tracing::Level::TRACE, "prompt-encoder");
Ok(Self {
pe_layer,
point_embeddings,
not_a_point_embed,
mask_downscaling_conv1,
mask_downscaling_ln1,
mask_downscaling_conv2,
mask_downscaling_ln2,
mask_downscaling_conv3,
no_mask_embed,
image_embedding_size,
input_image_size,
embed_dim,
span,
})
}
pub fn get_dense_pe(&self) -> Result<Tensor> {
self.pe_layer
.forward(self.image_embedding_size.0, self.image_embedding_size.1)?
.unsqueeze(0)
}
fn embed_masks(&self, masks: &Tensor) -> Result<Tensor> {
masks
.apply(&self.mask_downscaling_conv1)?
.apply(&self.mask_downscaling_ln1)?
.gelu()?
.apply(&self.mask_downscaling_conv2)?
.apply(&self.mask_downscaling_ln2)?
.gelu()?
.apply(&self.mask_downscaling_conv3)
}
fn embed_points(&self, points: &Tensor, labels: &Tensor, pad: bool) -> Result<Tensor> {
let points = (points + 0.5)?;
let dev = points.device();
let (points, labels) = if pad {
let padding_point = Tensor::zeros((points.dim(0)?, 1, 2), DType::F32, dev)?;
let padding_label = (Tensor::ones((labels.dim(0)?, 1), DType::F32, dev)? * (-1f64))?;
let points = Tensor::cat(&[&points, &padding_point], 1)?;
let labels = Tensor::cat(&[labels, &padding_label], 1)?;
(points, labels)
} else {
(points, labels.clone())
};
let point_embedding = self
.pe_layer
.forward_with_coords(&points, self.input_image_size)?;
let labels = labels.unsqueeze(2)?.broadcast_as(point_embedding.shape())?;
let zeros = point_embedding.zeros_like()?;
let point_embedding = labels.lt(0f32)?.where_cond(
&self
.not_a_point_embed
.embeddings()
.broadcast_as(zeros.shape())?,
&point_embedding,
)?;
let labels0 = labels.eq(0f32)?.where_cond(
&self.point_embeddings[0]
.embeddings()
.broadcast_as(zeros.shape())?,
&zeros,
)?;
let point_embedding = (point_embedding + labels0)?;
let labels1 = labels.eq(1f32)?.where_cond(
&self.point_embeddings[1]
.embeddings()
.broadcast_as(zeros.shape())?,
&zeros,
)?;
let point_embedding = (point_embedding + labels1)?;
Ok(point_embedding)
}
fn embed_boxes(&self, boxes: &Tensor) -> Result<Tensor> {
let boxes = (boxes + 0.5)?;
let coords = boxes.reshape(((), 2, 2))?;
let corner_embedding = self
.pe_layer
.forward_with_coords(&coords, self.input_image_size)?;
let ce1 = corner_embedding.i((.., 0))?;
let ce2 = corner_embedding.i((.., 1))?;
let ce1 = (ce1 + self.point_embeddings[2].embeddings())?;
let ce2 = (ce2 + self.point_embeddings[3].embeddings())?;
Tensor::cat(&[&ce1, &ce2], 1)
}
pub fn forward(
&self,
points: Option<(&Tensor, &Tensor)>,
boxes: Option<&Tensor>,
masks: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
let _enter = self.span.enter();
let se_points = match points {
Some((coords, labels)) => Some(self.embed_points(coords, labels, boxes.is_none())?),
None => None,
};
let se_boxes = match boxes {
Some(boxes) => Some(self.embed_boxes(boxes)?),
None => None,
};
let sparse_embeddings = match (se_points, se_boxes) {
(Some(se_points), Some(se_boxes)) => Tensor::cat(&[se_points, se_boxes], 1)?,
(Some(se_points), None) => se_points,
(None, Some(se_boxes)) => se_boxes,
(None, None) => {
let dev = self.no_mask_embed.embeddings().device();
Tensor::zeros((1, 0, self.embed_dim), DType::F32, dev)?
}
};
let dense_embeddings = match masks {
None => {
let emb = self.no_mask_embed.embeddings();
emb.reshape((1, (), 1, 1))?.expand((
1,
emb.elem_count(),
self.image_embedding_size.0,
self.image_embedding_size.1,
))?
}
Some(masks) => self.embed_masks(masks)?,
};
Ok((sparse_embeddings, dense_embeddings))
}
}
| candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs/0 | {
"file_path": "candle/candle-transformers/src/models/segment_anything/prompt_encoder.rs",
"repo_id": "candle",
"token_count": 4745
} |
//! # UniPC Scheduler
//!
//! UniPC is a training-free framework designed for the fast sampling of diffusion models, which consists of a
//! corrector (UniC) and a predictor (UniP) that share a unified analytical form and support arbitrary orders.
//!
//! UniPC is by design model-agnostic, supporting pixel-space/latent-space DPMs on unconditional/conditional
//! sampling. It can also be applied to both noise prediction and data prediction models. Compared with prior
//! methods, UniPC converges faster thanks to the increased order of accuracy. Both quantitative and qualitative
//! results show UniPC can improve sampling quality, especially at very low step counts (5~10).
//!
//! For more information, see the original publication:
//! UniPC: A Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models, W. Zhao et al., 2023.
//! https://arxiv.org/abs/2302.04867
//!
//! This work is based largely on UniPC implementation from the diffusers python package:
//! https://raw.githubusercontent.com/huggingface/diffusers/e8aacda762e311505ba05ae340af23b149e37af3/src/diffusers/schedulers/scheduling_unipc_multistep.py
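//!
//! ## Usage sketch
//!
//! A minimal, illustrative sampling loop rather than a verbatim example from the
//! repository. The `denoiser` closure and `init_latents` tensor are placeholders
//! for whatever model and initial noise the caller provides, and the `Scheduler`
//! and `SchedulerConfig` traits from `super::schedulers` are assumed to be in scope.
//!
//! ```ignore
//! let config = UniPCSchedulerConfig::default();
//! let mut scheduler = config.build(30)?; // 30 inference steps
//! let mut latents = (init_latents * scheduler.init_noise_sigma())?;
//! let timesteps = scheduler.timesteps().to_vec();
//! for &t in timesteps.iter() {
//!     let input = scheduler.scale_model_input(latents.clone(), t)?;
//!     let model_output = denoiser(&input, t)?; // hypothetical model call
//!     latents = scheduler.step(&model_output, t, &latents)?;
//! }
//! ```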
use std::collections::HashSet;
use std::ops::Neg;
use super::schedulers::PredictionType;
use super::{
schedulers::{Scheduler, SchedulerConfig},
utils::{interp, linspace},
};
use candle::{Error, IndexOp, Result, Tensor};
#[derive(Debug, Clone, Copy)]
pub enum SigmaSchedule {
Karras(KarrasSigmaSchedule),
Exponential(ExponentialSigmaSchedule),
}
impl SigmaSchedule {
fn sigma_t(&self, t: f64) -> f64 {
match self {
Self::Karras(x) => x.sigma_t(t),
Self::Exponential(x) => x.sigma_t(t),
}
}
}
impl Default for SigmaSchedule {
fn default() -> Self {
Self::Karras(KarrasSigmaSchedule::default())
}
}
#[derive(Debug, Clone, Copy)]
pub struct KarrasSigmaSchedule {
pub sigma_min: f64,
pub sigma_max: f64,
pub rho: f64,
}
impl KarrasSigmaSchedule {
fn sigma_t(&self, t: f64) -> f64 {
let (min_inv_rho, max_inv_rho) = (
self.sigma_min.powf(1.0 / self.rho),
self.sigma_max.powf(1.0 / self.rho),
);
(max_inv_rho + ((1.0 - t) * (min_inv_rho - max_inv_rho))).powf(self.rho)
}
}
impl Default for KarrasSigmaSchedule {
fn default() -> Self {
Self {
sigma_max: 10.0,
sigma_min: 0.1,
rho: 4.0,
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct ExponentialSigmaSchedule {
sigma_min: f64,
sigma_max: f64,
}
impl ExponentialSigmaSchedule {
fn sigma_t(&self, t: f64) -> f64 {
(t * (self.sigma_max.ln() - self.sigma_min.ln()) + self.sigma_min.ln()).exp()
}
}
impl Default for ExponentialSigmaSchedule {
fn default() -> Self {
Self {
sigma_max: 80.0,
sigma_min: 0.1,
}
}
}
#[derive(Debug, Default, Clone, Copy)]
pub enum SolverType {
#[default]
Bh1,
Bh2,
}
#[derive(Debug, Default, Clone, Copy)]
pub enum AlgorithmType {
#[default]
DpmSolverPlusPlus,
SdeDpmSolverPlusPlus,
}
#[derive(Debug, Default, Clone, Copy)]
pub enum FinalSigmasType {
#[default]
Zero,
SigmaMin,
}
#[derive(Debug, Clone)]
pub enum TimestepSchedule {
/// Timesteps will be determined by interpolation of sigmas
FromSigmas,
/// Timesteps will be separated by regular intervals
Linspace,
}
impl TimestepSchedule {
fn timesteps(
&self,
sigma_schedule: &SigmaSchedule,
num_inference_steps: usize,
num_training_steps: usize,
) -> Result<Vec<usize>> {
match self {
Self::FromSigmas => {
let sigmas: Tensor = linspace(1., 0., num_inference_steps)?
.to_vec1()?
.into_iter()
.map(|t| sigma_schedule.sigma_t(t))
.collect::<Vec<f64>>()
.try_into()?;
let log_sigmas = sigmas.log()?.to_vec1::<f64>()?;
let timesteps = interp(
&log_sigmas.iter().copied().rev().collect::<Vec<_>>(),
&linspace(
log_sigmas[log_sigmas.len() - 1] - 0.001,
log_sigmas[0] + 0.001,
num_inference_steps,
)?
.to_vec1::<f64>()?,
&linspace(0., num_training_steps as f64, num_inference_steps)?
.to_vec1::<f64>()?,
)
.into_iter()
.map(|f| (num_training_steps - 1) - (f as usize))
.collect::<Vec<_>>();
Ok(timesteps)
}
Self::Linspace => {
Ok(
linspace((num_training_steps - 1) as f64, 0., num_inference_steps)?
.to_vec1::<f64>()?
.into_iter()
.map(|f| f as usize)
.collect(),
)
}
}
}
}
#[derive(Debug, Clone)]
pub enum CorrectorConfiguration {
Disabled,
Enabled { skip_steps: HashSet<usize> },
}
impl Default for CorrectorConfiguration {
fn default() -> Self {
Self::Enabled {
skip_steps: [0, 1, 2].into_iter().collect(),
}
}
}
impl CorrectorConfiguration {
pub fn new(disabled_steps: impl IntoIterator<Item = usize>) -> Self {
Self::Enabled {
skip_steps: disabled_steps.into_iter().collect(),
}
}
}
#[derive(Debug, Clone)]
pub struct UniPCSchedulerConfig {
    /// Configure the UniC corrector. The default configuration enables it, skipping
    /// the first few steps (see `CorrectorConfiguration::default`).
pub corrector: CorrectorConfiguration,
/// Determines how sigma relates to a given timestep
pub sigma_schedule: SigmaSchedule,
    /// Determines the points (timesteps) at which the model is evaluated during sampling
pub timestep_schedule: TimestepSchedule,
/// The solver order which can be `1` or higher. It is recommended to use `solver_order=2` for guided
/// sampling, and `solver_order=3` for unconditional sampling.
pub solver_order: usize,
/// Prediction type of the scheduler function
pub prediction_type: PredictionType,
pub num_training_timesteps: usize,
/// Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
/// as Stable Diffusion.
pub thresholding: bool,
/// The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
pub dynamic_thresholding_ratio: f64,
/// The threshold value for dynamic thresholding.
pub sample_max_value: f64,
pub solver_type: SolverType,
/// Whether to use lower-order solvers in the final steps.
pub lower_order_final: bool,
}
impl Default for UniPCSchedulerConfig {
fn default() -> Self {
Self {
corrector: Default::default(),
timestep_schedule: TimestepSchedule::FromSigmas,
sigma_schedule: SigmaSchedule::Karras(Default::default()),
prediction_type: PredictionType::Epsilon,
num_training_timesteps: 1000,
solver_order: 2,
thresholding: false,
dynamic_thresholding_ratio: 0.995,
sample_max_value: 1.0,
solver_type: SolverType::Bh1,
lower_order_final: true,
}
}
}
impl SchedulerConfig for UniPCSchedulerConfig {
fn build(&self, inference_steps: usize) -> Result<Box<dyn Scheduler>> {
Ok(Box::new(EdmDpmMultistepScheduler::new(
self.clone(),
inference_steps,
)?))
}
}
struct State {
model_outputs: Vec<Option<Tensor>>,
lower_order_nums: usize,
order: usize,
last_sample: Option<Tensor>,
}
impl State {
fn new(solver_order: usize) -> Self {
Self {
model_outputs: vec![None; solver_order],
lower_order_nums: 0,
order: 0,
last_sample: None,
}
}
fn lower_order_nums(&self) -> usize {
self.lower_order_nums
}
fn update_lower_order_nums(&mut self, n: usize) {
self.lower_order_nums = n;
}
fn model_outputs(&self) -> &[Option<Tensor>] {
self.model_outputs.as_slice()
}
fn update_model_output(&mut self, idx: usize, output: Option<Tensor>) {
self.model_outputs[idx] = output;
}
fn last_sample(&self) -> Option<&Tensor> {
self.last_sample.as_ref()
}
fn update_last_sample(&mut self, sample: Tensor) {
let _ = self.last_sample.replace(sample);
}
fn order(&self) -> usize {
self.order
}
fn update_order(&mut self, order: usize) {
self.order = order;
}
}
pub struct EdmDpmMultistepScheduler {
schedule: Schedule,
config: UniPCSchedulerConfig,
state: State,
}
impl EdmDpmMultistepScheduler {
pub fn new(config: UniPCSchedulerConfig, num_inference_steps: usize) -> Result<Self> {
let schedule = Schedule::new(
config.timestep_schedule.clone(),
config.sigma_schedule,
num_inference_steps,
config.num_training_timesteps,
)?;
Ok(Self {
schedule,
state: State::new(config.solver_order),
config,
})
}
fn step_index(&self, timestep: usize) -> usize {
let index_candidates = self
.schedule
.timesteps()
.iter()
.enumerate()
            .filter(|(_, t)| (*t == &timestep))
.map(|(i, _)| i)
.collect::<Vec<_>>();
match index_candidates.len() {
0 => 0,
1 => index_candidates[0],
_ => index_candidates[1],
}
}
fn timestep(&self, step_idx: usize) -> usize {
self.schedule
.timesteps()
.get(step_idx)
.copied()
.unwrap_or(0)
}
fn convert_model_output(
&self,
model_output: &Tensor,
sample: &Tensor,
timestep: usize,
) -> Result<Tensor> {
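        // Recover the predicted clean sample x0 from the raw model output. The
        // formula depends on the parameterization: for epsilon prediction
        // x0 = (x - sigma_t * eps) / alpha_t, for sample prediction the output is
        // already x0, and for v-prediction x0 = alpha_t * x - sigma_t * v.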
let (alpha_t, sigma_t) = (
self.schedule.alpha_t(timestep),
self.schedule.sigma_t(timestep),
);
let x0_pred = match self.config.prediction_type {
PredictionType::Epsilon => ((sample - (model_output * sigma_t))? / alpha_t)?,
PredictionType::Sample => model_output.clone(),
PredictionType::VPrediction => ((alpha_t * sample)? - (sigma_t * model_output)?)?,
};
if self.config.thresholding {
self.threshold_sample(x0_pred)
} else {
Ok(x0_pred)
}
}
fn threshold_sample(&self, sample: Tensor) -> Result<Tensor> {
let shape = sample.shape().clone().into_dims();
let v = sample
.abs()?
.reshape((shape[0], shape[1] * shape[2..].iter().product::<usize>()))?
.to_dtype(candle::DType::F64)?
.to_vec2::<f64>()?;
let q = stats::Quantile::new(self.config.dynamic_thresholding_ratio)
.with_samples(v.into_iter().flatten());
let (threshold, max) = (q.quantile().max(self.config.sample_max_value), q.max());
sample.clamp(-threshold, threshold)? / (threshold / max).sqrt().min(1.)
}
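    // UniP predictor update (the B(h) form): extrapolates the sample at the next
    // timestep from the current sample and the cached model outputs of the last few
    // steps, combined through the rk/b coefficients built below. The solver order
    // determines how many previous outputs enter the combination.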
fn multistep_uni_p_bh_update(&self, sample: &Tensor, timestep: usize) -> Result<Tensor> {
let step_index = self.step_index(timestep);
let ns = &self.schedule;
let model_outputs = self.state.model_outputs();
let Some(m0) = &model_outputs[model_outputs.len() - 1] else {
return Err(Error::Msg(
"Expected model output for predictor update".to_string(),
));
};
let (t0, tt) = (timestep, self.timestep(self.step_index(timestep) + 1));
let (sigma_t, sigma_s0) = (ns.sigma_t(tt), ns.sigma_t(t0));
let (alpha_t, _alpha_s0) = (ns.alpha_t(tt), ns.alpha_t(t0));
let (lambda_t, lambda_s0) = (ns.lambda_t(tt), ns.lambda_t(t0));
let h = lambda_t - lambda_s0;
let device = sample.device();
let (mut rks, mut d1s) = (vec![], vec![]);
for i in 1..self.state.order() {
let ti = self.timestep(step_index.saturating_sub(i + 1));
let Some(mi) = model_outputs
.get(model_outputs.len().saturating_sub(i + 1))
.into_iter()
.flatten()
.next()
else {
return Err(Error::Msg(
"Expected model output for predictor update".to_string(),
));
};
let (alpha_si, sigma_si) = (ns.alpha_t(ti), ns.sigma_t(ti));
let lambda_si = alpha_si.ln() - sigma_si.ln();
let rk = (lambda_si - lambda_s0) / h;
rks.push(rk);
d1s.push(((mi - m0)? / rk)?);
}
rks.push(1.0);
let rks = Tensor::new(rks, device)?;
let (mut r, mut b) = (vec![], vec![]);
let hh = h.neg();
let h_phi_1 = hh.exp_m1();
let mut h_phi_k = h_phi_1 / hh - 1.;
let mut factorial_i = 1.;
let b_h = match self.config.solver_type {
SolverType::Bh1 => hh,
SolverType::Bh2 => hh.exp_m1(),
};
for i in 1..self.state.order() + 1 {
r.push(rks.powf(i as f64 - 1.)?);
b.push(h_phi_k * factorial_i / b_h);
factorial_i = i as f64 + 1.;
h_phi_k = h_phi_k / hh - 1. / factorial_i;
}
let (r, b) = (Tensor::stack(&r, 0)?, Tensor::new(b, device)?);
let (d1s, rhos_p) = match d1s.len() {
0 => (None, None),
_ => {
let rhos_p = match self.state.order() {
2 => Tensor::new(&[0.5f64], m0.device())?.to_dtype(m0.dtype())?,
_ => {
let ((r1, r2), b1) = (r.dims2()?, b.dims1()?);
let inverse = linalg::inverse(&r.i((..(r1 - 1), ..(r2 - 1)))?)?;
let b = b.i(..(b1 - 1))?;
b.broadcast_mul(&inverse)?.sum(1)?.to_dtype(m0.dtype())?
}
};
(Some(Tensor::stack(&d1s, 1)?), Some(rhos_p))
}
};
let x_t_ = ((sigma_t / sigma_s0 * sample)? - (alpha_t * h_phi_1 * m0)?)?;
if let (Some(d1s), Some(rhos_p)) = (d1s, rhos_p) {
use linalg::{Permutation, TensordotFixedPosition, TensordotGeneral};
let output_shape = m0.shape().clone();
let pred_res = TensordotGeneral {
lhs_permutation: Permutation { dims: vec![0] },
rhs_permutation: Permutation {
dims: vec![1, 0, 2, 3, 4],
},
tensordot_fixed_position: TensordotFixedPosition {
len_uncontracted_lhs: 1,
len_uncontracted_rhs: output_shape.dims().iter().product::<usize>(),
len_contracted_axes: d1s.dim(1)?,
output_shape,
},
output_permutation: Permutation {
dims: vec![0, 1, 2, 3],
},
}
.eval(&rhos_p, &d1s)?;
x_t_ - (alpha_t * b_h * pred_res)?
} else {
Ok(x_t_)
}
}
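    // UniC corrector update: once the model output at the new timestep is available,
    // the sample produced by the previous predictor step (`last_sample`) is refined
    // with the same style of coefficients plus a correction term built from the
    // difference between the new output and the previous one.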
fn multistep_uni_c_bh_update(
&self,
model_output: &Tensor,
model_outputs: &[Option<Tensor>],
last_sample: &Tensor,
sample: &Tensor,
timestep: usize,
) -> Result<Tensor> {
let step_index = self.step_index(timestep);
let Some(m0) = model_outputs.last().into_iter().flatten().next() else {
return Err(Error::Msg(
"Expected model output for corrector update".to_string(),
));
};
let model_t = model_output;
let (x, _xt) = (last_sample, sample);
let (t0, tt, ns) = (
self.timestep(self.step_index(timestep) - 1),
timestep,
&self.schedule,
);
let (sigma_t, sigma_s0) = (ns.sigma_t(tt), ns.sigma_t(t0));
let (alpha_t, _alpha_s0) = (ns.alpha_t(tt), ns.alpha_t(t0));
let (lambda_t, lambda_s0) = (ns.lambda_t(tt), ns.lambda_t(t0));
let h = lambda_t - lambda_s0;
let device = sample.device();
let (mut rks, mut d1s) = (vec![], vec![]);
for i in 1..self.state.order() {
let ti = self.timestep(step_index.saturating_sub(i + 1));
let Some(mi) = model_outputs
.get(model_outputs.len().saturating_sub(i + 1))
.into_iter()
.flatten()
.next()
else {
return Err(Error::Msg(
"Expected model output for corrector update".to_string(),
));
};
let (alpha_si, sigma_si) = (ns.alpha_t(ti), ns.sigma_t(ti));
let lambda_si = alpha_si.ln() - sigma_si.ln();
let rk = (lambda_si - lambda_s0) / h;
rks.push(rk);
d1s.push(((mi - m0)? / rk)?);
}
rks.push(1.0);
let rks = Tensor::new(rks, device)?;
let (mut r, mut b) = (vec![], vec![]);
let hh = h.neg();
let h_phi_1 = hh.exp_m1();
let mut h_phi_k = h_phi_1 / hh - 1.;
let mut factorial_i = 1.;
let b_h = match self.config.solver_type {
SolverType::Bh1 => hh,
SolverType::Bh2 => hh.exp_m1(),
};
for i in 1..self.state.order() + 1 {
r.push(rks.powf(i as f64 - 1.)?);
b.push(h_phi_k * factorial_i / b_h);
factorial_i = i as f64 + 1.;
h_phi_k = h_phi_k / hh - 1. / factorial_i;
}
let (r, b) = (Tensor::stack(&r, 0)?, Tensor::new(b, device)?);
let d1s = match d1s.len() {
0 => None,
_ => Some(Tensor::stack(&d1s, 1)?),
};
let rhos_c = match self.state.order() {
1 => Tensor::new(&[0.5f64], m0.device())?.to_dtype(m0.dtype())?,
_ => {
let inverse = linalg::inverse(&r)?;
b.broadcast_mul(&inverse)?.sum(1)?.to_dtype(m0.dtype())?
}
};
let x_t_ = ((sigma_t / sigma_s0 * x)? - (alpha_t * h_phi_1 * m0)?)?;
let corr_res = d1s
.map(|d1s| {
use linalg::{Permutation, TensordotFixedPosition, TensordotGeneral};
let output_shape = x_t_.shape().clone();
TensordotGeneral {
lhs_permutation: Permutation { dims: vec![0] },
rhs_permutation: Permutation {
dims: vec![1, 0, 2, 3, 4],
},
tensordot_fixed_position: TensordotFixedPosition {
len_uncontracted_lhs: 1,
len_uncontracted_rhs: output_shape.dims().iter().product::<usize>(),
len_contracted_axes: d1s.dim(1)?,
output_shape,
},
output_permutation: Permutation {
dims: vec![0, 1, 2, 3],
},
}
.eval(&rhos_c.i(..rhos_c.dims()[0] - 1)?, &d1s)
})
.unwrap_or_else(|| Tensor::zeros_like(m0))?;
let d1_t = (model_t - m0)?;
let x_t = (x_t_
- (alpha_t
* b_h
* (corr_res + rhos_c.i(rhos_c.dims()[0] - 1)?.broadcast_mul(&d1_t)?)?)?)?;
Ok(x_t)
}
}
impl Scheduler for EdmDpmMultistepScheduler {
fn step(&mut self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> {
let step_index = self.step_index(timestep);
let model_output_converted = &self.convert_model_output(model_output, sample, timestep)?;
let sample = match (&self.config.corrector, self.state.last_sample()) {
(CorrectorConfiguration::Enabled { skip_steps: s }, Some(last_sample))
if !s.contains(&step_index) && step_index > 0 =>
{
&self.multistep_uni_c_bh_update(
model_output_converted,
self.state.model_outputs(),
last_sample,
sample,
timestep,
)?
}
(CorrectorConfiguration::Enabled { .. }, _) | (CorrectorConfiguration::Disabled, _) => {
sample
}
};
let mut model_outputs = self.state.model_outputs().to_vec();
for i in 0..self.config.solver_order.saturating_sub(1) {
self.state
.update_model_output(i, model_outputs[i + 1].take());
}
self.state.update_model_output(
model_outputs.len() - 1,
Some(model_output_converted.clone()),
);
let mut this_order = self.config.solver_order;
if self.config.lower_order_final {
this_order = self
.config
.solver_order
.min(self.schedule.timesteps.len() - step_index);
}
self.state
.update_order(this_order.min(self.state.lower_order_nums() + 1));
self.state.update_last_sample(sample.clone());
let prev_sample = self.multistep_uni_p_bh_update(sample, timestep)?;
let lower_order_nums = self.state.lower_order_nums();
if lower_order_nums < self.config.solver_order {
self.state.update_lower_order_nums(lower_order_nums + 1);
}
Ok(prev_sample)
}
fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Result<Tensor> {
Ok(sample)
}
fn timesteps(&self) -> &[usize] {
&self.schedule.timesteps
}
fn add_noise(&self, original: &Tensor, noise: Tensor, timestep: usize) -> Result<Tensor> {
let (alpha_t, sigma_t) = (
self.schedule.alpha_t(timestep),
self.schedule.sigma_t(timestep),
);
(alpha_t * original)? + (sigma_t * noise)?
}
fn init_noise_sigma(&self) -> f64 {
self.schedule.sigma_t(self.schedule.num_training_steps())
}
}
#[derive(Debug, Clone)]
struct Schedule {
timesteps: Vec<usize>,
num_training_steps: usize,
sigma_schedule: SigmaSchedule,
#[allow(unused)]
timestep_schedule: TimestepSchedule,
}
impl Schedule {
fn new(
timestep_schedule: TimestepSchedule,
sigma_schedule: SigmaSchedule,
num_inference_steps: usize,
num_training_steps: usize,
) -> Result<Self> {
Ok(Self {
timesteps: timestep_schedule.timesteps(
&sigma_schedule,
num_inference_steps,
num_training_steps,
)?,
timestep_schedule,
sigma_schedule,
num_training_steps,
})
}
fn timesteps(&self) -> &[usize] {
&self.timesteps
}
fn num_training_steps(&self) -> usize {
self.num_training_steps
}
fn t(&self, step: usize) -> f64 {
(step as f64 + 1.) / self.num_training_steps as f64
}
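    // EDM-style parameterization of the noise schedule:
    //   alpha_t  = 1 / sqrt(sigma(t)^2 + 1)
    //   sigma_t  = sigma(t) * alpha_t
    //   lambda_t = ln(alpha_t) - ln(sigma_t)   (log signal-to-noise ratio)
    // with sigma(t) supplied by the configured SigmaSchedule.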
fn alpha_t(&self, t: usize) -> f64 {
(1. / (self.sigma_schedule.sigma_t(self.t(t)).powi(2) + 1.)).sqrt()
}
fn sigma_t(&self, t: usize) -> f64 {
self.sigma_schedule.sigma_t(self.t(t)) * self.alpha_t(t)
}
fn lambda_t(&self, t: usize) -> f64 {
self.alpha_t(t).ln() - self.sigma_t(t).ln()
}
}
mod stats {
    //! This is a slightly modified form of the P² quantile implementation from https://github.com/vks/average.
//! Also see: http://www.cs.wustl.edu/~jain/papers/ftp/psqr.pdf
use num_traits::{Float, ToPrimitive};
#[derive(Debug, Clone)]
pub struct Quantile {
q: [f64; 5],
n: [i64; 5],
m: [f64; 5],
dm: [f64; 5],
max: Option<f64>,
}
impl Quantile {
pub fn new(p: f64) -> Quantile {
assert!((0. ..=1.).contains(&p));
Quantile {
q: [0.; 5],
n: [1, 2, 3, 4, 0],
m: [1., 1. + 2. * p, 1. + 4. * p, 3. + 2. * p, 5.],
dm: [0., p / 2., p, (1. + p) / 2., 1.],
max: None,
}
}
pub fn max(&self) -> f64 {
self.max.unwrap_or(f64::NAN)
}
fn p(&self) -> f64 {
self.dm[2]
}
fn parabolic(&self, i: usize, d: f64) -> f64 {
let s = d.round() as i64;
self.q[i]
+ d / (self.n[i + 1] - self.n[i - 1]).to_f64().unwrap()
* ((self.n[i] - self.n[i - 1] + s).to_f64().unwrap()
* (self.q[i + 1] - self.q[i])
/ (self.n[i + 1] - self.n[i]).to_f64().unwrap()
+ (self.n[i + 1] - self.n[i] - s).to_f64().unwrap()
* (self.q[i] - self.q[i - 1])
/ (self.n[i] - self.n[i - 1]).to_f64().unwrap())
}
fn linear(&self, i: usize, d: f64) -> f64 {
let sum = if d < 0. { i - 1 } else { i + 1 };
self.q[i] + d * (self.q[sum] - self.q[i]) / (self.n[sum] - self.n[i]).to_f64().unwrap()
}
pub fn quantile(&self) -> f64 {
if self.len() >= 5 {
return self.q[2];
}
if self.is_empty() {
return f64::NAN;
}
let mut heights: [f64; 4] = [self.q[0], self.q[1], self.q[2], self.q[3]];
let len = self.len() as usize;
debug_assert!(len < 5);
sort_floats(&mut heights[..len]);
let desired_index = (len as f64) * self.p() - 1.;
let mut index = desired_index.ceil();
if desired_index == index && index >= 0. {
let index = index.round() as usize;
debug_assert!(index < 5);
if index < len - 1 {
return 0.5 * self.q[index] + 0.5 * self.q[index + 1];
}
}
index = index.max(0.);
let mut index = index.round() as usize;
debug_assert!(index < 5);
index = index.min(len - 1);
self.q[index]
}
fn len(&self) -> u64 {
self.n[4] as u64
}
fn is_empty(&self) -> bool {
self.len() == 0
}
pub fn add(&mut self, x: f64) {
self.max = self.max.map(|y| y.max(x)).or(Some(x));
if self.n[4] < 5 {
self.q[self.n[4] as usize] = x;
self.n[4] += 1;
if self.n[4] == 5 {
sort_floats(&mut self.q);
}
return;
}
let mut k: usize;
if x < self.q[0] {
self.q[0] = x;
k = 0;
} else {
k = 4;
for i in 1..5 {
if x < self.q[i] {
k = i;
break;
}
}
if self.q[4] < x {
self.q[4] = x;
}
};
for i in k..5 {
self.n[i] += 1;
}
for i in 0..5 {
self.m[i] += self.dm[i];
}
for i in 1..4 {
let d = self.m[i] - self.n[i].to_f64().unwrap();
if d >= 1. && self.n[i + 1] - self.n[i] > 1
|| d <= -1. && self.n[i - 1] - self.n[i] < -1
{
let d = Float::signum(d);
let q_new = self.parabolic(i, d);
if self.q[i - 1] < q_new && q_new < self.q[i + 1] {
self.q[i] = q_new;
} else {
self.q[i] = self.linear(i, d);
}
let delta = d.round() as i64;
debug_assert_eq!(delta.abs(), 1);
self.n[i] += delta;
}
}
}
pub fn with_samples(mut self, samples: impl IntoIterator<Item = f64>) -> Self {
for sample in samples {
self.add(sample);
}
self
}
}
fn sort_floats(v: &mut [f64]) {
v.sort_unstable_by(|a, b| a.total_cmp(b));
}
}
mod linalg {
use candle::{IndexOp, Result, Shape, Tensor};
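    // Helpers for the small dense matrices used to build the solver coefficients.
    // The inverse is computed with the classical adjugate formula,
    // inverse(A) = adjoint(A) / det(A), via cofactor expansion; this is adequate for
    // the tiny (order x order) systems involved here but not for large matrices.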
pub fn inverse(m: &Tensor) -> Result<Tensor> {
adjoint(m)? / determinant(m)?.to_scalar::<f64>()?
}
pub fn adjoint(m: &Tensor) -> Result<Tensor> {
cofactor(m)?.transpose(0, 1)
}
pub fn cofactor(m: &Tensor) -> Result<Tensor> {
let s = m.shape().dim(0)?;
if s == 2 {
let mut v = vec![];
for i in 0..2 {
let mut x = vec![];
for j in 0..2 {
x.push((m.i((i, j))? * (-1.0f64).powi(i as i32 + j as i32))?)
}
v.push(Tensor::stack(&x, 0)?.unsqueeze(0)?);
}
return Tensor::stack(&v, 1)?.squeeze(0);
}
let minors = minors(m)?;
let mut v = vec![];
for i in 0..s {
let mut x = vec![];
for j in 0..s {
let det = (determinant(&minors.i((i, j))?)?
* ((-1.0f64).powi(i as i32) * (-1.0f64).powi(j as i32)))?;
x.push(det);
}
v.push(Tensor::stack(&x, 0)?.unsqueeze(0)?);
}
Tensor::stack(&v, 1)?.squeeze(0)
}
pub fn determinant(m: &Tensor) -> Result<Tensor> {
let s = m.shape().dim(0)?;
if s == 2 {
return (m.i((0, 0))? * m.i((1, 1))?)? - (m.i((0, 1))? * m.i((1, 0))?);
}
let cofactor = cofactor(m)?;
let m0 = m.i((0, 0))?;
let det = (0..s)
.map(|i| (m.i((0, i))? * cofactor.i((0, i))?))
.try_fold(m0.zeros_like()?, |acc, cur| (acc + cur?))?;
Ok(det)
}
pub fn minors(m: &Tensor) -> Result<Tensor> {
let s = m.shape().dim(0)?;
if s == 1 {
return m.i((0, 0));
}
let mut v = vec![];
for i in 0..s {
let msub = Tensor::cat(&[m.i((..i, ..))?, m.i(((i + 1).., ..))?], 0)?;
let mut x = vec![];
for j in 0..s {
let t = Tensor::cat(&[msub.i((.., ..j))?, msub.i((.., (j + 1)..))?], 1)?;
x.push(t);
}
v.push(Tensor::stack(&x, 0)?.unsqueeze(0)?);
}
Tensor::stack(&v, 1)?.squeeze(0)
}
#[derive(Debug)]
pub struct TensordotGeneral {
pub lhs_permutation: Permutation,
pub rhs_permutation: Permutation,
pub tensordot_fixed_position: TensordotFixedPosition,
pub output_permutation: Permutation,
}
impl TensordotGeneral {
pub fn eval(&self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor> {
let permuted_lhs = self.lhs_permutation.eval(lhs)?;
let permuted_rhs = self.rhs_permutation.eval(rhs)?;
let tensordotted = self
.tensordot_fixed_position
.eval(&permuted_lhs, &permuted_rhs)?;
self.output_permutation.eval(&tensordotted)
}
}
#[derive(Debug)]
pub struct TensordotFixedPosition {
pub len_uncontracted_lhs: usize,
pub len_uncontracted_rhs: usize,
pub len_contracted_axes: usize,
pub output_shape: Shape,
}
impl TensordotFixedPosition {
fn eval(&self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor> {
let lhs_view = lhs.reshape((self.len_uncontracted_lhs, self.len_contracted_axes))?;
let rhs_view = rhs.reshape((self.len_contracted_axes, self.len_uncontracted_rhs))?;
lhs_view.matmul(&rhs_view)?.reshape(&self.output_shape)
}
}
#[derive(Debug)]
pub struct Permutation {
pub dims: Vec<usize>,
}
impl Permutation {
fn eval(&self, tensor: &Tensor) -> Result<Tensor> {
tensor.permute(self.dims.as_slice())
}
}
}
| candle/candle-transformers/src/models/stable_diffusion/uni_pc.rs/0 | {
"file_path": "candle/candle-transformers/src/models/stable_diffusion/uni_pc.rs",
"repo_id": "candle",
"token_count": 17600
} |
use candle::{DType, Module, Result, Tensor, D};
use candle_nn::VarBuilder;
// https://github.com/huggingface/diffusers/blob/19edca82f1ff194c07317369a92b470dbae97f34/src/diffusers/pipelines/wuerstchen/modeling_wuerstchen_common.py#L22
#[derive(Debug)]
pub struct WLayerNorm {
eps: f64,
}
impl WLayerNorm {
pub fn new(_size: usize) -> Result<Self> {
Ok(Self { eps: 1e-6 })
}
}
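// Weight-free LayerNorm for NCHW tensors: the input is moved to channels-last,
// normalized over the channel dimension in f32 for numerical stability, then
// permuted back to NCHW.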
impl Module for WLayerNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let xs = xs.permute((0, 2, 3, 1))?;
let x_dtype = xs.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = xs.dim(D::Minus1)?;
let xs = xs.to_dtype(internal_dtype)?;
let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let xs = xs.broadcast_sub(&mean_x)?;
let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)?
.permute((0, 3, 1, 2))
}
}
#[derive(Debug)]
pub struct LayerNormNoWeights {
eps: f64,
}
impl LayerNormNoWeights {
pub fn new(_size: usize) -> Result<Self> {
Ok(Self { eps: 1e-6 })
}
}
impl Module for LayerNormNoWeights {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let x_dtype = xs.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = xs.dim(D::Minus1)?;
let xs = xs.to_dtype(internal_dtype)?;
let mean_x = (xs.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let xs = xs.broadcast_sub(&mean_x)?;
let norm_x = (xs.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
xs.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)
}
}
#[derive(Debug)]
pub struct TimestepBlock {
mapper: candle_nn::Linear,
}
impl TimestepBlock {
pub fn new(c: usize, c_timestep: usize, vb: VarBuilder) -> Result<Self> {
let mapper = candle_nn::linear(c_timestep, c * 2, vb.pp("mapper"))?;
Ok(Self { mapper })
}
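    // The timestep embedding is mapped to per-channel (scale, shift) chunks and
    // applied as xs * (1 + scale) + shift.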
pub fn forward(&self, xs: &Tensor, t: &Tensor) -> Result<Tensor> {
let ab = self
.mapper
.forward(t)?
.unsqueeze(2)?
.unsqueeze(3)?
.chunk(2, 1)?;
xs.broadcast_mul(&(&ab[0] + 1.)?)?.broadcast_add(&ab[1])
}
}
#[derive(Debug)]
pub struct GlobalResponseNorm {
gamma: Tensor,
beta: Tensor,
}
impl GlobalResponseNorm {
pub fn new(dim: usize, vb: VarBuilder) -> Result<Self> {
let gamma = vb.get((1, 1, 1, dim), "gamma")?;
let beta = vb.get((1, 1, 1, dim), "beta")?;
Ok(Self { gamma, beta })
}
}
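// Global response normalization: features are scaled by their spatial L2 norm
// (divided by the mean of that norm across channels), modulated by the learned
// gamma/beta parameters, and added back to the input as a residual.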
impl Module for GlobalResponseNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let agg_norm = xs.sqr()?.sum_keepdim((1, 2))?.sqrt()?;
let stand_div_norm =
agg_norm.broadcast_div(&(agg_norm.mean_keepdim(D::Minus1)? + 1e-6)?)?;
xs.broadcast_mul(&stand_div_norm)?
.broadcast_mul(&self.gamma)?
.broadcast_add(&self.beta)?
+ xs
}
}
#[derive(Debug)]
pub struct ResBlock {
depthwise: candle_nn::Conv2d,
norm: WLayerNorm,
channelwise_lin1: candle_nn::Linear,
channelwise_grn: GlobalResponseNorm,
channelwise_lin2: candle_nn::Linear,
}
impl ResBlock {
pub fn new(c: usize, c_skip: usize, ksize: usize, vb: VarBuilder) -> Result<Self> {
let cfg = candle_nn::Conv2dConfig {
padding: ksize / 2,
groups: c,
..Default::default()
};
let depthwise = candle_nn::conv2d(c + c_skip, c, ksize, cfg, vb.pp("depthwise"))?;
let norm = WLayerNorm::new(c)?;
let channelwise_lin1 = candle_nn::linear(c, c * 4, vb.pp("channelwise.0"))?;
let channelwise_grn = GlobalResponseNorm::new(c * 4, vb.pp("channelwise.2"))?;
let channelwise_lin2 = candle_nn::linear(c * 4, c, vb.pp("channelwise.4"))?;
Ok(Self {
depthwise,
norm,
channelwise_lin1,
channelwise_grn,
channelwise_lin2,
})
}
pub fn forward(&self, xs: &Tensor, x_skip: Option<&Tensor>) -> Result<Tensor> {
let x_res = xs;
let xs = match x_skip {
None => xs.clone(),
Some(x_skip) => Tensor::cat(&[xs, x_skip], 1)?,
};
let xs = xs
.apply(&self.depthwise)?
.apply(&self.norm)?
.permute((0, 2, 3, 1))?;
let xs = xs
.apply(&self.channelwise_lin1)?
.gelu_erf()?
.apply(&self.channelwise_grn)?
.apply(&self.channelwise_lin2)?
.permute((0, 3, 1, 2))?;
xs + x_res
}
}
use super::attention_processor::Attention;
#[derive(Debug)]
pub struct AttnBlock {
self_attn: bool,
norm: WLayerNorm,
attention: Attention,
kv_mapper_lin: candle_nn::Linear,
}
impl AttnBlock {
pub fn new(
c: usize,
c_cond: usize,
nhead: usize,
self_attn: bool,
use_flash_attn: bool,
vb: VarBuilder,
) -> Result<Self> {
let norm = WLayerNorm::new(c)?;
let attention = Attention::new(c, nhead, c / nhead, use_flash_attn, vb.pp("attention"))?;
let kv_mapper_lin = candle_nn::linear(c_cond, c, vb.pp("kv_mapper.1"))?;
Ok(Self {
self_attn,
norm,
attention,
kv_mapper_lin,
})
}
pub fn forward(&self, xs: &Tensor, kv: &Tensor) -> Result<Tensor> {
let kv = candle_nn::ops::silu(kv)?.apply(&self.kv_mapper_lin)?;
let norm_xs = self.norm.forward(xs)?;
let kv = if self.self_attn {
let (b_size, channel, _, _) = xs.dims4()?;
let norm_xs = norm_xs.reshape((b_size, channel, ()))?.transpose(1, 2)?;
Tensor::cat(&[&norm_xs, &kv], 1)?.contiguous()?
} else {
kv
};
xs + self.attention.forward(&norm_xs, &kv)
}
}
| candle/candle-transformers/src/models/wuerstchen/common.rs/0 | {
"file_path": "candle/candle-transformers/src/models/wuerstchen/common.rs",
"repo_id": "candle",
"token_count": 3219
} |
[package]
name = "candle-wasm-example-bert"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
[dependencies]
candle = { workspace = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
num-traits = { workspace = true }
tokenizers = { workspace = true, features = ["unstable_wasm"] }
# App crates.
anyhow = { workspace = true }
byteorder = { workspace = true }
log = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
safetensors = { workspace = true }
# Wasm specific crates.
console_error_panic_hook = "0.1.7"
getrandom = { version = "0.2", features = ["js"] }
gloo = "0.11"
js-sys = "0.3.64"
wasm-bindgen = "0.2.87"
serde-wasm-bindgen = "0.6.0"
| candle/candle-wasm-examples/bert/Cargo.toml/0 | {
"file_path": "candle/candle-wasm-examples/bert/Cargo.toml",
"repo_id": "candle",
"token_count": 304
} |
[package]
name = "candle-wasm-example-llama2"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
[dependencies]
candle = { workspace = true }
candle-nn = { workspace = true }
candle-transformers = { workspace = true }
num-traits = { workspace = true }
tokenizers = { workspace = true, features = ["unstable_wasm"] }
# App crates.
anyhow = { workspace = true }
byteorder = { workspace = true }
log = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
# Wasm specific crates.
console_error_panic_hook = "0.1.7"
getrandom = { version = "0.2", features = ["js"] }
gloo = "0.11"
js-sys = "0.3.64"
wasm-bindgen = "0.2.87"
wasm-bindgen-futures = "0.4.37"
wasm-logger = "0.2"
yew-agent = "0.2.0"
yew = { version = "0.20.0", features = ["csr"] }
[dependencies.web-sys]
version = "0.3.70"
features = [
'Blob',
'Document',
'Element',
'HtmlElement',
'Node',
'Window',
'Request',
'RequestCache',
'RequestInit',
'RequestMode',
'Response',
'Performance',
]
| candle/candle-wasm-examples/llama2-c/Cargo.toml/0 | {
"file_path": "candle/candle-wasm-examples/llama2-c/Cargo.toml",
"repo_id": "candle",
"token_count": 434
} |
import snarkdown from "https://cdn.skypack.dev/snarkdown";
import hljs from "https://cdn.skypack.dev/highlight.js";
// models base url
const MODELS = {
moondream2_q4k: {
base_url:
"https://huggingface.co/santiagomed/candle-moondream/resolve/main/",
model: "model-q4_0.gguf",
tokenizer: "tokenizer.json",
quantized: true,
size: "1.51 GB",
},
};
const moodreamWorker = new Worker("./moondreamWorker.js", {
type: "module",
});
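// Ask the moondream worker to generate an answer for the current image and prompt.
// Status messages from the worker drive the UI; the promise resolves when the
// worker reports "complete" or "aborted" and rejects on error.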
async function generateSequence(controller) {
const getValue = (id) => document.querySelector(`#${id}`).value;
const modelID = getValue("model");
const model = MODELS[modelID];
const weightsURL =
model.model instanceof Array
? model.model.map((m) => model.base_url + m)
: model.base_url + model.model;
const tokenizerURL = model.base_url + model.tokenizer;
const prompt = getValue("prompt").trim();
const temperature = getValue("temperature");
const topP = getValue("top-p");
const repeatPenalty = getValue("repeat_penalty");
const seed = getValue("seed");
const maxSeqLen = getValue("max-seq");
  if (prompt === "") {
return;
}
function updateStatus(data) {
const outStatus = document.querySelector("#output-status");
const outGen = document.querySelector("#output-generation");
const outCounter = document.querySelector("#output-counter");
switch (data.status) {
case "loading":
outStatus.hidden = false;
outStatus.textContent = data.message;
outGen.hidden = true;
outCounter.hidden = true;
break;
case "generating":
const { message, prompt, sentence, tokensSec, totalTime } = data;
outStatus.hidden = true;
outCounter.hidden = false;
outGen.hidden = false;
outGen.innerHTML = snarkdown(prompt + sentence);
outCounter.innerHTML = `${(totalTime / 1000).toFixed(
2
)}s (${tokensSec.toFixed(2)} tok/s)`;
hljs.highlightAll();
break;
case "complete":
outStatus.hidden = true;
outGen.hidden = false;
break;
}
}
return new Promise((resolve, reject) => {
moodreamWorker.postMessage({
weightsURL,
modelID,
tokenizerURL,
quantized: model.quantized,
imageURL: currentImageURL,
prompt,
temp: temperature,
top_p: topP,
repeatPenalty,
seed: seed,
maxSeqLen,
verbose_prompt: false,
command: "start",
});
const handleAbort = () => {
moodreamWorker.postMessage({ command: "abort" });
};
const handleMessage = (event) => {
const { status, error, message, prompt, sentence } = event.data;
if (status) updateStatus(event.data);
if (error) {
moodreamWorker.removeEventListener("message", handleMessage);
reject(new Error(error));
}
if (status === "aborted") {
moodreamWorker.removeEventListener("message", handleMessage);
resolve(event.data);
}
if (status === "complete") {
moodreamWorker.removeEventListener("message", handleMessage);
resolve(event.data);
}
};
controller.signal.addEventListener("abort", handleAbort);
moodreamWorker.addEventListener("message", handleMessage);
});
}
const form = document.querySelector("#form");
const prompt = document.querySelector("#prompt");
const runBtn = document.querySelector("#run");
const modelSelect = document.querySelector("#model");
const dropArea = document.querySelector("#drop-area");
const canvas = document.querySelector("#canvas");
const ctxCanvas = canvas.getContext("2d");
const fileUpload = document.querySelector("#file-upload");
const clearImgBtn = document.querySelector("#clear-img-btn");
const imagesExamples = document.querySelector("#image-select");
let currentImageURL = null;
let runController = new AbortController();
let isRunning = false;
document.addEventListener("DOMContentLoaded", () => {
for (const [id, model] of Object.entries(MODELS)) {
const option = document.createElement("option");
option.value = id;
option.innerText = `${id} (${model.size})`;
modelSelect.appendChild(option);
}
const query = new URLSearchParams(window.location.search);
const modelID = query.get("model");
if (modelID) {
modelSelect.value = modelID;
} else {
modelSelect.value = "moondream2_q4k";
}
});
imagesExamples.addEventListener("click", (e) => {
// if (isEmbedding || isSegmenting) {
// return;
// }
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
clearImageCanvas();
currentImageURL = href;
drawImageCanvas(href);
}
});
modelSelect.addEventListener("change", (e) => {
const query = new URLSearchParams(window.location.search);
query.set("model", e.target.value);
window.history.replaceState({}, "", `${window.location.pathname}?${query}`);
window.parent.postMessage({ queryString: "?" + query }, "*");
const model = MODELS[e.target.value];
document.querySelector("#max-seq").max = model.seq_len;
document.querySelector("#max-seq").nextElementSibling.value = 200;
});
clearImgBtn.addEventListener("click", () => {
clearImageCanvas();
});
//add event listener to file input
fileUpload.addEventListener("input", async (e) => {
const target = e.target;
if (target.files.length > 0 && !target.files[0].type.includes("svg")) {
const href = URL.createObjectURL(target.files[0]);
clearImageCanvas();
await drawImageCanvas(href);
}
});
// add event listener to drop-area
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", async (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
clearImageCanvas();
await drawImageCanvas(href);
} else if (url) {
clearImageCanvas();
await drawImageCanvas(url);
}
});
form.addEventListener("submit", async (e) => {
e.preventDefault();
if (isRunning) {
stopRunning();
} else {
startRunning();
await generateSequence(runController);
stopRunning();
}
});
async function drawImageCanvas(imgURL) {
if (!imgURL) {
throw new Error("No image URL provided");
}
return new Promise((resolve, reject) => {
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctxCanvas.drawImage(img, 0, 0);
clearImgBtn.disabled = false;
resolve(img);
};
img.src = imgURL;
currentImageURL = imgURL;
});
}
function clearImageCanvas() {
ctxCanvas.clearRect(0, 0, canvas.width, canvas.height);
clearImgBtn.disabled = true;
canvas.parentElement.style.height = "auto";
currentImageURL = null;
canvas.width = 0;
canvas.height = 0;
}
function startRunning() {
isRunning = true;
runBtn.textContent = "Stop";
prompt.disabled = true;
}
function stopRunning() {
runController.abort();
runController = new AbortController();
runBtn.textContent = "Run";
isRunning = false;
prompt.disabled = false;
}
prompt.addEventListener("input", (e) => {
runBtn.disabled = false;
});
| candle/candle-wasm-examples/moondream/code.js/0 | {
"file_path": "candle/candle-wasm-examples/moondream/code.js",
"repo_id": "candle",
"token_count": 2873
} |
//load the candle SAM Model wasm module
import init, { Model } from "./build/m.js";
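// Fetch a binary file as bytes; when cacheModel is true the response is memoized
// in the browser Cache API so model weights are only downloaded once.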
async function fetchArrayBuffer(url, cacheModel = true) {
if (!cacheModel)
return new Uint8Array(await (await fetch(url)).arrayBuffer());
const cacheName = "sam-candle-cache";
const cache = await caches.open(cacheName);
const cachedResponse = await cache.match(url);
if (cachedResponse) {
const data = await cachedResponse.arrayBuffer();
return new Uint8Array(data);
}
const res = await fetch(url, { cache: "force-cache" });
cache.put(url, res.clone());
return new Uint8Array(await res.arrayBuffer());
}
class SAMModel {
static instance = {};
// keep current image embeddings state
static imageArrayHash = {};
// Add a new property to hold the current modelID
static currentModelID = null;
static async getInstance(modelURL, modelID) {
if (!this.instance[modelID]) {
await init();
self.postMessage({
status: "loading",
message: `Loading Model ${modelID}`,
});
const weightsArrayU8 = await fetchArrayBuffer(modelURL);
this.instance[modelID] = new Model(
weightsArrayU8,
/tiny|mobile/.test(modelID)
);
} else {
self.postMessage({ status: "loading", message: "Model Already Loaded" });
}
// Set the current modelID to the modelID that was passed in
this.currentModelID = modelID;
return this.instance[modelID];
}
// Remove the modelID parameter from setImageEmbeddings
static setImageEmbeddings(imageArrayU8) {
// check if image embeddings are already set for this image and model
const imageArrayHash = this.getSimpleHash(imageArrayU8);
if (
this.imageArrayHash[this.currentModelID] === imageArrayHash &&
this.instance[this.currentModelID]
) {
self.postMessage({
status: "embedding",
message: "Embeddings Already Set",
});
return;
}
this.imageArrayHash[this.currentModelID] = imageArrayHash;
this.instance[this.currentModelID].set_image_embeddings(imageArrayU8);
self.postMessage({ status: "embedding", message: "Embeddings Set" });
}
static getSimpleHash(imageArrayU8) {
// get simple hash of imageArrayU8
let imageArrayHash = 0;
for (let i = 0; i < imageArrayU8.length; i += 100) {
imageArrayHash ^= imageArrayU8[i];
}
return imageArrayHash.toString(16);
}
}
async function createImageCanvas(
{ mask_shape, mask_data }, // mask
{ original_width, original_height, width, height } // original image
) {
const [_, __, shape_width, shape_height] = mask_shape;
const maskCanvas = new OffscreenCanvas(shape_width, shape_height); // canvas for mask
const maskCtx = maskCanvas.getContext("2d");
const canvas = new OffscreenCanvas(original_width, original_height); // canvas for creating mask with original image size
const ctx = canvas.getContext("2d");
const imageData = maskCtx.createImageData(
maskCanvas.width,
maskCanvas.height
);
const data = imageData.data;
for (let p = 0; p < data.length; p += 4) {
data[p] = 0;
data[p + 1] = 0;
data[p + 2] = 0;
data[p + 3] = mask_data[p / 4] * 255;
}
maskCtx.putImageData(imageData, 0, 0);
let sx, sy;
if (original_height < original_width) {
sy = original_height / original_width;
sx = 1;
} else {
sy = 1;
sx = original_width / original_height;
}
ctx.drawImage(
maskCanvas,
0,
0,
maskCanvas.width * sx,
maskCanvas.height * sy,
0,
0,
original_width,
original_height
);
const blob = await canvas.convertToBlob();
return URL.createObjectURL(blob);
}
self.addEventListener("message", async (event) => {
const { modelURL, modelID, imageURL, points } = event.data;
try {
self.postMessage({ status: "loading", message: "Starting SAM" });
const sam = await SAMModel.getInstance(modelURL, modelID);
self.postMessage({ status: "loading", message: "Loading Image" });
const imageArrayU8 = await fetchArrayBuffer(imageURL, false);
self.postMessage({ status: "embedding", message: "Creating Embeddings" });
SAMModel.setImageEmbeddings(imageArrayU8);
if (!points) {
// no points only do the embeddings
self.postMessage({
status: "complete-embedding",
message: "Embeddings Complete",
});
return;
}
self.postMessage({ status: "segmenting", message: "Segmenting" });
const { mask, image } = sam.mask_for_point({ points });
const maskDataURL = await createImageCanvas(mask, image);
// Send the segment back to the main thread as JSON
self.postMessage({
status: "complete",
message: "Segmentation Complete",
output: { maskURL: maskDataURL },
});
} catch (e) {
self.postMessage({ error: e });
}
});
| candle/candle-wasm-examples/segment-anything/samWorker.js/0 | {
"file_path": "candle/candle-wasm-examples/segment-anything/samWorker.js",
"repo_id": "candle",
"token_count": 1747
} |
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type" />
<title>Candle YOLOv8 Rust/WASM</title>
</head>
<body></body>
</html>
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<style>
@import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
html,
body {
font-family: "Source Sans 3", sans-serif;
}
code,
output,
select,
pre {
font-family: "Source Code Pro", monospace;
}
</style>
<script src="https://cdn.tailwindcss.com"></script>
<script
src="https://cdn.jsdelivr.net/gh/huggingface/hub-js-utils/share-canvas.js"
type="module"
></script>
<script type="module">
const MODEL_BASEURL =
"https://huggingface.co/lmz/candle-yolo-v8/resolve/main/";
const MODELS = {
yolov8n: {
model_size: "n",
url: "yolov8n.safetensors",
},
yolov8s: {
model_size: "s",
url: "yolov8s.safetensors",
},
yolov8m: {
model_size: "m",
url: "yolov8m.safetensors",
},
yolov8l: {
model_size: "l",
url: "yolov8l.safetensors",
},
yolov8x: {
model_size: "x",
url: "yolov8x.safetensors",
},
yolov8n_pose: {
model_size: "n",
url: "yolov8n-pose.safetensors",
},
yolov8s_pose: {
model_size: "s",
url: "yolov8s-pose.safetensors",
},
yolov8m_pose: {
model_size: "m",
url: "yolov8m-pose.safetensors",
},
yolov8l_pose: {
model_size: "l",
url: "yolov8l-pose.safetensors",
},
yolov8x_pose: {
model_size: "x",
url: "yolov8x-pose.safetensors",
},
};
const COCO_PERSON_SKELETON = [
[4, 0], // head
[3, 0],
[16, 14], // left lower leg
[14, 12], // left upper leg
[6, 12], // left torso
[6, 5], // top torso
[6, 8], // upper arm
[8, 10], // lower arm
[1, 2], // head
[1, 3], // right head
[2, 4], // left head
[3, 5], // right neck
[4, 6], // left neck
[5, 7], // right upper arm
[7, 9], // right lower arm
[5, 11], // right torso
[11, 12], // bottom torso
[11, 13], // right upper leg
[13, 15], // right lower leg
];
// init web worker
const yoloWorker = new Worker("./yoloWorker.js", { type: "module" });
let hasImage = false;
//add event listener to image examples
document.querySelector("#image-select").addEventListener("click", (e) => {
const target = e.target;
if (target.nodeName === "IMG") {
const href = target.src;
drawImageCanvas(href);
}
});
//add event listener to file input
document.querySelector("#file-upload").addEventListener("change", (e) => {
const target = e.target;
if (target.files.length > 0) {
const href = URL.createObjectURL(target.files[0]);
drawImageCanvas(href);
}
});
// add event listener to drop-area
const dropArea = document.querySelector("#drop-area");
dropArea.addEventListener("dragenter", (e) => {
e.preventDefault();
dropArea.classList.add("border-blue-700");
});
dropArea.addEventListener("dragleave", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
});
dropArea.addEventListener("dragover", (e) => {
e.preventDefault();
});
dropArea.addEventListener("drop", (e) => {
e.preventDefault();
dropArea.classList.remove("border-blue-700");
const url = e.dataTransfer.getData("text/uri-list");
const files = e.dataTransfer.files;
if (files.length > 0) {
const href = URL.createObjectURL(files[0]);
drawImageCanvas(href);
} else if (url) {
drawImageCanvas(url);
}
});
document.querySelector("#clear-btn").addEventListener("click", () => {
drawImageCanvas();
});
function drawImageCanvas(imgURL) {
const canvas = document.querySelector("#canvas");
const canvasResult = document.querySelector("#canvas-result");
canvasResult
.getContext("2d")
.clearRect(0, 0, canvas.width, canvas.height);
const ctx = canvas.getContext("2d");
ctx.clearRect(0, 0, canvas.width, canvas.height);
document.querySelector("#share-btn").classList.add("invisible");
document.querySelector("#clear-btn").classList.add("invisible");
document.querySelector("#detect").disabled = true;
hasImage = false;
canvas.parentElement.style.height = "auto";
if (imgURL && imgURL !== "") {
const img = new Image();
img.crossOrigin = "anonymous";
img.onload = () => {
canvas.width = img.width;
canvas.height = img.height;
ctx.drawImage(img, 0, 0);
canvas.parentElement.style.height = canvas.offsetHeight + "px";
hasImage = true;
document.querySelector("#detect").disabled = false;
document.querySelector("#clear-btn").classList.remove("invisible");
};
img.src = imgURL;
}
}
async function classifyImage(
imageURL, // URL of image to classify
modelID, // ID of model to use
modelURL, // URL to model file
modelSize, // size of model
confidence, // confidence threshold
iou_threshold, // IoU threshold
updateStatus // function receives status updates
) {
return new Promise((resolve, reject) => {
yoloWorker.postMessage({
imageURL,
modelID,
modelURL,
modelSize,
confidence,
iou_threshold,
});
function handleMessage(event) {
console.log("message", event.data);
if ("status" in event.data) {
updateStatus(event.data.status);
}
if ("error" in event.data) {
yoloWorker.removeEventListener("message", handleMessage);
reject(new Error(event.data.error));
}
if (event.data.status === "complete") {
yoloWorker.removeEventListener("message", handleMessage);
resolve(event.data);
}
}
yoloWorker.addEventListener("message", handleMessage);
});
}
// add event listener to detect button
document.querySelector("#detect").addEventListener("click", async () => {
if (!hasImage) {
return;
}
const modelID = document.querySelector("#model").value;
const modelURL = MODEL_BASEURL + MODELS[modelID].url;
const modelSize = MODELS[modelID].model_size;
const confidence = parseFloat(
document.querySelector("#confidence").value
);
const iou_threshold = parseFloat(
document.querySelector("#iou_threshold").value
);
const canvasInput = document.querySelector("#canvas");
const canvas = document.querySelector("#canvas-result");
canvas.width = canvasInput.width;
canvas.height = canvasInput.height;
const scale = canvas.width / canvas.offsetWidth;
const ctx = canvas.getContext("2d");
ctx.drawImage(canvasInput, 0, 0);
const imageURL = canvas.toDataURL();
        const results = await classifyImage(
imageURL,
modelID,
modelURL,
modelSize,
confidence,
iou_threshold,
updateStatus
);
const { output } = results;
ctx.lineWidth = 1 + 2 * scale;
ctx.strokeStyle = "#3c8566";
ctx.fillStyle = "#0dff9a";
const fontSize = 14 * scale;
ctx.font = `${fontSize}px sans-serif`;
for (const detection of output) {
// check keypoint for pose model data
let xmin, xmax, ymin, ymax, label, confidence, keypoints;
if ("keypoints" in detection) {
xmin = detection.xmin;
xmax = detection.xmax;
ymin = detection.ymin;
ymax = detection.ymax;
confidence = detection.confidence;
keypoints = detection.keypoints;
} else {
const [_label, bbox] = detection;
label = _label;
xmin = bbox.xmin;
xmax = bbox.xmax;
ymin = bbox.ymin;
ymax = bbox.ymax;
confidence = bbox.confidence;
}
const [x, y, w, h] = [xmin, ymin, xmax - xmin, ymax - ymin];
const text = `${label ? label + " " : ""}${confidence.toFixed(2)}`;
const width = ctx.measureText(text).width;
ctx.fillStyle = "#3c8566";
ctx.fillRect(x - 2, y - fontSize, width + 4, fontSize);
ctx.fillStyle = "#e3fff3";
ctx.strokeRect(x, y, w, h);
ctx.fillText(text, x, y - 2);
if (keypoints) {
ctx.save();
ctx.fillStyle = "magenta";
ctx.strokeStyle = "yellow";
for (const keypoint of keypoints) {
const { x, y } = keypoint;
ctx.beginPath();
ctx.arc(x, y, 3, 0, 2 * Math.PI);
ctx.fill();
}
ctx.beginPath();
for (const [xid, yid] of COCO_PERSON_SKELETON) {
            // draw lines between skeleton keypoints
if (keypoints[xid] && keypoints[yid]) {
ctx.moveTo(keypoints[xid].x, keypoints[xid].y);
ctx.lineTo(keypoints[yid].x, keypoints[yid].y);
}
}
ctx.stroke();
ctx.restore();
}
}
});
function updateStatus(statusMessage) {
const button = document.querySelector("#detect");
if (statusMessage === "detecting") {
button.disabled = true;
button.classList.add("bg-blue-700");
button.classList.remove("bg-blue-950");
button.textContent = "Predicting...";
} else if (statusMessage === "complete") {
button.disabled = false;
button.classList.add("bg-blue-950");
button.classList.remove("bg-blue-700");
button.textContent = "Predict";
document.querySelector("#share-btn").classList.remove("invisible");
}
}
document.querySelector("#share-btn").addEventListener("click", () => {
shareToCommunity(
"lmz/candle-yolo",
"Candle + YOLOv8",
"YOLOv8 with [Candle](https://github.com/huggingface/candle)",
"canvas-result",
"share-btn"
);
});
</script>
</head>
<body class="container max-w-4xl mx-auto p-4">
<main class="grid grid-cols-1 gap-8 relative">
<span class="absolute text-5xl -ml-[1em]"> π―οΈ </span>
<div>
<h1 class="text-5xl font-bold">Candle YOLOv8</h1>
<h2 class="text-2xl font-bold">Rust/WASM Demo</h2>
<p class="max-w-lg">
This demo showcases object detection and pose estimation models in
your browser using Rust/WASM. It utilizes
<a
href="https://huggingface.co/lmz/candle-yolo-v8"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>
          safetensors YOLOv8 models
</a>
and a WASM runtime built with
<a
href="https://github.com/huggingface/candle/"
target="_blank"
class="underline hover:text-blue-500 hover:no-underline"
>Candle </a
>.
</p>
<p>
          To run pose estimation, select a YOLO pose model from the dropdown.
</p>
</div>
<div>
<label for="model" class="font-medium">Models Options: </label>
<select
id="model"
class="border-2 border-gray-500 rounded-md font-light"
>
<option value="yolov8n" selected>yolov8n (6.37 MB)</option>
<option value="yolov8s">yolov8s (22.4 MB)</option>
<option value="yolov8m">yolov8m (51.9 MB)</option>
<option value="yolov8l">yolov8l (87.5 MB)</option>
<option value="yolov8x">yolov8x (137 MB)</option>
<!-- Pose models -->
<option value="yolov8n_pose">yolov8n_pose (6.65 MB)</option>
<option value="yolov8s_pose">yolov8s_pose (23.3 MB)</option>
<option value="yolov8m_pose">yolov8m_pose (53 MB)</option>
<option value="yolov8l_pose">yolov8l_pose (89.1 MB)</option>
<option value="yolov8x_pose">yolov8x_pose (139 MB)</option>
</select>
</div>
<div>
<button
id="detect"
disabled
class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 px-4 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"
>
Predict
</button>
</div>
<!-- drag and drop area -->
<div class="relative max-w-lg">
<div class="py-1">
<button
id="clear-btn"
class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center ml-auto invisible"
>
<svg
class=""
xmlns="http://www.w3.org/2000/svg"
viewBox="0 0 13 12"
height="1em"
>
<path
d="M1.6.7 12 11.1M12 .7 1.6 11.1"
stroke="#2E3036"
stroke-width="2"
/>
</svg>
Clear image
</button>
</div>
<div
id="drop-area"
class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden"
>
<div
class="flex flex-col items-center justify-center space-y-1 text-center"
>
<svg
width="25"
height="25"
viewBox="0 0 25 25"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
fill="#000"
/>
</svg>
<div class="flex text-sm text-gray-600">
<label
for="file-upload"
class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"
>
<span>Drag and drop your image here</span>
<span class="block text-xs">or</span>
<span class="block text-xs">Click to upload</span>
</label>
</div>
<input
id="file-upload"
name="file-upload"
type="file"
class="sr-only"
/>
</div>
<canvas
id="canvas"
class="absolute pointer-events-none w-full"
></canvas>
<canvas
id="canvas-result"
class="absolute pointer-events-none w-full"
></canvas>
</div>
<div class="text-right py-2">
<button
id="share-btn"
class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible"
>
<img
src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg"
/>
</button>
</div>
</div>
<div>
<div
class="flex gap-3 items-center overflow-x-scroll"
id="image-select"
>
<h3 class="font-medium">Examples:</h3>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
class="cursor-pointer w-24 h-24 object-cover"
/>
<img
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
class="cursor-pointer w-24 h-24 object-cover"
/>
</div>
</div>
<div>
<div class="grid grid-cols-3 max-w-md items-center gap-3">
<label class="text-sm font-medium" for="confidence"
>Confidence Threshold</label
>
<input
type="range"
id="confidence"
name="confidence"
min="0"
max="1"
step="0.01"
value="0.25"
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"
/>
<output
class="text-xs font-light px-1 py-1 border border-gray-700 rounded-md w-min"
>0.25</output
>
<label class="text-sm font-medium" for="iou_threshold"
>IoU Threshold</label
>
<input
type="range"
id="iou_threshold"
name="iou_threshold"
min="0"
max="1"
step="0.01"
value="0.45"
oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)"
/>
<output
class="font-extralight text-xs px-1 py-1 border border-gray-700 rounded-md w-min"
>0.45</output
>
</div>
</div>
</main>
</body>
</html>
| candle/candle-wasm-examples/yolo/lib-example.html/0 | {
"file_path": "candle/candle-wasm-examples/yolo/lib-example.html",
"repo_id": "candle",
"token_count": 9649
} |
use candle::quantized::{gguf_file, GgmlDType, QTensor};
use candle::{Device, Result};
use clap::{Parser, Subcommand, ValueEnum};
use rayon::prelude::*;
#[derive(ValueEnum, Debug, Clone)]
enum QuantizationMode {
/// The default quantization includes all 2d tensors, except the output tensor which always
/// uses Q6_K.
Llama,
}
impl QuantizationMode {
fn quantize(&self, name: &str, tensor: QTensor, dtype: GgmlDType) -> Result<QTensor> {
match self {
Self::Llama => {
// Same behavior as the llama.cpp quantization.
let should_quantize = name.ends_with(".weight") && tensor.rank() == 2;
if should_quantize {
let tensor = tensor.dequantize(&Device::Cpu)?;
if name == "output.weight" {
QTensor::quantize(&tensor, GgmlDType::Q6K)
} else {
QTensor::quantize(&tensor, dtype)
}
} else {
Ok(tensor)
}
}
}
}
}
#[derive(ValueEnum, Debug, Clone)]
enum Quantization {
#[value(name = "q4_0")]
Q4_0,
#[value(name = "q4_1")]
Q4_1,
#[value(name = "q5_0")]
Q5_0,
#[value(name = "q5_1")]
Q5_1,
#[value(name = "q8_0")]
Q8_0,
#[value(name = "q8_1")]
Q8_1,
Q2k,
Q3k,
Q4k,
Q5k,
Q6k,
Q8k,
F16,
F32,
}
impl Quantization {
fn dtype(&self) -> GgmlDType {
match self {
Quantization::Q4_0 => GgmlDType::Q4_0,
Quantization::Q4_1 => GgmlDType::Q4_1,
Quantization::Q5_0 => GgmlDType::Q5_0,
Quantization::Q5_1 => GgmlDType::Q5_1,
Quantization::Q8_0 => GgmlDType::Q8_0,
Quantization::Q8_1 => GgmlDType::Q8_1,
Quantization::Q2k => GgmlDType::Q2K,
Quantization::Q3k => GgmlDType::Q3K,
Quantization::Q4k => GgmlDType::Q4K,
Quantization::Q5k => GgmlDType::Q5K,
Quantization::Q6k => GgmlDType::Q6K,
Quantization::Q8k => GgmlDType::Q8K,
Quantization::F16 => GgmlDType::F16,
Quantization::F32 => GgmlDType::F32,
}
}
}
#[derive(ValueEnum, Debug, Clone)]
enum Format {
Safetensors,
Npz,
Ggml,
Gguf,
Pth,
Pickle,
}
impl Format {
fn infer<P: AsRef<std::path::Path>>(p: P) -> Option<Self> {
p.as_ref()
.extension()
.and_then(|e| e.to_str())
.and_then(|e| match e {
// We don't infer any format for .bin as it can be used for ggml/gguf or pytorch.
"safetensors" | "safetensor" => Some(Self::Safetensors),
"npz" => Some(Self::Npz),
"pth" | "pt" => Some(Self::Pth),
"ggml" => Some(Self::Ggml),
"gguf" => Some(Self::Gguf),
_ => None,
})
}
}
#[derive(Subcommand, Debug, Clone)]
enum Command {
Ls {
files: Vec<std::path::PathBuf>,
/// The file format to use, if unspecified infer from the file extension.
#[arg(long, value_enum)]
format: Option<Format>,
/// Enable verbose mode.
#[arg(short, long)]
verbose: bool,
},
Print {
file: std::path::PathBuf,
names: Vec<String>,
/// The file format to use, if unspecified infer from the file extension.
#[arg(long, value_enum)]
format: Option<Format>,
/// Print the whole content of each tensor.
#[arg(long)]
full: bool,
/// Line width for printing the tensors.
#[arg(long)]
line_width: Option<usize>,
},
Quantize {
/// The input file(s), in safetensors format.
in_file: Vec<std::path::PathBuf>,
/// The output file, in gguf format.
#[arg(long)]
out_file: std::path::PathBuf,
        /// The quantization scheme to apply.
#[arg(long, value_enum)]
quantization: Quantization,
/// Which tensor to quantize.
#[arg(long, value_enum, default_value_t = QuantizationMode::Llama)]
mode: QuantizationMode,
},
Dequantize {
/// The input file, in gguf format.
in_file: std::path::PathBuf,
/// The output file, in safetensors format.
#[arg(long)]
out_file: std::path::PathBuf,
},
}
#[derive(Parser, Debug, Clone)]
struct Args {
#[command(subcommand)]
command: Command,
}
fn run_print(
file: &std::path::PathBuf,
names: Vec<String>,
format: Option<Format>,
full: bool,
line_width: Option<usize>,
device: &Device,
) -> Result<()> {
if full {
candle::display::set_print_options_full();
}
if let Some(line_width) = line_width {
candle::display::set_line_width(line_width)
}
let format = match format {
Some(format) => format,
None => match Format::infer(file) {
Some(format) => format,
None => {
println!(
"{file:?}: cannot infer format from file extension, use the --format flag"
);
return Ok(());
}
},
};
match format {
Format::Npz => {
let tensors = candle::npy::NpzTensors::new(file)?;
let names = if names.is_empty() {
tensors.names().into_iter().map(|v| v.to_string()).collect()
} else {
names
};
for name in names.iter() {
println!("==== {name} ====");
match tensors.get(name)? {
Some(tensor) => println!("{tensor}"),
None => println!("not found"),
}
}
}
Format::Safetensors => {
use candle::safetensors::Load;
let tensors = unsafe { candle::safetensors::MmapedSafetensors::new(file)? };
let tensors: std::collections::HashMap<_, _> = tensors.tensors().into_iter().collect();
let names = if names.is_empty() {
tensors.keys().map(|v| v.to_string()).collect()
} else {
names
};
for name in names.iter() {
println!("==== {name} ====");
match tensors.get(name) {
Some(tensor_view) => {
let tensor = tensor_view.load(device)?;
println!("{tensor}")
}
None => println!("not found"),
}
}
}
Format::Pth => {
let pth_file = candle::pickle::PthTensors::new(file, None)?;
let names = if names.is_empty() {
pth_file
.tensor_infos()
.keys()
.map(|v| v.to_string())
.collect()
} else {
names
};
for name in names.iter() {
println!("==== {name} ====");
match pth_file.get(name)? {
Some(tensor) => {
println!("{tensor}")
}
None => println!("not found"),
}
}
}
Format::Pickle => {
candle::bail!("pickle format is not supported for print")
}
Format::Ggml => {
let mut file = std::fs::File::open(file)?;
let content = candle::quantized::ggml_file::Content::read(&mut file, device)?;
let names = if names.is_empty() {
content.tensors.keys().map(|v| v.to_string()).collect()
} else {
names
};
for name in names.iter() {
println!("==== {name} ====");
match content.tensors.get(name) {
Some(tensor) => {
let tensor = tensor.dequantize(device)?;
println!("{tensor}")
}
None => println!("not found"),
}
}
}
Format::Gguf => {
let mut file = std::fs::File::open(file)?;
let content = gguf_file::Content::read(&mut file)?;
let names = if names.is_empty() {
content.tensor_infos.keys().map(|v| v.to_string()).collect()
} else {
names
};
for name in names.iter() {
println!("==== {name} ====");
match content.tensor(&mut file, name, device) {
Ok(tensor) => {
let tensor = tensor.dequantize(device)?;
println!("{tensor}")
}
Err(_) => println!("not found"),
}
}
}
}
Ok(())
}
fn run_ls(
file: &std::path::PathBuf,
format: Option<Format>,
verbose: bool,
device: &Device,
) -> Result<()> {
let format = match format {
Some(format) => format,
None => match Format::infer(file) {
Some(format) => format,
None => {
println!(
"{file:?}: cannot infer format from file extension, use the --format flag"
);
return Ok(());
}
},
};
match format {
Format::Npz => {
let tensors = candle::npy::NpzTensors::new(file)?;
let mut names = tensors.names();
names.sort();
for name in names {
let shape_dtype = match tensors.get_shape_and_dtype(name) {
Ok((shape, dtype)) => format!("[{shape:?}; {dtype:?}]"),
Err(err) => err.to_string(),
};
println!("{name}: {shape_dtype}")
}
}
Format::Safetensors => {
let tensors = unsafe { candle::safetensors::MmapedSafetensors::new(file)? };
let mut tensors = tensors.tensors();
tensors.sort_by(|a, b| a.0.cmp(&b.0));
for (name, view) in tensors.iter() {
let dtype = view.dtype();
let dtype = match candle::DType::try_from(dtype) {
Ok(dtype) => format!("{dtype:?}"),
Err(_) => format!("{dtype:?}"),
};
let shape = view.shape();
println!("{name}: [{shape:?}; {dtype}]")
}
}
Format::Pth => {
let mut tensors = candle::pickle::read_pth_tensor_info(file, verbose, None)?;
tensors.sort_by(|a, b| a.name.cmp(&b.name));
for tensor_info in tensors.iter() {
println!(
"{}: [{:?}; {:?}]",
tensor_info.name,
tensor_info.layout.shape(),
tensor_info.dtype,
);
if verbose {
println!(" {:?}", tensor_info);
}
}
}
Format::Pickle => {
let file = std::fs::File::open(file)?;
let mut reader = std::io::BufReader::new(file);
let mut stack = candle::pickle::Stack::empty();
stack.read_loop(&mut reader)?;
for (i, obj) in stack.stack().iter().enumerate() {
println!("{i} {obj:?}");
}
}
Format::Ggml => {
let mut file = std::fs::File::open(file)?;
let content = candle::quantized::ggml_file::Content::read(&mut file, device)?;
let mut tensors = content.tensors.into_iter().collect::<Vec<_>>();
tensors.sort_by(|a, b| a.0.cmp(&b.0));
for (name, qtensor) in tensors.iter() {
println!("{name}: [{:?}; {:?}]", qtensor.shape(), qtensor.dtype());
}
}
Format::Gguf => {
let mut file = std::fs::File::open(file)?;
let content = gguf_file::Content::read(&mut file)?;
if verbose {
let mut metadata = content.metadata.into_iter().collect::<Vec<_>>();
metadata.sort_by(|a, b| a.0.cmp(&b.0));
println!("metadata entries ({})", metadata.len());
for (key, value) in metadata.iter() {
println!(" {key}: {value:?}");
}
}
let mut tensors = content.tensor_infos.into_iter().collect::<Vec<_>>();
tensors.sort_by(|a, b| a.0.cmp(&b.0));
for (name, info) in tensors.iter() {
println!("{name}: [{:?}; {:?}]", info.shape, info.ggml_dtype);
}
}
}
Ok(())
}
fn run_quantize_safetensors(
in_files: &[std::path::PathBuf],
out_file: std::path::PathBuf,
q: Quantization,
) -> Result<()> {
let mut out_file = std::fs::File::create(out_file)?;
let mut tensors = std::collections::HashMap::new();
for in_file in in_files.iter() {
let in_tensors = candle::safetensors::load(in_file, &Device::Cpu)?;
tensors.extend(in_tensors)
}
println!("tensors: {}", tensors.len());
let dtype = q.dtype();
let block_size = dtype.block_size();
let qtensors = tensors
.into_par_iter()
.map(|(name, tensor)| {
let should_quantize = tensor.rank() == 2 && tensor.dim(1)? % block_size == 0;
println!(" quantizing {name} {tensor:?} {should_quantize}");
let tensor = if should_quantize {
QTensor::quantize(&tensor, dtype)?
} else {
QTensor::quantize(&tensor, GgmlDType::F32)?
};
Ok((name, tensor))
})
.collect::<Result<Vec<_>>>()?;
let qtensors = qtensors
.iter()
.map(|(k, v)| (k.as_str(), v))
.collect::<Vec<_>>();
gguf_file::write(&mut out_file, &[], &qtensors)?;
Ok(())
}
fn run_dequantize(
in_file: std::path::PathBuf,
out_file: std::path::PathBuf,
device: &Device,
) -> Result<()> {
let mut in_file = std::fs::File::open(in_file)?;
let content = gguf_file::Content::read(&mut in_file)?;
let mut tensors = std::collections::HashMap::new();
for (tensor_name, _) in content.tensor_infos.iter() {
let tensor = content.tensor(&mut in_file, tensor_name, device)?;
let tensor = tensor.dequantize(device)?;
tensors.insert(tensor_name.to_string(), tensor);
}
candle::safetensors::save(&tensors, out_file)?;
Ok(())
}
fn run_quantize(
in_files: &[std::path::PathBuf],
out_file: std::path::PathBuf,
q: Quantization,
qmode: QuantizationMode,
device: &Device,
) -> Result<()> {
if in_files.is_empty() {
candle::bail!("no specified input files")
}
if let Some(extension) = out_file.extension() {
if extension == "safetensors" {
candle::bail!("the generated file cannot use the safetensors extension")
}
}
if let Some(extension) = in_files[0].extension() {
if extension == "safetensors" {
return run_quantize_safetensors(in_files, out_file, q);
}
}
if in_files.len() != 1 {
candle::bail!("only a single in-file can be used when quantizing gguf files")
}
// Open the out file early so as to fail directly on missing directories etc.
let mut out_file = std::fs::File::create(out_file)?;
let mut in_ = std::fs::File::open(&in_files[0])?;
let content = gguf_file::Content::read(&mut in_)?;
println!("tensors: {}", content.tensor_infos.len());
let dtype = q.dtype();
let qtensors = content
.tensor_infos
.par_iter()
.map(|(name, _)| {
println!(" quantizing {name}");
let mut in_file = std::fs::File::open(&in_files[0])?;
let tensor = content.tensor(&mut in_file, name, device)?;
let tensor = qmode.quantize(name, tensor, dtype)?;
Ok((name, tensor))
})
.collect::<Result<Vec<_>>>()?;
let qtensors = qtensors
.iter()
.map(|(k, v)| (k.as_str(), v))
.collect::<Vec<_>>();
let metadata = content
.metadata
.iter()
.map(|(k, v)| (k.as_str(), v))
.collect::<Vec<_>>();
gguf_file::write(&mut out_file, metadata.as_slice(), &qtensors)?;
Ok(())
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
let device = Device::Cpu;
match args.command {
Command::Ls {
files,
format,
verbose,
} => {
let multiple_files = files.len() > 1;
for file in files.iter() {
if multiple_files {
println!("--- {file:?} ---");
}
run_ls(file, format.clone(), verbose, &device)?
}
}
Command::Print {
file,
names,
format,
full,
line_width,
} => run_print(&file, names, format, full, line_width, &device)?,
Command::Quantize {
in_file,
out_file,
quantization,
mode,
} => run_quantize(&in_file, out_file, quantization, mode, &device)?,
Command::Dequantize { in_file, out_file } => run_dequantize(in_file, out_file, &device)?,
}
Ok(())
}
| candle/tensor-tools/src/main.rs/0 | {
"file_path": "candle/tensor-tools/src/main.rs",
"repo_id": "candle",
"token_count": 9444
} |
{{- if $.Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
labels: {{ include "labels.standard" . | nindent 4 }}
name: {{ include "name" . }}
namespace: {{ .Release.Namespace }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "name" . }}
minReplicas: {{ $.Values.autoscaling.minReplicas }}
maxReplicas: {{ $.Values.autoscaling.maxReplicas }}
metrics:
{{- if ne "" $.Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ $.Values.autoscaling.targetMemoryUtilizationPercentage | int }}
{{- end }}
{{- if ne "" $.Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ $.Values.autoscaling.targetCPUUtilizationPercentage | int }}
{{- end }}
behavior:
scaleDown:
stabilizationWindowSeconds: 600
policies:
- type: Percent
value: 10
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Pods
value: 1
periodSeconds: 30
{{- end }}
| chat-ui/chart/templates/hpa.yaml/0 | {
"file_path": "chat-ui/chart/templates/hpa.yaml",
"repo_id": "chat-ui",
"token_count": 543
} |
# Cloudflare
| Feature | Available |
| --------------------------- | --------- |
| [Tools](../tools) | No |
| [Multimodal](../multimodal) | No |
You may use Cloudflare Workers AI to run your own models with serverless inference.
You will need to have a Cloudflare account, then get your [account ID](https://developers.cloudflare.com/fundamentals/setup/find-account-and-zone-ids/) as well as your [API token](https://developers.cloudflare.com/workers-ai/get-started/rest-api/#1-get-an-api-token) for Workers AI.
You can either specify them directly in your `.env.local` using the `CLOUDFLARE_ACCOUNT_ID` and `CLOUDFLARE_API_TOKEN` variables, or you can set them directly in the endpoint config.
You can find the list of models available on Cloudflare [here](https://developers.cloudflare.com/workers-ai/models/#text-generation).
```ini
MODELS=`[
{
"name" : "nousresearch/hermes-2-pro-mistral-7b",
"tokenizer": "nousresearch/hermes-2-pro-mistral-7b",
"parameters": {
"stop": ["<|im_end|>"]
},
"endpoints" : [
{
"type" : "cloudflare"
<!-- optionally specify these
"accountId": "your-account-id",
"authToken": "your-api-token"
-->
}
]
}
]`
```
| chat-ui/docs/source/configuration/models/providers/cloudflare.md/0 | {
"file_path": "chat-ui/docs/source/configuration/models/providers/cloudflare.md",
"repo_id": "chat-ui",
"token_count": 510
} |
# Running on Docker
Pre-built docker images are provided with and without MongoDB built in. Refer to the [configuration section](../configuration/overview) for env variables that must be provided. We recommend using the `--env-file` option to avoid leaking secrets into your shell history.
```bash
# Without built-in DB
docker run -p 3000:3000 --env-file .env.local --name chat-ui ghcr.io/huggingface/chat-ui
# With built-in DB
docker run -p 3000:3000 --env-file .env.local -v chat-ui:/data --name chat-ui ghcr.io/huggingface/chat-ui-db
```
| chat-ui/docs/source/installation/docker.md/0 | {
"file_path": "chat-ui/docs/source/installation/docker.md",
"repo_id": "chat-ui",
"token_count": 165
} |
import { navigating } from "$app/stores";
import { tick } from "svelte";
import { get } from "svelte/store";
const detachedOffset = 10;
/**
* @param node element to snap scroll to bottom
* @param dependency pass in a dependency to update scroll on changes.
*/
export const snapScrollToBottom = (node: HTMLElement, dependency: unknown) => {
let prevScrollValue = node.scrollTop;
let isDetached = false;
const handleScroll = () => {
// if user scrolled up, we detach
if (node.scrollTop < prevScrollValue) {
isDetached = true;
}
// if user scrolled back to within 10px of bottom, we reattach
if (node.scrollTop - (node.scrollHeight - node.clientHeight) >= -detachedOffset) {
isDetached = false;
}
prevScrollValue = node.scrollTop;
};
const updateScroll = async (_options: { force?: boolean } = {}) => {
const defaultOptions = { force: false };
const options = { ...defaultOptions, ..._options };
const { force } = options;
if (!force && isDetached && !get(navigating)) return;
// wait for next tick to ensure that the DOM is updated
await tick();
node.scrollTo({ top: node.scrollHeight });
};
node.addEventListener("scroll", handleScroll);
if (dependency) {
updateScroll({ force: true });
}
return {
update: updateScroll,
destroy: () => {
node.removeEventListener("scroll", handleScroll);
},
};
};
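// --- usage sketch (illustrative addition, not part of the original action) ---
// In a Svelte template this is normally wired declaratively, e.g. `use:snapScrollToBottom={messages}`.
// The manual equivalent below only demonstrates the action contract; `container` and `messages`
// are hypothetical placeholders.
function exampleManualUsage(container: HTMLElement, messages: unknown[]) {
	const action = snapScrollToBottom(container, messages);
	// re-run the scroll logic whenever the dependency changes; `force` ignores detachment
	void action.update({ force: true });
	// call on unmount to remove the scroll listener
	return action.destroy;
}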
| chat-ui/src/lib/actions/snapScrollToBottom.ts/0 | {
"file_path": "chat-ui/src/lib/actions/snapScrollToBottom.ts",
"repo_id": "chat-ui",
"token_count": 437
} |
<script lang="ts">
import { base } from "$app/paths";
import { page } from "$app/state";
import { createEventDispatcher } from "svelte";
import CarbonCheckmark from "~icons/carbon/checkmark";
import CarbonTrashCan from "~icons/carbon/trash-can";
import CarbonClose from "~icons/carbon/close";
import CarbonEdit from "~icons/carbon/edit";
import type { ConvSidebar } from "$lib/types/ConvSidebar";
interface Props {
conv: ConvSidebar;
}
let { conv }: Props = $props();
let confirmDelete = $state(false);
const dispatch = createEventDispatcher<{
deleteConversation: string;
editConversationTitle: { id: string; title: string };
}>();
</script>
<a
data-sveltekit-noscroll
onmouseleave={() => {
confirmDelete = false;
}}
href="{base}/conversation/{conv.id}"
class="group flex h-10 flex-none items-center gap-1.5 rounded-lg pl-2.5 pr-2 text-gray-600 hover:bg-gray-100 dark:text-gray-300 dark:hover:bg-gray-700 sm:h-[2.35rem] {conv.id ===
page.params.id
? 'bg-gray-100 dark:bg-gray-700'
: ''}"
>
<div class="flex flex-1 items-center truncate">
{#if confirmDelete}
<span class="mr-1 font-semibold"> Delete </span>
{/if}
{#if conv.avatarUrl}
<img
src="{base}{conv.avatarUrl}"
alt="Assistant avatar"
class="mr-1.5 inline size-4 flex-none rounded-full object-cover"
/>
{conv.title.replace(/\p{Emoji}/gu, "")}
{:else if conv.assistantId}
<div
class="mr-1.5 flex size-4 flex-none items-center justify-center rounded-full bg-gray-300 text-xs font-bold uppercase text-gray-500"
></div>
{conv.title.replace(/\p{Emoji}/gu, "")}
{:else}
{conv.title}
{/if}
</div>
{#if confirmDelete}
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Cancel delete action"
onclick={(e) => {
e.preventDefault();
confirmDelete = false;
}}
>
<CarbonClose class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Confirm delete action"
onclick={(e) => {
e.preventDefault();
confirmDelete = false;
dispatch("deleteConversation", conv.id);
}}
>
<CarbonCheckmark class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
{:else}
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Edit conversation title"
onclick={(e) => {
e.preventDefault();
const newTitle = prompt("Edit this conversation title:", conv.title);
if (!newTitle) return;
dispatch("editConversationTitle", { id: conv.id, title: newTitle });
}}
>
<CarbonEdit class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
<button
type="button"
class="flex h-5 w-5 items-center justify-center rounded md:hidden md:group-hover:flex"
title="Delete conversation"
onclick={(event) => {
event.preventDefault();
if (event.shiftKey) {
dispatch("deleteConversation", conv.id);
} else {
confirmDelete = true;
}
}}
>
<CarbonTrashCan class="text-xs text-gray-400 hover:text-gray-500 dark:hover:text-gray-300" />
</button>
{/if}
</a>
| chat-ui/src/lib/components/NavConversationItem.svelte/0 | {
"file_path": "chat-ui/src/lib/components/NavConversationItem.svelte",
"repo_id": "chat-ui",
"token_count": 1361
} |
<script lang="ts">
import { base } from "$app/paths";
import { page } from "$app/state";
import { clickOutside } from "$lib/actions/clickOutside";
import { useSettingsStore } from "$lib/stores/settings";
import type { ToolFront } from "$lib/types/Tool";
import { isHuggingChat } from "$lib/utils/isHuggingChat";
import IconTool from "./icons/IconTool.svelte";
import CarbonInformation from "~icons/carbon/information";
import CarbonGlobe from "~icons/carbon/earth-filled";
interface Props {
loading?: boolean;
}
let { loading = false }: Props = $props();
const settings = useSettingsStore();
let detailsEl: HTMLDetailsElement | undefined = $state();
// active tools are all the checked tools, either from settings or on by default
let activeToolCount = $derived(
page.data.tools.filter(
(tool: ToolFront) =>
// community tools are always on by default
tool.type === "community" || $settings?.tools?.includes(tool._id)
).length
);
async function setAllTools(value: boolean) {
const configToolsIds = page.data.tools
.filter((t: ToolFront) => t.type === "config")
.map((t: ToolFront) => t._id);
if (value) {
await settings.instantSet({
tools: Array.from(new Set([...configToolsIds, ...($settings?.tools ?? [])])),
});
} else {
await settings.instantSet({
tools: [],
});
}
}
let allToolsEnabled = $derived(activeToolCount === page.data.tools.length);
let tools = $derived(page.data.tools);
</script>
<details
class="group relative bottom-0 h-full min-h-8"
bind:this={detailsEl}
use:clickOutside={() => {
if (detailsEl?.hasAttribute("open")) {
detailsEl.removeAttribute("open");
}
}}
>
<summary
class="absolute bottom-0 flex h-8
cursor-pointer select-none items-center gap-1 rounded-lg border bg-white px-2 py-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900"
>
<IconTool classNames="dark:text-purple-600" />
Tools
<span class="text-gray-400 dark:text-gray-500"> ({activeToolCount}) </span>
</summary>
<div
class="absolute bottom-10 h-max w-max select-none items-center gap-1 rounded-lg border bg-white p-0.5 shadow-sm dark:border-gray-800 dark:bg-gray-900"
>
<div class="grid grid-cols-2 gap-x-6 gap-y-1 p-3">
<div class="col-span-2 flex items-center gap-1.5 text-sm text-gray-500">
Available tools
{#if isHuggingChat}
<a
href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/470"
target="_blank"
class="hover:brightness-0 dark:hover:brightness-200"
><CarbonInformation class="text-xs" /></a
>
{/if}
<button
class="ml-auto text-xs underline"
onclick={(e) => {
e.stopPropagation();
setAllTools(!allToolsEnabled);
}}
>
{#if allToolsEnabled}
Disable all
{:else}
Enable all
{/if}
</button>
</div>
{#if page.data.enableCommunityTools}
<a
href="{base}/tools"
class="col-span-2 my-1 h-fit w-fit items-center justify-center rounded-full bg-purple-500/20 px-2.5 py-1.5 text-sm hover:bg-purple-500/30"
>
<span class="mr-1 rounded-full bg-purple-700 px-1.5 py-1 text-xs font-bold uppercase">
new
</span>
Browse community tools ({page.data.communityToolCount ?? 0})
</a>
{/if}
{#each tools as tool}
{@const isChecked = $settings?.tools?.includes(tool._id)}
<div class="flex items-center gap-1.5">
{#if tool.type === "community"}
<input
type="checkbox"
id={tool._id}
checked={true}
class="rounded-xs font-semibold accent-purple-500 hover:accent-purple-600"
onclick={async (e) => {
e.preventDefault();
e.stopPropagation();
await settings.instantSet({
tools: $settings?.tools?.filter((t) => t !== tool._id) ?? [],
});
}}
/>
{:else}
<input
type="checkbox"
id={tool._id}
checked={isChecked}
disabled={loading}
onclick={async (e) => {
e.preventDefault();
e.stopPropagation();
if (isChecked) {
await settings.instantSet({
tools: ($settings?.tools ?? []).filter((t) => t !== tool._id),
});
} else {
await settings.instantSet({
tools: [...($settings?.tools ?? []), tool._id],
});
}
}}
/>
{/if}
<label class="cursor-pointer" for={tool._id}>{tool.displayName}</label>
{#if tool.type === "community"}
<a href="{base}/tools/{tool._id}" class="text-purple-600 hover:text-purple-700">
<CarbonGlobe />
</a>
{/if}
</div>
{/each}
</div>
</div>
</details>
<style>
details summary::-webkit-details-marker {
display: none;
}
</style>
| chat-ui/src/lib/components/ToolsMenu.svelte/0 | {
"file_path": "chat-ui/src/lib/components/ToolsMenu.svelte",
"repo_id": "chat-ui",
"token_count": 2084
} |
<script lang="ts">
import type { Message } from "$lib/types/Message";
import CarbonThumbsUp from "~icons/carbon/thumbs-up";
import CarbonThumbsDown from "~icons/carbon/thumbs-down";
import { createEventDispatcher } from "svelte";
interface Props {
message: Message;
}
let { message }: Props = $props();
const dispatch = createEventDispatcher<{
vote: { score: Message["score"]; id: Message["id"] };
}>();
</script>
<button
class="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300
{message.score && message.score > 0
? 'text-green-500 hover:text-green-500 dark:text-green-400 hover:dark:text-green-400'
: ''}"
title={message.score === 1 ? "Remove +1" : "+1"}
type="button"
onclick={() => dispatch("vote", { score: message.score === 1 ? 0 : 1, id: message.id })}
>
<CarbonThumbsUp class="h-[1.14em] w-[1.14em]" />
</button>
<button
class="btn rounded-sm p-1 text-sm text-gray-400 hover:text-gray-500 focus:ring-0 dark:text-gray-400 dark:hover:text-gray-300
{message.score && message.score < 0
? 'text-red-500 hover:text-red-500 dark:text-red-400 hover:dark:text-red-400'
: ''}"
title={message.score === -1 ? "Remove -1" : "-1"}
type="button"
onclick={() => dispatch("vote", { score: message.score === -1 ? 0 : -1, id: message.id })}
>
<CarbonThumbsDown class="h-[1.14em] w-[1.14em]" />
</button>
| chat-ui/src/lib/components/chat/Vote.svelte/0 | {
"file_path": "chat-ui/src/lib/components/chat/Vote.svelte",
"repo_id": "chat-ui",
"token_count": 527
} |
import { Database } from "$lib/server/database";
import { acquireLock, refreshLock } from "$lib/migrations/lock";
import type { ObjectId } from "mongodb";
import { subDays } from "date-fns";
import { logger } from "$lib/server/logger";
const LOCK_KEY = "assistants.count";
let hasLock = false;
let lockId: ObjectId | null = null;
async function refreshAssistantsCountsHelper() {
if (!hasLock) {
return;
}
try {
await Database.getInstance()
.getClient()
.withSession((session) =>
session.withTransaction(async () => {
await Database.getInstance()
.getCollections()
.assistants.aggregate([
{ $project: { _id: 1 } },
{ $set: { last24HoursCount: 0 } },
{
$unionWith: {
coll: "assistants.stats",
pipeline: [
{
$match: { "date.at": { $gte: subDays(new Date(), 1) }, "date.span": "hour" },
},
{
$group: {
_id: "$assistantId",
last24HoursCount: { $sum: "$count" },
},
},
],
},
},
{
$group: {
_id: "$_id",
last24HoursCount: { $sum: "$last24HoursCount" },
},
},
{
$merge: {
into: "assistants",
on: "_id",
whenMatched: "merge",
whenNotMatched: "discard",
},
},
])
.next();
})
);
} catch (e) {
logger.error(e, "Refresh assistants counter failed!");
}
}
async function maintainLock() {
if (hasLock && lockId) {
hasLock = await refreshLock(LOCK_KEY, lockId);
if (!hasLock) {
lockId = null;
}
} else if (!hasLock) {
lockId = (await acquireLock(LOCK_KEY)) || null;
hasLock = !!lockId;
}
setTimeout(maintainLock, 10_000);
}
export function refreshAssistantsCounts() {
const ONE_HOUR_MS = 3_600_000;
maintainLock().then(() => {
refreshAssistantsCountsHelper();
setInterval(refreshAssistantsCountsHelper, ONE_HOUR_MS);
});
}
| chat-ui/src/lib/jobs/refresh-assistants-counts.ts/0 | {
"file_path": "chat-ui/src/lib/jobs/refresh-assistants-counts.ts",
"repo_id": "chat-ui",
"token_count": 970
} |
// Shouldn't be needed if we dove into sveltekit internals, see https://github.com/huggingface/chat-ui/pull/88#issuecomment-1523173850
import { logger } from "$lib/server/logger";
import { collections } from "$lib/server/database";
import { onExit } from "./exitHandler";
export class AbortedGenerations {
private static instance: AbortedGenerations;
private abortedGenerations: Map<string, Date> = new Map();
private constructor() {
		// use an arrow function so `this` still points at the singleton when the timer fires
		const interval = setInterval(() => this.updateList(), 1000);
onExit(() => clearInterval(interval));
}
public static getInstance(): AbortedGenerations {
if (!AbortedGenerations.instance) {
AbortedGenerations.instance = new AbortedGenerations();
}
return AbortedGenerations.instance;
}
public getList(): Map<string, Date> {
return this.abortedGenerations;
}
private async updateList() {
try {
const aborts = await collections.abortedGenerations.find({}).sort({ createdAt: 1 }).toArray();
this.abortedGenerations = new Map(
aborts.map(({ conversationId, createdAt }) => [conversationId.toString(), createdAt])
);
} catch (err) {
logger.error(err);
}
}
}
| chat-ui/src/lib/server/abortedGenerations.ts/0 | {
"file_path": "chat-ui/src/lib/server/abortedGenerations.ts",
"repo_id": "chat-ui",
"token_count": 373
} |
import type { MessageFile } from "$lib/types/Message";
import { z } from "zod";
export interface FileProcessorOptions<TMimeType extends string = string> {
supportedMimeTypes: TMimeType[];
maxSizeInMB: number;
}
export type ImageProcessor<TMimeType extends string = string> = (file: MessageFile) => Promise<{
file: Buffer;
mime: TMimeType;
}>;
export const createDocumentProcessorOptionsValidator = <TMimeType extends string = string>(
defaults: FileProcessorOptions<TMimeType>
) => {
return z
.object({
supportedMimeTypes: z
.array(
z.enum<string, [TMimeType, ...TMimeType[]]>([
defaults.supportedMimeTypes[0],
...defaults.supportedMimeTypes.slice(1),
])
)
.default(defaults.supportedMimeTypes),
maxSizeInMB: z.number().positive().default(defaults.maxSizeInMB),
})
.default(defaults);
};
export type DocumentProcessor<TMimeType extends string = string> = (file: MessageFile) => {
file: Buffer;
mime: TMimeType;
};
export type AsyncDocumentProcessor<TMimeType extends string = string> = (
file: MessageFile
) => Promise<{
file: Buffer;
mime: TMimeType;
}>;
export function makeDocumentProcessor<TMimeType extends string = string>(
options: FileProcessorOptions<TMimeType>
): AsyncDocumentProcessor<TMimeType> {
return async (file) => {
const { supportedMimeTypes, maxSizeInMB } = options;
const { mime, value } = file;
const buffer = Buffer.from(value, "base64");
const tooLargeInBytes = buffer.byteLength > maxSizeInMB * 1000 * 1000;
if (tooLargeInBytes) {
throw Error("Document is too large");
}
const outputMime = validateMimeType(supportedMimeTypes, mime);
return { file: buffer, mime: outputMime };
};
}
const validateMimeType = <T extends readonly string[]>(
supportedMimes: T,
mime: string
): T[number] => {
if (!supportedMimes.includes(mime)) {
const supportedMimesStr = supportedMimes.join(", ");
throw Error(`Mimetype "${mime}" not found in supported mimes: ${supportedMimesStr}`);
}
return mime;
};
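// --- usage sketch (illustrative addition, not part of the original module) ---
// Builds a PDF-only processor (limits chosen arbitrarily here) and runs it on an
// incoming MessageFile; it throws if the file is too large or has another mime type.
async function examplePdfProcessing(file: MessageFile) {
	const processPdf = makeDocumentProcessor({
		supportedMimeTypes: ["application/pdf"],
		maxSizeInMB: 5,
	});
	const { file: buffer, mime } = await processPdf(file);
	return { byteLength: buffer.byteLength, mime };
}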
| chat-ui/src/lib/server/endpoints/document.ts/0 | {
"file_path": "chat-ui/src/lib/server/endpoints/document.ts",
"repo_id": "chat-ui",
"token_count": 706
} |
import { dot } from "@huggingface/transformers";
import type { EmbeddingBackendModel } from "$lib/server/embeddingModels";
import type { Embedding } from "$lib/server/embeddingEndpoints/embeddingEndpoints";
// see here: https://github.com/nmslib/hnswlib/blob/359b2ba87358224963986f709e593d799064ace6/README.md?plain=1#L34
export function innerProduct(embeddingA: Embedding, embeddingB: Embedding) {
return 1.0 - dot(embeddingA, embeddingB);
}
export async function getSentenceSimilarity(
embeddingModel: EmbeddingBackendModel,
query: string,
sentences: string[]
): Promise<{ distance: number; embedding: Embedding; idx: number }[]> {
const inputs = [
`${embeddingModel.preQuery}${query}`,
...sentences.map((sentence) => `${embeddingModel.prePassage}${sentence}`),
];
const embeddingEndpoint = await embeddingModel.getEndpoint();
const output = await embeddingEndpoint({ inputs }).catch((err) => {
throw Error("Failed to generate embeddings for sentence similarity", { cause: err });
});
const queryEmbedding: Embedding = output[0];
const sentencesEmbeddings: Embedding[] = output.slice(1);
return sentencesEmbeddings.map((sentenceEmbedding, idx) => ({
distance: innerProduct(queryEmbedding, sentenceEmbedding),
embedding: sentenceEmbedding,
idx,
}));
}
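// --- usage sketch (illustrative addition, not part of the original module) ---
// Assuming `Embedding` is a plain number array of unit-length vectors, the inner-product
// distance above is 0 for identical embeddings and grows as they diverge.
function exampleDistances() {
	const a: Embedding = [1, 0, 0];
	const b: Embedding = [0, 1, 0];
	return {
		identical: innerProduct(a, a), // 0
		orthogonal: innerProduct(a, b), // 1
	};
}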
| chat-ui/src/lib/server/sentenceSimilarity.ts/0 | {
"file_path": "chat-ui/src/lib/server/sentenceSimilarity.ts",
"repo_id": "chat-ui",
"token_count": 433
} |
import type { EmbeddingBackendModel } from "$lib/server/embeddingModels";
import { getSentenceSimilarity } from "$lib/server/sentenceSimilarity";
/**
* Combines sentences together to reach the maximum character limit of the embedding model
* Improves performance considerably when using CPU embedding
*/
export async function getCombinedSentenceSimilarity(
embeddingModel: EmbeddingBackendModel,
query: string,
sentences: string[]
): ReturnType<typeof getSentenceSimilarity> {
const combinedSentences = sentences.reduce<{ text: string; indices: number[] }[]>(
(acc, sentence, idx) => {
const lastSentence = acc[acc.length - 1];
if (!lastSentence) return [{ text: sentence, indices: [idx] }];
if (lastSentence.text.length + sentence.length < embeddingModel.chunkCharLength) {
lastSentence.text += ` ${sentence}`;
lastSentence.indices.push(idx);
return acc;
}
return [...acc, { text: sentence, indices: [idx] }];
},
[]
);
const embeddings = await getSentenceSimilarity(
embeddingModel,
query,
combinedSentences.map(({ text }) => text)
);
return embeddings.flatMap((embedding, idx) => {
const { indices } = combinedSentences[idx];
return indices.map((i) => ({ ...embedding, idx: i }));
});
}
| chat-ui/src/lib/server/websearch/embed/combine.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/embed/combine.ts",
"repo_id": "chat-ui",
"token_count": 420
} |
import { env } from "$env/dynamic/private";
import type { WebSearchSource } from "$lib/types/WebSearch";
export default async function search(query: string): Promise<WebSearchSource[]> {
const response = await fetch(
		`https://www.searchapi.io/api/v1/search?engine=google&hl=en&gl=us&q=${encodeURIComponent(query)}`,
{
method: "GET",
headers: {
Authorization: `Bearer ${env.SEARCHAPI_KEY}`,
"Content-type": "application/json",
},
}
);
/* eslint-disable @typescript-eslint/no-explicit-any */
const data = (await response.json()) as Record<string, any>;
if (!response.ok) {
throw new Error(
data["message"] ?? `SearchApi returned error code ${response.status} - ${response.statusText}`
);
}
return data["organic_results"] ?? [];
}
| chat-ui/src/lib/server/websearch/search/endpoints/searchApi.ts/0 | {
"file_path": "chat-ui/src/lib/server/websearch/search/endpoints/searchApi.ts",
"repo_id": "chat-ui",
"token_count": 274
} |
export function formatUserCount(userCount: number): string {
const userCountRanges: { min: number; max: number; label: string }[] = [
{ min: 0, max: 1, label: "1" },
{ min: 2, max: 9, label: "1-10" },
{ min: 10, max: 49, label: "10+" },
{ min: 50, max: 99, label: "50+" },
{ min: 100, max: 299, label: "100+" },
{ min: 300, max: 499, label: "300+" },
{ min: 500, max: 999, label: "500+" },
{ min: 1_000, max: 2_999, label: "1k+" },
{ min: 3_000, max: 4_999, label: "3k+" },
{ min: 5_000, max: 9_999, label: "5k+" },
{ min: 10_000, max: 19_999, label: "10k+" },
{ min: 20_000, max: 29_999, label: "20k+" },
{ min: 30_000, max: 39_999, label: "30k+" },
{ min: 40_000, max: 49_999, label: "40k+" },
{ min: 50_000, max: 59_999, label: "50k+" },
{ min: 60_000, max: 69_999, label: "60k+" },
{ min: 70_000, max: 79_999, label: "70k+" },
{ min: 80_000, max: 89_999, label: "80k+" },
{ min: 90_000, max: 99_999, label: "90k+" },
{ min: 100_000, max: 109_999, label: "100k+" },
{ min: 110_000, max: 119_999, label: "110k+" },
{ min: 120_000, max: 129_999, label: "120k+" },
{ min: 130_000, max: 139_999, label: "130k+" },
{ min: 140_000, max: 149_999, label: "140k+" },
{ min: 150_000, max: 199_999, label: "150k+" },
{ min: 200_000, max: 299_999, label: "200k+" },
{ min: 300_000, max: 499_999, label: "300k+" },
{ min: 500_000, max: 749_999, label: "500k+" },
{ min: 750_000, max: 999_999, label: "750k+" },
{ min: 1_000_000, max: Infinity, label: "1M+" },
];
const range = userCountRanges.find(({ min, max }) => userCount >= min && userCount <= max);
return range?.label ?? "";
}
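// --- usage sketch (illustrative addition, not part of the original module) ---
// The helper buckets raw counts into coarse display labels:
function exampleLabels() {
	return [
		formatUserCount(1), // "1"
		formatUserCount(7), // "1-10"
		formatUserCount(1_234), // "1k+"
		formatUserCount(2_500_000), // "1M+"
	];
}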
| chat-ui/src/lib/utils/formatUserCount.ts/0 | {
"file_path": "chat-ui/src/lib/utils/formatUserCount.ts",
"repo_id": "chat-ui",
"token_count": 767
} |
const PUNCTUATION_REGEX = /\p{P}/gu;
function removeDiacritics(s: string, form: "NFD" | "NFKD" = "NFD"): string {
return s.normalize(form).replace(/[\u0300-\u036f]/g, "");
}
export function generateSearchTokens(value: string): string[] {
const fullTitleToken = removeDiacritics(value)
.replace(PUNCTUATION_REGEX, "")
.replaceAll(/\s+/g, "")
.toLowerCase();
return [
...new Set([
...removeDiacritics(value)
.split(/\s+/)
.map((word) => word.replace(PUNCTUATION_REGEX, "").toLowerCase())
.filter((word) => word.length),
...(fullTitleToken.length ? [fullTitleToken] : []),
]),
];
}
function escapeForRegExp(s: string): string {
return s.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
}
export function generateQueryTokens(query: string): RegExp[] {
return removeDiacritics(query)
.split(/\s+/)
.map((word) => word.replace(PUNCTUATION_REGEX, "").toLowerCase())
.filter((word) => word.length)
.map((token) => new RegExp(`^${escapeForRegExp(token)}`));
}
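// --- usage sketch (illustrative addition, not part of the original module) ---
// generateSearchTokens yields lowercase word tokens plus one concatenated full-title token,
// while generateQueryTokens yields prefix regexes to match against them.
function exampleTokens() {
	const searchTokens = generateSearchTokens("Café Crème!"); // ["cafe", "creme", "cafecreme"]
	const queryTokens = generateQueryTokens("caf cre"); // [/^caf/, /^cre/]
	return { searchTokens, queryTokens };
}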
| chat-ui/src/lib/utils/searchTokens.ts/0 | {
"file_path": "chat-ui/src/lib/utils/searchTokens.ts",
"repo_id": "chat-ui",
"token_count": 426
} |
import type { Conversation } from "$lib/types/Conversation";
import type { Message } from "$lib/types/Message";
import { v4 } from "uuid";
export function convertLegacyConversation(
conv: Pick<Conversation, "messages" | "rootMessageId" | "preprompt">
): Pick<Conversation, "messages" | "rootMessageId" | "preprompt"> {
if (conv.rootMessageId) return conv; // not a legacy conversation
if (conv.messages.length === 0) return conv; // empty conversation
const messages = [
{
from: "system",
content: conv.preprompt ?? "",
createdAt: new Date(),
updatedAt: new Date(),
id: v4(),
} satisfies Message,
...conv.messages,
];
const rootMessageId = messages[0].id;
const newMessages = messages.map((message, index) => {
return {
...message,
ancestors: messages.slice(0, index).map((m) => m.id),
children: index < messages.length - 1 ? [messages[index + 1].id] : [],
};
});
return {
...conv,
rootMessageId,
messages: newMessages,
};
}
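// --- usage sketch (illustrative addition, not part of the original module) ---
// A legacy conversation (no rootMessageId) gets a synthetic system message holding the
// preprompt prepended, and every message is linked into a linear ancestors/children chain.
// Only a minimal set of Message fields is used here; the remaining fields are assumed optional.
function exampleLegacyUpgrade() {
	const now = new Date();
	const legacy: Pick<Conversation, "messages" | "rootMessageId" | "preprompt"> = {
		preprompt: "You are a helpful assistant.",
		messages: [
			{ from: "user", content: "Hello", createdAt: now, updatedAt: now, id: v4() },
			{ from: "assistant", content: "Hi!", createdAt: now, updatedAt: now, id: v4() },
		],
	};
	const { rootMessageId, messages } = convertLegacyConversation(legacy);
	// rootMessageId now points at the synthetic system message; each message gains ancestors/children
	return { rootMessageId, messages };
}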
| chat-ui/src/lib/utils/tree/convertLegacyConversation.ts/0 | {
"file_path": "chat-ui/src/lib/utils/tree/convertLegacyConversation.ts",
"repo_id": "chat-ui",
"token_count": 354
} |
import { env } from "$env/dynamic/private";
import { collections } from "$lib/server/database.js";
import { toolFromConfigs } from "$lib/server/tools/index.js";
import { ReviewStatus } from "$lib/types/Review";
import type { CommunityToolDB } from "$lib/types/Tool.js";
import { ObjectId } from "mongodb";
export async function GET({ params }) {
if (env.COMMUNITY_TOOLS !== "true") {
return new Response("Community tools are not enabled", { status: 403 });
}
const toolId = params.toolId;
try {
const configTool = toolFromConfigs.find((el) => el._id.toString() === toolId);
if (configTool) {
return Response.json({
_id: toolId,
displayName: configTool.displayName,
color: configTool.color,
icon: configTool.icon,
createdByName: undefined,
});
} else {
// try community tools
const tool = await collections.tools
.findOne<CommunityToolDB>({ _id: new ObjectId(toolId) })
.then((tool) =>
tool
? {
_id: tool._id.toString(),
displayName: tool.displayName,
color: tool.color,
icon: tool.icon,
createdByName: tool.createdByName,
review: tool.review,
}
: undefined
);
if (!tool || tool.review !== ReviewStatus.APPROVED) {
return new Response(`Tool "${toolId}" not found`, { status: 404 });
}
return Response.json(tool);
}
} catch (e) {
return new Response(`Tool "${toolId}" not found`, { status: 404 });
}
}
| chat-ui/src/routes/api/tools/[toolId]/+server.ts/0 | {
"file_path": "chat-ui/src/routes/api/tools/[toolId]/+server.ts",
"repo_id": "chat-ui",
"token_count": 571
} |
import { authCondition } from "$lib/server/auth";
import { collections } from "$lib/server/database";
import { error } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
import { z } from "zod";
import type { RequestHandler } from "./$types";
import { downloadFile } from "$lib/server/files/downloadFile";
import mimeTypes from "mime-types";
export const GET: RequestHandler = async ({ locals, params }) => {
const sha256 = z.string().parse(params.sha256);
const userId = locals.user?._id ?? locals.sessionId;
// check user
if (!userId) {
error(401, "Unauthorized");
}
if (params.id.length !== 7) {
const convId = new ObjectId(z.string().parse(params.id));
// check if the user has access to the conversation
const conv = await collections.conversations.findOne({
_id: convId,
...authCondition(locals),
});
if (!conv) {
error(404, "Conversation not found");
}
} else {
// look for the conversation in shared conversations
const conv = await collections.sharedConversations.findOne({
_id: params.id,
});
if (!conv) {
error(404, "Conversation not found");
}
}
const { value, mime } = await downloadFile(sha256, params.id);
const b64Value = Buffer.from(value, "base64");
return new Response(b64Value, {
headers: {
"Content-Type": mime ?? "application/octet-stream",
"Content-Security-Policy":
"default-src 'none'; script-src 'none'; style-src 'none'; sandbox;",
"Content-Disposition": `attachment; filename="${sha256.slice(0, 8)}.${
mime ? mimeTypes.extension(mime) || "bin" : "bin"
}"`,
"Content-Length": b64Value.length.toString(),
"Accept-Range": "bytes",
},
});
};
| chat-ui/src/routes/conversation/[id]/output/[sha256]/+server.ts/0 | {
"file_path": "chat-ui/src/routes/conversation/[id]/output/[sha256]/+server.ts",
"repo_id": "chat-ui",
"token_count": 593
} |
<script lang="ts">
import { base } from "$app/paths";
import { afterNavigate, goto } from "$app/navigation";
import { useSettingsStore } from "$lib/stores/settings";
import CarbonCheckmark from "~icons/carbon/checkmark";
import Modal from "$lib/components/Modal.svelte";
interface Props {
children?: import("svelte").Snippet;
}
let { children }: Props = $props();
let previousPage: string = $state(base);
afterNavigate(({ from }) => {
if (!from?.url.pathname.includes("settings")) {
previousPage = from?.url.toString() || previousPage;
}
});
const settings = useSettingsStore();
</script>
<Modal
on:close={() => goto(previousPage)}
width="!h-[95dvh] !w-[90dvw] overflow-hidden rounded-2xl bg-white shadow-2xl outline-none sm:!h-[85dvh] xl:!w-[1200px] 2xl:!h-[75dvh]"
>
{@render children?.()}
{#if $settings.recentlySaved}
<div
class="absolute bottom-4 right-4 m-2 flex items-center gap-1.5 rounded-full border border-gray-300 bg-gray-200 px-3 py-1 text-black"
>
<CarbonCheckmark class="text-green-500" />
Saved
</div>
{/if}
</Modal>
| chat-ui/src/routes/settings/+layout.svelte/0 | {
"file_path": "chat-ui/src/routes/settings/+layout.svelte",
"repo_id": "chat-ui",
"token_count": 424
} |
# Know your dataset
There are two types of dataset objects, a regular [`Dataset`] and then an ✨ [`IterableDataset`] ✨. A [`Dataset`] provides fast random access to the rows, and memory-mapping so that loading even large datasets only uses a relatively small amount of device memory. But for really, really big datasets that won't even fit on disk or in memory, an [`IterableDataset`] allows you to access and use the dataset without waiting for it to download completely!
This tutorial will show you how to load and access a [`Dataset`] and an [`IterableDataset`].
## Dataset
When you load a dataset split, you'll get a [`Dataset`] object. You can do many things with a [`Dataset`] object, which is why it's important to learn how to manipulate and interact with the data stored inside.
This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset, but feel free to load any dataset you'd like and follow along!
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes", split="train")
```
### Indexing
A [`Dataset`] contains columns of data, and each column can be a different type of data. The *index*, or axis label, is used to access examples from the dataset. For example, indexing by the row returns a dictionary of an example from the dataset:
```py
# Get the first row in the dataset
>>> dataset[0]
{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'}
```
Use the `-` operator to start from the end of the dataset:
```py
# Get the last row in the dataset
>>> dataset[-1]
{'label': 0,
'text': 'things really get weird , though not particularly scary : the movie is all portent and no content .'}
```
Indexing by the column name returns a list of all the values in the column:
```py
>>> dataset["text"]
['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'effective but too-tepid biopic',
...,
'things really get weird , though not particularly scary : the movie is all portent and no content .']
```
You can combine row and column name indexing to return a specific value at a position:
```py
>>> dataset[0]["text"]
'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'
```
But it is important to remember that indexing order matters, especially when working with large audio and image datasets. Indexing by the column name returns all the values in the column first, then loads the value at that position. For large datasets, it may be slower to index by the column name first.
```py
>>> import time
>>> start_time = time.time()
>>> text = dataset[0]["text"]
>>> end_time = time.time()
>>> print(f"Elapsed time: {end_time - start_time:.4f} seconds")
Elapsed time: 0.0031 seconds
>>> start_time = time.time()
>>> text = dataset["text"][0]
>>> end_time = time.time()
>>> print(f"Elapsed time: {end_time - start_time:.4f} seconds")
Elapsed time: 0.0094 seconds
```
### Slicing
Slicing returns a slice - or subset - of the dataset, which is useful for viewing several rows at once. To slice a dataset, use the `:` operator to specify a range of positions.
```py
# Get the first three rows
>>> dataset[:3]
{'label': [1, 1, 1],
'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .',
'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .',
'effective but too-tepid biopic']}
# Get rows between three and six
>>> dataset[3:6]
{'label': [1, 1, 1],
'text': ['if you sometimes like to go to the movies to have fun , wasabi is a good place to start .',
"emerges as something rare , an issue movie that's so honest and keenly observed that it doesn't feel like one .",
'the film provides some great insight into the neurotic mindset of all comics -- even those who have reached the absolute top of the game .']}
```
## IterableDataset
An [`IterableDataset`] is loaded when you set the `streaming` parameter to `True` in [`~datasets.load_dataset`]:
```py
>>> from datasets import load_dataset
>>> iterable_dataset = load_dataset("food101", split="train", streaming=True)
>>> for example in iterable_dataset:
... print(example)
... break
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F5C520>, 'label': 6}
```
You can also create an [`IterableDataset`] from an *existing* [`Dataset`]. This is faster than streaming mode because the dataset is streamed from local files:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("rotten_tomatoes", split="train")
>>> iterable_dataset = dataset.to_iterable_dataset()
```
An [`IterableDataset`] progressively iterates over a dataset one example at a time, so you don't have to wait for the whole dataset to download before you can use it. As you can imagine, this is quite useful for large datasets you want to use immediately!
However, this means an [`IterableDataset`]'s behavior is different from a regular [`Dataset`]. You don't get random access to examples in an [`IterableDataset`]. Instead, you should iterate over its elements, for example, by calling `next(iter())` or with a `for` loop to return the next item from the [`IterableDataset`]:
```py
>>> next(iter(iterable_dataset))
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F59B50>,
'label': 6}
>>> for example in iterable_dataset:
... print(example)
... break
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DE82B0>, 'label': 6}
```
You can return a subset of the dataset with a specific number of examples in it with [`IterableDataset.take`]:
```py
# Get first three examples
>>> list(iterable_dataset.take(3))
[{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DEE9D0>,
'label': 6},
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F7479DE8190>,
'label': 6},
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x383 at 0x7F7479DE8310>,
'label': 6}]
```
But unlike [slicing](access/#slicing), [`IterableDataset.take`] creates a new [`IterableDataset`].
## Next steps
Interested in learning more about the differences between these two types of datasets? Learn more about them in the [Differences between `Dataset` and `IterableDataset`](about_mapstyle_vs_iterable) conceptual guide.
To get more hands-on with these dataset types, check out the [Process](process) guide to learn how to preprocess a [`Dataset`] or the [Stream](stream) guide to learn how to preprocess an [`IterableDataset`].
| datasets/docs/source/access.mdx/0 | {
"file_path": "datasets/docs/source/access.mdx",
"repo_id": "datasets",
"token_count": 2274
} |
# Process image data
This guide shows specific methods for processing image datasets. Learn how to:
- Use [`~Dataset.map`] with an image dataset.
- Apply data augmentations to a dataset with [`~Dataset.set_transform`].
For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>.
## Map
The [`~Dataset.map`] function can apply transforms over an entire dataset.
For example, create a basic [`Resize`](https://pytorch.org/vision/stable/generated/torchvision.transforms.Resize.html) function:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [image.convert("RGB").resize((100,100)) for image in examples["image"]]
... return examples
```
Now use the [`~Dataset.map`] function to resize the entire dataset, and set `batched=True` to speed up the process by accepting batches of examples. The transform returns `pixel_values` as a cacheable `PIL.Image` object:
```py
>>> dataset = dataset.map(transforms, remove_columns=["image"], batched=True)
>>> dataset[0]
{'label': 6,
'pixel_values': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=100x100 at 0x7F058237BB10>}
```
The cache file saves time because you don't have to execute the same transform twice. The [`~Dataset.map`] function is best for operations you only run once per training - like resizing an image - instead of using it for operations executed for each epoch, like data augmentations.
[`~Dataset.map`] takes up some memory, but you can reduce its memory requirements with the following parameters:
- [`batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.batch_size) determines the number of examples that are processed in one call to the transform function.
- [`writer_batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.writer_batch_size) determines the number of processed examples that are kept in memory before they are stored away.
Both parameter values default to 1000, which can be expensive if you are storing images. Lower these values to use less memory when you use [`~Dataset.map`].
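For example, a lower-memory `map` call might look like the following sketch (the values are illustrative, not recommendations; tune them for your hardware):
```py
>>> dataset = dataset.map(transforms, remove_columns=["image"], batched=True, batch_size=100, writer_batch_size=100)
```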
## Apply transforms
🤗 Datasets applies data augmentations from any library or package to your dataset. Transforms can be applied on-the-fly on batches of data with [`~Dataset.set_transform`], which consumes less disk space.
<Tip>
The following example uses [torchvision](https://pytorch.org/vision/stable/index.html), but feel free to use other data augmentation libraries like [Albumentations](https://albumentations.ai/docs/), [Kornia](https://kornia.readthedocs.io/en/latest/), and [imgaug](https://imgaug.readthedocs.io/en/latest/).
</Tip>
For example, if you'd like to change the color properties of an image randomly:
```py
>>> from torchvision.transforms import Compose, ColorJitter, ToTensor
>>> jitter = Compose(
... [
... ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.7),
... ToTensor(),
... ]
... )
```
Create a function to apply the `ColorJitter` transform:
```py
>>> def transforms(examples):
... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]]
... return examples
```
Apply the transform with the [`~Dataset.set_transform`] function:
```py
>>> dataset.set_transform(transforms)
```
 | datasets/docs/source/image_process.mdx/0 | {
"file_path": "datasets/docs/source/image_process.mdx",
"repo_id": "datasets",
"token_count": 1031
} |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Quickstart
[[open-in-colab]]
This quickstart is intended for developers who are ready to dive into the code and see an example of how to integrate 🤗 Datasets into their model training workflow. If you're a beginner, we recommend starting with our [tutorials](./tutorial), where you'll get a more thorough introduction.
Each dataset is unique, and depending on the task, some datasets may require additional steps to prepare them for training. But you can always use 🤗 Datasets tools to load and process a dataset. The fastest and easiest way to get started is by loading an existing dataset from the [Hugging Face Hub](https://huggingface.co/datasets). There are thousands of datasets to choose from, spanning many tasks. Choose the type of dataset you want to work with, and let's get started!
<div class="mt-4">
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-3 md:gap-y-4 md:gap-x-5">
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#audio"
><div class="w-full text-center bg-gradient-to-r from-violet-300 via-sky-400 to-green-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Audio</div>
<p class="text-gray-700">Resample an audio dataset and get it ready for a model to classify what type of banking issue a speaker is calling about.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#vision"
><div class="w-full text-center bg-gradient-to-r from-pink-400 via-purple-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Vision</div>
<p class="text-gray-700">Apply data augmentation to an image dataset and get it ready for a model to diagnose disease in bean plants.</p>
</a>
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#nlp"
><div class="w-full text-center bg-gradient-to-r from-orange-300 via-red-400 to-violet-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">NLP</div>
<p class="text-gray-700">Tokenize a dataset and get it ready for a model to determine whether a pair of sentences have the same meaning.</p>
</a>
</div>
</div>
<Tip>
Check out [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course to learn more about other important topics such as loading remote or local datasets, tools for cleaning up a dataset, and creating your own dataset.
</Tip>
Start by installing 🤗 Datasets:
```bash
pip install datasets
```
🤗 Datasets also supports audio and image data formats:
* To work with audio datasets, install the [`Audio`] feature:
```bash
pip install datasets[audio]
```
* To work with image datasets, install the [`Image`] feature:
```bash
pip install datasets[vision]
```
Besides 🤗 Datasets, make sure your preferred machine learning framework is installed:
<frameworkcontent>
<pt>
```bash
pip install torch
```
</pt>
<tf>
```bash
pip install tensorflow
```
</tf>
</frameworkcontent>
## Audio
Audio datasets are loaded just like text datasets. However, an audio dataset is preprocessed a bit differently. Instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor). An audio input may also require resampling its sampling rate to match the sampling rate of the pretrained model you're using. In this quickstart, you'll prepare the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset for a model to train on and classify the banking issue a customer is having.
**1**. Load the MInDS-14 dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and a dataset split:
```py
>>> from datasets import load_dataset, Audio
>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
```
**2**. Next, load a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model and its corresponding feature extractor from the [🤗 Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task.
```py
>>> from transformers import AutoModelForAudioClassification, AutoFeatureExtractor
>>> model = AutoModelForAudioClassification.from_pretrained("facebook/wav2vec2-base")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
```
**3**. The [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset card indicates the sampling rate is 8kHz, but the Wav2Vec2 model was pretrained on a sampling rate of 16kHz. You'll need to upsample the `audio` column with the [`~Dataset.cast_column`] function and [`Audio`] feature to match the model's sampling rate.
```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ...,
3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
'sampling_rate': 16000}
```
**4**. Create a function to preprocess the audio `array` with the feature extractor, and truncate and pad the sequences into tidy rectangular tensors. The most important thing to remember is to call the audio `array` in the feature extractor since the `array` - the actual speech signal - is the model input.
Once you have a preprocessing function, use the [`~Dataset.map`] function to speed up processing by applying the function to batches of examples in the dataset.
```py
>>> def preprocess_function(examples):
... audio_arrays = [x["array"] for x in examples["audio"]]
... inputs = feature_extractor(
... audio_arrays,
... sampling_rate=16000,
... padding=True,
... max_length=100000,
... truncation=True,
... )
... return inputs
>>> dataset = dataset.map(preprocess_function, batched=True)
```
**5**. Use the [`~Dataset.rename_column`] function to rename the `intent_class` column to `labels`, which is the expected input name in [Wav2Vec2ForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification):
```py
>>> dataset = dataset.rename_column("intent_class", "labels")
```
**6**. Set the dataset format according to the machine learning framework you're using.
<frameworkcontent>
<pt>
Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader):
```py
>>> from torch.utils.data import DataLoader
>>> dataset.set_format(type="torch", columns=["input_values", "labels"])
>>> dataloader = DataLoader(dataset, batch_size=4)
```
</pt>
<tf>
Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with
TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset`
with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification.
```py
>>> import tensorflow as tf
>>> tf_dataset = model.prepare_tf_dataset(
... dataset,
... batch_size=4,
... shuffle=True,
... )
```
</tf>
</frameworkcontent>
**7**. Start training with your machine learning framework! Check out the 🤗 Transformers [audio classification guide](https://huggingface.co/docs/transformers/tasks/audio_classification) for an end-to-end example of how to train a model on an audio dataset.
## Vision
Image datasets are loaded just like text datasets. However, instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor) to preprocess the dataset. Applying data augmentation to an image is common in computer vision to make the model more robust against overfitting. You're free to use any data augmentation library you want, and then you can apply the augmentations with 🤗 Datasets. In this quickstart, you'll load the [Beans](https://huggingface.co/datasets/beans) dataset and get it ready for the model to train on and identify disease from the leaf images.
**1**. Load the Beans dataset by providing the [`load_dataset`] function with the dataset name and a dataset split:
```py
>>> from datasets import load_dataset, Image
>>> dataset = load_dataset("beans", split="train")
```
Most image models work with RGB images. If your dataset contains images in a different mode, you can use the [`~Dataset.cast_column`] function to set the mode to RGB:
```py
>>> dataset = dataset.cast_column("image", Image(mode="RGB"))
```
The Beans dataset contains only RGB images, so this step is unnecessary here.
**2**. Now you can add some data augmentations with any library ([Albumentations](https://albumentations.ai/), [imgaug](https://imgaug.readthedocs.io/en/latest/), [Kornia](https://kornia.readthedocs.io/en/latest/)) you like. Here, you'll use [torchvision](https://pytorch.org/vision/stable/transforms.html) to randomly change the color properties of an image:
```py
>>> from torchvision.transforms import Compose, ColorJitter, ToTensor
>>> jitter = Compose(
... [ColorJitter(brightness=0.5, hue=0.5), ToTensor()]
... )
```
**3**. Create a function to apply your transform to the dataset and generate the model input: `pixel_values`.
```python
>>> def transforms(examples):
... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]]
... return examples
```
**4**. Use the [`~Dataset.with_transform`] function to apply the data augmentations on-the-fly:
```py
>>> dataset = dataset.with_transform(transforms)
```
**5**. Set the dataset format according to the machine learning framework you're using.
<frameworkcontent>
<pt>
Wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader). You'll also need to create a collate function to collate the samples into batches:
```py
>>> import torch
>>> from torch.utils.data import DataLoader
>>> def collate_fn(examples):
... images = []
... labels = []
... for example in examples:
... images.append((example["pixel_values"]))
... labels.append(example["labels"])
...
... pixel_values = torch.stack(images)
... labels = torch.tensor(labels)
... return {"pixel_values": pixel_values, "labels": labels}
>>> dataloader = DataLoader(dataset, collate_fn=collate_fn, batch_size=4)
```
</pt>
<tf>
Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with
TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset`
with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification.
Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed:
```bash
pip install -U albumentations opencv-python
```
```py
>>> import albumentations
>>> import numpy as np
>>> transform = albumentations.Compose([
... albumentations.RandomCrop(width=256, height=256),
... albumentations.HorizontalFlip(p=0.5),
... albumentations.RandomBrightnessContrast(p=0.2),
... ])
>>> def transforms(examples):
... examples["pixel_values"] = [
... transform(image=np.array(image))["image"] for image in examples["image"]
... ]
... return examples
>>> dataset.set_transform(transforms)
>>> tf_dataset = model.prepare_tf_dataset(
... dataset,
... batch_size=4,
... shuffle=True,
... )
```
</tf>
</frameworkcontent>
**6**. Start training with your machine learning framework! Check out the 🤗 Transformers [image classification guide](https://huggingface.co/docs/transformers/tasks/image_classification) for an end-to-end example of how to train a model on an image dataset.
## NLP
Text needs to be tokenized into individual tokens by a [tokenizer](https://huggingface.co/docs/transformers/main_classes/tokenizer). For the quickstart, you'll load the [Microsoft Research Paraphrase Corpus (MRPC)](https://huggingface.co/datasets/glue/viewer/mrpc) training dataset to train a model to determine whether a pair of sentences mean the same thing.
**1**. Load the MRPC dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and dataset split:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("glue", "mrpc", split="train")
```
**2**. Next, load a pretrained [BERT](https://huggingface.co/bert-base-uncased) model and its corresponding tokenizer from the [🤗 Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task.
```py
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
===PT-TF-SPLIT===
>>> from transformers import TFAutoModelForSequenceClassification, AutoTokenizer
>>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
```
**3**. Create a function to tokenize the dataset, and you should also truncate and pad the text into tidy rectangular tensors. The tokenizer generates three new columns in the dataset: `input_ids`, `token_type_ids`, and an `attention_mask`. These are the model inputs.
Use the [`~Dataset.map`] function to speed up processing by applying your tokenization function to batches of examples in the dataset:
```py
>>> def encode(examples):
... return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, padding="max_length")
>>> dataset = dataset.map(encode, batched=True)
>>> dataset[0]
{'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
'label': 1,
'idx': 0,
'input_ids': array([ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102, 11336, 6732, 3384, 1106, 1140, 1112, 1178, 107, 1103, 7737, 107, 117, 7277, 2180, 5303, 4806, 1117, 1711, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102]),
'token_type_ids': array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
'attention_mask': array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])}
```
**4**. Rename the `label` column to `labels`, which is the expected input name in [BertForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/bert#transformers.BertForSequenceClassification):
```py
>>> dataset = dataset.map(lambda examples: {"labels": examples["label"]}, batched=True)
```
**5**. Set the dataset format according to the machine learning framework you're using.
<frameworkcontent>
<pt>
Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader):
```py
>>> import torch
>>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "labels"])
>>> dataloader = torch.utils.data.DataLoader(dataset, batch_size=32)
```
</pt>
<tf>
Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with
TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset`
with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification.
```py
>>> import tensorflow as tf
>>> tf_dataset = model.prepare_tf_dataset(
... dataset,
... batch_size=4,
... shuffle=True,
... )
```
</tf>
</frameworkcontent>
**6**. Start training with your machine learning framework! Check out the 🤗 Transformers [text classification guide](https://huggingface.co/docs/transformers/tasks/sequence_classification) for an end-to-end example of how to train a model on a text dataset.
## What's next?
This completes the 🤗 Datasets quickstart! You can load any text, audio, or image dataset with a single function and get it ready for your model to train on.
For your next steps, take a look at our [How-to guides](./how_to) and learn how to do more specific things like loading different dataset formats, aligning labels, and streaming large datasets. If you're interested in learning more about 🤗 Datasets core concepts, grab a cup of coffee and read our [Conceptual Guides](./about_arrow)!
| datasets/docs/source/quickstart.mdx/0 | {
"file_path": "datasets/docs/source/quickstart.mdx",
"repo_id": "datasets",
"token_count": 6102
} |
# Use with Spark
This document is a quick introduction to using 🤗 Datasets with Spark, with a particular focus on how to load a Spark DataFrame into a [`Dataset`] object.
From there, you have fast access to any element and you can use it as a data loader to train models.
## Load from Spark
A [`Dataset`] object is a wrapper of an Arrow table, which allows fast reads from arrays in the dataset to PyTorch, TensorFlow and JAX tensors.
The Arrow table is memory mapped from disk, which can load datasets bigger than your available RAM.
You can get a [`Dataset`] from a Spark DataFrame using [`Dataset.from_spark`]:
```py
>>> from datasets import Dataset
>>> df = spark.createDataFrame(
... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
... columns=["id", "name"],
... )
>>> ds = Dataset.from_spark(df)
```
The Spark workers write the dataset on disk in a cache directory as Arrow files, and the [`Dataset`] is loaded from there.
Alternatively, you can skip materialization by using [`IterableDataset.from_spark`], which returns an [`IterableDataset`]:
```py
>>> from datasets import IterableDataset
>>> df = spark.createDataFrame(
... data=[[1, "Elia"], [2, "Teo"], [3, "Fang"]],
... columns=["id", "name"],
... )
>>> ds = IterableDataset.from_spark(df)
>>> print(next(iter(ds)))
{"id": 1, "name": "Elia"}
```
### Caching
When using [`Dataset.from_spark`], the resulting [`Dataset`] is cached; if you call [`Dataset.from_spark`] multiple
times on the same DataFrame it won't re-run the Spark job that writes the dataset as Arrow files on disk.
You can set the cache location by passing `cache_dir=` to [`Dataset.from_spark`].
Make sure to use a disk that is available to both your workers and your current machine (the driver).
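For example, a minimal sketch (the directory below is only a placeholder; pick any location that both the driver and the workers can read and write):
```py
>>> ds = Dataset.from_spark(df, cache_dir="/dbfs/datasets_cache")
```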
<Tip warning={true}>
In a different session, a Spark DataFrame doesn't have the same [semantic hash](https://spark.apache.org/docs/3.2.0/api/python/reference/api/pyspark.sql.DataFrame.semanticHash.html), and it will rerun a Spark job and store it in a new cache.
</Tip>
### Feature types
If your dataset is made of images, audio data or N-dimensional arrays, you can specify the `features=` argument in
[`Dataset.from_spark`] (or [`IterableDataset.from_spark`]):
```py
>>> from datasets import Dataset, Features, Image, Value
>>> data = [(0, open("image.png", "rb").read())]
>>> df = spark.createDataFrame(data, "idx: int, image: binary")
>>> # Also works if you have arrays
>>> # data = [(0, np.zeros(shape=(32, 32, 3), dtype=np.int32).tolist())]
>>> # df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>")
>>> features = Features({"idx": Value("int64"), "image": Image()})
>>> dataset = Dataset.from_spark(df, features=features)
>>> dataset[0]
{'idx': 0, 'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=32x32>}
```
You can check the [`Features`] documentation to know about all the feature types available.
| datasets/docs/source/use_with_spark.mdx/0 | {
"file_path": "datasets/docs/source/use_with_spark.mdx",
"repo_id": "datasets",
"token_count": 962
} |
#!/usr/bin/env python
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.convert_to_parquet import ConvertToParquetCommand
from datasets.commands.delete_from_hub import DeleteFromHubCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
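    # Pair leftover CLI tokens ("--key value --key2 value2") into a {key: value} dict, stripping leading dashes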
return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
def main():
parser = ArgumentParser(
"HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
)
commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(commands_parser)
EnvironmentCommand.register_subcommand(commands_parser)
TestCommand.register_subcommand(commands_parser)
ConvertToParquetCommand.register_subcommand(commands_parser)
DeleteFromHubCommand.register_subcommand(commands_parser)
# Parse args
args, unknown_args = parser.parse_known_args()
if not hasattr(args, "func"):
parser.print_help()
exit(1)
kwargs = parse_unknown_args(unknown_args)
# Run
service = args.func(args, **kwargs)
service.run()
if __name__ == "__main__":
main()
| datasets/src/datasets/commands/datasets_cli.py/0 | {
"file_path": "datasets/src/datasets/commands/datasets_cli.py",
"repo_id": "datasets",
"token_count": 480
} |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.download_config import DownloadConfig
from ..table import array_cast
from ..utils.file_utils import is_local_path, xopen
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
"""Image [`Feature`] to read image data from an image file.
Input: The Image feature accepts as input:
- A `str`: Absolute path to the image file (i.e. random access is allowed).
- A `dict` with the keys:
- `path`: String with relative path of the image file to the archive file.
- `bytes`: Bytes of the image file.
This is useful for archived files with sequential access.
- An `np.ndarray`: NumPy array representing an image.
- A `PIL.Image.Image`: PIL image object.
Args:
mode (`str`, *optional*):
The mode to convert the image to. If `None`, the native mode of the image is used.
decode (`bool`, defaults to `True`):
Whether to decode the image data. If `False`,
returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`.
Examples:
```py
>>> from datasets import load_dataset, Image
>>> ds = load_dataset("beans", split="train")
>>> ds.features["image"]
Image(decode=True, id=None)
>>> ds[0]["image"]
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0>
    >>> ds = ds.cast_column('image', Image(decode=False))
    >>> ds[0]["image"]
    {'bytes': None,
'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'}
```
"""
mode: Optional[str] = None
decode: bool = True
id: Optional[str] = None
# Automatically constructed
dtype: ClassVar[str] = "PIL.Image.Image"
pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
_type: str = field(default="Image", init=False, repr=False)
def __call__(self):
return self.pa_type
def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
"""Encode example into a format for Arrow.
Args:
value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`):
Data passed as input to Image feature.
Returns:
`dict` with "path" and "bytes" fields
"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if isinstance(value, list):
value = np.array(value)
if isinstance(value, str):
return {"path": value, "bytes": None}
elif isinstance(value, bytes):
return {"path": None, "bytes": value}
elif isinstance(value, np.ndarray):
# convert the image array to PNG/TIFF bytes
return encode_np_array(value)
elif isinstance(value, PIL.Image.Image):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(value)
elif value.get("path") is not None and os.path.isfile(value["path"]):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path")}
elif value.get("bytes") is not None or value.get("path") is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes"), "path": value.get("path")}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
)
def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
"""Decode example image file into image data.
Args:
value (`str` or `dict`):
A string with the absolute image file path, a dictionary with
keys:
- `path`: String with absolute or relative image file path.
- `bytes`: The bytes of the image file.
token_per_repo_id (`dict`, *optional*):
To access and decode
image files from private repositories on the Hub, you can pass
a dictionary repo_id (`str`) -> token (`bool` or `str`).
Returns:
`PIL.Image.Image`
"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
if config.PIL_AVAILABLE:
import PIL.Image
import PIL.ImageOps
else:
raise ImportError("To support decoding images, please install 'Pillow'.")
if token_per_repo_id is None:
token_per_repo_id = {}
path, bytes_ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
else:
if is_local_path(path):
image = PIL.Image.open(path)
else:
source_url = path.split("::")[-1]
pattern = (
config.HUB_DATASETS_URL
if source_url.startswith(config.HF_ENDPOINT)
else config.HUB_DATASETS_HFFS_URL
)
try:
repo_id = string_to_dict(source_url, pattern)["repo_id"]
token = token_per_repo_id.get(repo_id)
except ValueError:
token = None
download_config = DownloadConfig(token=token)
with xopen(path, "rb", download_config=download_config) as f:
bytes_ = BytesIO(f.read())
image = PIL.Image.open(bytes_)
else:
image = PIL.Image.open(BytesIO(bytes_))
image.load() # to avoid "Too many open files" errors
if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None:
image = PIL.ImageOps.exif_transpose(image)
if self.mode and self.mode != image.mode:
image = image.convert(self.mode)
return image
def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
"""If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary"),
"path": Value("string"),
}
)
def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
"""Cast an Arrow array to the Image arrow storage type.
The Arrow types that can be converted to the Image pyarrow storage type are:
- `pa.string()` - it must contain the "path" data
- `pa.binary()` - it must contain the image bytes
- `pa.struct({"bytes": pa.binary()})`
- `pa.struct({"path": pa.string()})`
- `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter
- `pa.list(*)` - it must contain the image array data
Args:
storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`):
PyArrow array to cast.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
if pa.types.is_string(storage.type):
bytes_array = pa.array([None] * len(storage), type=pa.binary())
storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_binary(storage.type):
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_struct(storage.type):
if storage.type.get_field_index("bytes") >= 0:
bytes_array = storage.field("bytes")
else:
bytes_array = pa.array([None] * len(storage), type=pa.binary())
if storage.type.get_field_index("path") >= 0:
path_array = storage.field("path")
else:
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
elif pa.types.is_list(storage.type):
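            # lists of pixel values: encode each nested array as image bytes and leave "path" empty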
bytes_array = pa.array(
[encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
type=pa.binary(),
)
path_array = pa.array([None] * len(storage), type=pa.string())
storage = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
)
return array_cast(storage, self.pa_type)
def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
"""Embed image files into the Arrow array.
Args:
storage (`pa.StructArray`):
PyArrow array to embed.
Returns:
`pa.StructArray`: Array in the Image arrow storage type, that is
`pa.struct({"bytes": pa.binary(), "path": pa.string()})`.
"""
@no_op_if_value_is_null
def path_to_bytes(path):
with xopen(path, "rb") as f:
bytes_ = f.read()
return bytes_
bytes_array = pa.array(
[
(path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
],
type=pa.binary(),
)
path_array = pa.array(
[os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
type=pa.string(),
)
storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
"""Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
buffer = BytesIO()
if image.format in list_image_compression_formats():
format = image.format
else:
format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(buffer, format=format)
return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
if hasattr(image, "filename") and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
dtype = array.dtype
dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
dtype_kind = dtype.kind
dtype_itemsize = dtype.itemsize
dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
if dtype != dest_dtype:
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = np.dtype(dtype_str)
warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"Cannot downcast dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
)
image = PIL.Image.fromarray(array.astype(dest_dtype))
return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(
objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
"""Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'.")
if objs:
_, obj = first_non_null_value(objs)
if isinstance(obj, str):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(obj, np.ndarray):
obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
return [obj_to_image_dict_func(obj) for obj in objs]
elif isinstance(obj, PIL.Image.Image):
obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
return [obj_to_image_dict_func(obj) for obj in objs]
else:
return objs
else:
return objs
| datasets/src/datasets/features/image.py/0 | {
"file_path": "datasets/src/datasets/features/image.py",
"repo_id": "datasets",
"token_count": 6979
} |
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)
class ParallelBackendConfig:
backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func):
"""
**Experimental.** Apply a function to iterable elements in parallel, where the implementation uses either
multiprocessing.Pool or joblib for parallelization.
Args:
function (`Callable[[Any], Any]`): Function to be applied to `iterable`.
iterable (`list`, `tuple` or `np.ndarray`): Iterable elements to apply function to.
num_proc (`int`): Number of processes (if no backend specified) or jobs (using joblib).
types (`tuple`): Additional types (besides `dict` values) to apply `function` recursively to their elements.
disable_tqdm (`bool`): Whether to disable the tqdm progressbar.
desc (`str`): Prefix for the tqdm progressbar.
single_map_nested_func (`Callable`): Map function that applies `function` to an element from `iterable`.
Takes a tuple of function, data_struct, types, rank, disable_tqdm, desc as input, where data_struct is an
element of `iterable`, and `rank` is used for progress bar.
"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
)
return _map_with_joblib(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
)
def _map_with_multiprocessing_pool(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
):
num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselves (contiguous splits)
for index in range(num_proc):
div = len(iterable) // num_proc
mod = len(iterable) % num_proc
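        # the first `mod` slices get one extra element so that every item is assigned to exactly one process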
start = div * index + min(index, mod)
end = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], batched, batch_size, types, index, disable_tqdm, desc))
if len(iterable) != sum(len(i[1]) for i in split_kwds):
raise ValueError(
f"Error dividing inputs iterable among processes. "
f"Total number of objects {len(iterable)}, "
f"length: {sum(len(i[1]) for i in split_kwds)}"
)
logger.info(
f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
)
initargs, initializer = None, None
if not disable_tqdm:
initargs, initializer = (RLock(),), tqdm.set_lock
with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
mapped = pool.map(single_map_nested_func, split_kwds)
logger.info(f"Finished {num_proc} processes")
mapped = [obj for proc_res in mapped for obj in proc_res]
logger.info(f"Unpacked {len(mapped)} objects")
return mapped
def _map_with_joblib(
function, iterable, num_proc, batched, batch_size, types, disable_tqdm, desc, single_map_nested_func
):
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
return joblib.Parallel()(
joblib.delayed(single_map_nested_func)((function, obj, batched, batch_size, types, None, True, None))
for obj in iterable
)
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
"""
**Experimental.** Configures the parallel backend for parallelized dataset loading, which uses the parallelization
implemented by joblib.
Args:
backend_name (str): Name of backend for parallelization implementation, has to be supported by joblib.
Example usage:
```py
with parallel_backend('spark'):
dataset = load_dataset(..., num_proc=2)
```
"""
ParallelBackendConfig.backend_name = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
ParallelBackendConfig.backend_name = None
| datasets/src/datasets/parallel/parallel.py/0 | {
"file_path": "datasets/src/datasets/parallel/parallel.py",
"repo_id": "datasets",
"token_count": 1783
} |
import enum
import os
from typing import Optional
from huggingface_hub.utils import insecure_hashlib
from .. import config
from ..exceptions import (
ExpectedMoreDownloadedFilesError,
ExpectedMoreSplitsError,
NonMatchingChecksumError,
NonMatchingSplitsSizesError,
UnexpectedDownloadedFileError,
UnexpectedSplitsError,
)
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
"""`Enum` that specifies which verification checks to run.
The default mode is `BASIC_CHECKS`, which will perform only rudimentary checks to avoid slowdowns
when generating/downloading a dataset for the first time.
The verification modes:
| | Verification checks |
|---------------------------|------------------------------------------------------------------------------ |
| `ALL_CHECKS` | Split checks, uniqueness of the keys yielded in case of the GeneratorBuilder |
| | and the validity (number of files, checksums, etc.) of downloaded files |
| `BASIC_CHECKS` (default) | Same as `ALL_CHECKS` but without checking downloaded files |
| `NO_CHECKS` | None |
"""
ALL_CHECKS = "all_checks"
BASIC_CHECKS = "basic_checks"
NO_CHECKS = "no_checks"
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
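    """Check that recorded download checksums match the expected ones, raising a descriptive error on any mismatch."""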
if expected_checksums is None:
logger.info("Unable to verify checksums.")
return
if len(set(expected_checksums) - set(recorded_checksums)) > 0:
raise ExpectedMoreDownloadedFilesError(str(set(expected_checksums) - set(recorded_checksums)))
if len(set(recorded_checksums) - set(expected_checksums)) > 0:
raise UnexpectedDownloadedFileError(str(set(recorded_checksums) - set(expected_checksums)))
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
for_verification_name = " for " + verification_name if verification_name is not None else ""
if len(bad_urls) > 0:
raise NonMatchingChecksumError(
f"Checksums didn't match{for_verification_name}:\n"
f"{bad_urls}\n"
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
)
logger.info("All the checksums matched successfully" + for_verification_name)
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
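    """Check that recorded split names and example counts match the expected ones, raising a descriptive error on any mismatch."""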
if expected_splits is None:
logger.info("Unable to verify splits sizes.")
return
if len(set(expected_splits) - set(recorded_splits)) > 0:
raise ExpectedMoreSplitsError(str(set(expected_splits) - set(recorded_splits)))
if len(set(recorded_splits) - set(expected_splits)) > 0:
raise UnexpectedSplitsError(str(set(recorded_splits) - set(expected_splits)))
bad_splits = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(bad_splits) > 0:
raise NonMatchingSplitsSizesError(str(bad_splits))
logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
"""Compute the file size and the sha256 checksum of a file"""
if record_checksum:
m = insecure_hashlib.sha256()
with open(path, "rb") as f:
for chunk in iter(lambda: f.read(1 << 20), b""):
m.update(chunk)
checksum = m.hexdigest()
else:
checksum = None
return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
"""Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
Args:
dataset_size (int): Dataset size in bytes.
Returns:
bool: Whether `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`.
"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| datasets/src/datasets/utils/info_utils.py/0 | {
"file_path": "datasets/src/datasets/utils/info_utils.py",
"repo_id": "datasets",
"token_count": 1731
} |
import pytest
from datasets.builder import InvalidConfigName
from datasets.data_files import DataFilesList
from datasets.packaged_modules.pandas.pandas import PandasConfig
def test_config_raises_when_invalid_name() -> None:
with pytest.raises(InvalidConfigName, match="Bad characters"):
_ = PandasConfig(name="name-with-*-invalid-character")
@pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])])
def test_config_raises_when_invalid_data_files(data_files) -> None:
with pytest.raises(ValueError, match="Expected a DataFilesDict"):
_ = PandasConfig(name="name", data_files=data_files)
| datasets/tests/packaged_modules/test_pandas.py/0 | {
"file_path": "datasets/tests/packaged_modules/test_pandas.py",
"repo_id": "datasets",
"token_count": 229
} |
import unittest
import warnings
from datasets.utils import experimental
@experimental
def dummy_function():
return "success"
class TestExperimentalFlag(unittest.TestCase):
def test_experimental_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual(dummy_function(), "success")
self.assertEqual(len(w), 1)
| datasets/tests/test_experimental.py/0 | {
"file_path": "datasets/tests/test_experimental.py",
"repo_id": "datasets",
"token_count": 152
} |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at some point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock

    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)

    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| datasets/tests/test_patching.py/0 | {
"file_path": "datasets/tests/test_patching.py",
"repo_id": "datasets",
"token_count": 2274
} |
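A hedged usage sketch of the context-manager API exercised above, pointed at the info_utils module from earlier in this dump (which imports os and calls os.path.getsize). Inside the block every path to the attribute resolves to the stub; afterwards the original is restored. The stub itself is an illustrative assumption.

from datasets.utils import info_utils
from datasets.utils.patching import patch_submodule


def fake_getsize(path):
    """Stub used only for this illustration."""
    return 0


with patch_submodule(info_utils, "os.path.getsize", fake_getsize):
    assert info_utils.os.path.getsize is fake_getsize   # patched inside the block
assert info_utils.os.path.getsize is not fake_getsize   # original restored afterwards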