| text (string, 96–319k chars) | id (string, 14–178 chars) | metadata (dict) |
|---|---|---|
import { assert, it, describe, afterEach, vi, expect } from "vitest";
import type { Cookies } from "@sveltejs/kit";
import { collections } from "$lib/server/database";
import { updateUser } from "./updateUser";
import { ObjectId } from "mongodb";
import { DEFAULT_SETTINGS } from "$lib/types/Settings";
import { defaultModel } from "$lib/server/models";
import { findUser } from "$lib/server/auth";
import { defaultEmbeddingModel } from "$lib/server/embeddingModels";
const userData = {
preferred_username: "new-username",
name: "name",
picture: "https://example.com/avatar.png",
sub: "1234567890",
};
Object.freeze(userData);
const locals = {
userId: "1234567890",
sessionId: "1234567890",
};
// @ts-expect-error SvelteKit cookies dumb mock
const cookiesMock: Cookies = {
set: vi.fn(),
};
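// Test fixtures: seed a user and session-scoped conversations so updateUser has data to look up and migrate.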
const insertRandomUser = async () => {
const res = await collections.users.insertOne({
_id: new ObjectId(),
createdAt: new Date(),
updatedAt: new Date(),
username: "base-username",
name: userData.name,
avatarUrl: userData.picture,
hfUserId: userData.sub,
});
return res.insertedId;
};
const insertRandomConversations = async (count: number) => {
const res = await collections.conversations.insertMany(
new Array(count).fill(0).map(() => ({
_id: new ObjectId(),
title: "random title",
messages: [],
model: defaultModel.id,
embeddingModel: defaultEmbeddingModel.id,
createdAt: new Date(),
updatedAt: new Date(),
sessionId: locals.sessionId,
}))
);
return res.insertedIds;
};
describe("login", () => {
it("should update user if existing", async () => {
await insertRandomUser();
await updateUser({ userData, locals, cookies: cookiesMock });
const existingUser = await collections.users.findOne({ hfUserId: userData.sub });
assert.equal(existingUser?.name, userData.name);
expect(cookiesMock.set).toBeCalledTimes(1);
});
it("should migrate pre-existing conversations for new user", async () => {
const insertedId = await insertRandomUser();
await insertRandomConversations(2);
await updateUser({ userData, locals, cookies: cookiesMock });
const conversationCount = await collections.conversations.countDocuments({
userId: insertedId,
sessionId: { $exists: false },
});
assert.equal(conversationCount, 2);
await collections.conversations.deleteMany({ userId: insertedId });
});
it("should create default settings for new user", async () => {
await updateUser({ userData, locals, cookies: cookiesMock });
const user = await findUser(locals.sessionId);
assert.exists(user);
const settings = await collections.settings.findOne({ userId: user?._id });
expect(settings).toMatchObject({
userId: user?._id,
updatedAt: expect.any(Date),
createdAt: expect.any(Date),
ethicsModalAcceptedAt: expect.any(Date),
...DEFAULT_SETTINGS,
});
await collections.settings.deleteOne({ userId: user?._id });
});
it("should migrate pre-existing settings for pre-existing user", async () => {
const { insertedId } = await collections.settings.insertOne({
sessionId: locals.sessionId,
ethicsModalAcceptedAt: new Date(),
updatedAt: new Date(),
createdAt: new Date(),
...DEFAULT_SETTINGS,
shareConversationsWithModelAuthors: false,
});
await updateUser({ userData, locals, cookies: cookiesMock });
const settings = await collections.settings.findOne({
_id: insertedId,
sessionId: { $exists: false },
});
assert.exists(settings);
const user = await collections.users.findOne({ hfUserId: userData.sub });
expect(settings).toMatchObject({
userId: user?._id,
updatedAt: expect.any(Date),
createdAt: expect.any(Date),
ethicsModalAcceptedAt: expect.any(Date),
...DEFAULT_SETTINGS,
shareConversationsWithModelAuthors: false,
});
await collections.settings.deleteOne({ userId: user?._id });
});
});
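// Clean up seeded users and sessions, restore the mocked locals and reset all mocks between tests.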
afterEach(async () => {
await collections.users.deleteMany({ hfUserId: userData.sub });
await collections.sessions.deleteMany({});
locals.userId = "1234567890";
locals.sessionId = "1234567890";
vi.clearAllMocks();
});
| chat-ui/src/routes/login/callback/updateUser.spec.ts/0 | {
"file_path": "chat-ui/src/routes/login/callback/updateUser.spec.ts",
"repo_id": "chat-ui",
"token_count": 1408
} |
<script lang="ts">
import { enhance } from "$app/forms";
import { base } from "$app/paths";
import { page } from "$app/state";
import { goto } from "$app/navigation";
import { env as envPublic } from "$env/dynamic/public";
import { useSettingsStore } from "$lib/stores/settings";
import type { PageData } from "./$types";
import CarbonPen from "~icons/carbon/pen";
import CarbonTrash from "~icons/carbon/trash-can";
import CarbonCopy from "~icons/carbon/copy-file";
import CarbonFlag from "~icons/carbon/flag";
import CarbonLink from "~icons/carbon/link";
import CarbonChat from "~icons/carbon/chat";
import CarbonStar from "~icons/carbon/star";
import CarbonTools from "~icons/carbon/tools";
import CarbonLock from "~icons/carbon/locked";
import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte";
import ReportModal from "./ReportModal.svelte";
import IconInternet from "$lib/components/icons/IconInternet.svelte";
import ToolBadge from "$lib/components/ToolBadge.svelte";
import { ReviewStatus } from "$lib/types/Review";
interface Props {
data: PageData;
}
let { data }: Props = $props();
let assistant = $derived(
data.assistants.find((el) => el._id.toString() === page.params.assistantId)
);
const settings = useSettingsStore();
const prefix =
envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || page.url.origin}${base}`;
let shareUrl = $derived(`${prefix}/assistant/${assistant?._id}`);
let displayReportModal = $state(false);
let hasRag = $derived(
assistant?.rag?.allowAllDomains ||
!!assistant?.rag?.allowedDomains?.length ||
!!assistant?.rag?.allowedLinks?.length ||
!!assistant?.dynamicPrompt
);
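// Split the system prompt on {{...}} template tags so dynamic-prompt variables can be rendered as links below.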
let prepromptTags = $derived(assistant?.preprompt?.split(/(\{\{[^{}]*\}\})/) ?? []);
</script>
{#if displayReportModal}
<ReportModal on:close={() => (displayReportModal = false)} />
{/if}
<div class="flex h-full flex-col gap-2">
<div class="flex flex-col sm:flex-row sm:gap-6">
<div class="mb-4 flex justify-center sm:mb-0">
{#if assistant?.avatar}
<img
src={`${base}/settings/assistants/${assistant?._id}/avatar.jpg?hash=${assistant?.avatar}`}
alt="Avatar"
class="size-16 flex-none rounded-full object-cover sm:size-24"
/>
{:else}
<div
class="flex size-16 flex-none items-center justify-center rounded-full bg-gray-300 text-4xl font-semibold uppercase text-gray-500 sm:size-24"
>
{assistant?.name[0]}
</div>
{/if}
</div>
<div class="flex-1">
<div class="flex flex-wrap items-center gap-2">
<h1 class="break-words text-xl font-semibold">
{assistant?.name}
</h1>
{#if hasRag}
<span
class="inline-grid size-5 place-items-center rounded-full bg-blue-500/10"
title="This assistant uses the websearch."
>
<IconInternet classNames="text-sm text-blue-600" />
</span>
{/if}
<span class="rounded-full border px-2 py-0.5 text-sm leading-none text-gray-500"
>public</span
>
</div>
{#if assistant?.description}
<p class="mb-2 line-clamp-2 text-sm text-gray-500">
{assistant.description}
</p>
{/if}
<p class="text-sm text-gray-500">
Model: <span class="font-semibold"> {assistant?.modelId} </span>
<span class="text-gray-300">•</span> Created by
<a class="underline" href="{base}/assistants?user={assistant?.createdByName}">
{assistant?.createdByName}
</a>
</p>
<div
class="flex flex-wrap items-center gap-x-4 gap-y-2 whitespace-nowrap text-sm text-gray-500 hover:*:text-gray-800 max-sm:justify-center"
>
<div class="w-full sm:w-auto">
<button
class="mx-auto my-2 flex w-min items-center justify-center rounded-full bg-black px-3 py-1 text-base !text-white"
name="Activate model"
onclick={(e) => {
e.stopPropagation();
settings.instantSet({
activeModel: page.params.assistantId,
});
goto(`${base}/`);
}}
>
<CarbonChat class="mr-1.5 text-sm" />
New chat
</button>
</div>
{#if assistant?.createdByMe}
<a href="{base}/settings/assistants/{assistant?._id}/edit" class="underline"
><CarbonPen class="mr-1.5 inline text-xs" />Edit
</a>
<form method="POST" action="?/delete" use:enhance>
<button
type="submit"
class="flex items-center underline"
onclick={(event) => {
if (!confirm("Are you sure you want to delete this assistant?")) {
event.preventDefault();
}
}}
>
<CarbonTrash class="mr-1.5 inline text-xs" />Delete
</button>
</form>
{:else}
<form method="POST" action="?/unsubscribe" use:enhance>
<button type="submit" class="underline">
<CarbonTrash class="mr-1.5 inline text-xs" />Remove</button
>
</form>
<form method="POST" action="?/edit" use:enhance class="hidden">
<button type="submit" class="underline">
<CarbonCopy class="mr-1.5 inline text-xs" />Duplicate</button
>
</form>
{#if !assistant?.reported}
<button
type="button"
onclick={() => {
displayReportModal = true;
}}
class="underline"
>
<CarbonFlag class="mr-1.5 inline text-xs" />Report
</button>
{:else}
<button type="button" disabled class="text-gray-700">
<CarbonFlag class="mr-1.5 inline text-xs" />Reported</button
>
{/if}
{/if}
{#if data?.user?.isAdmin}
<span class="rounded-full border px-2 py-0.5 text-sm leading-none text-gray-500"
>{assistant?.review?.toLocaleUpperCase()}</span
>
{#if !assistant?.createdByMe}
<form method="POST" action="?/delete" use:enhance>
<button
type="submit"
class="flex items-center text-red-600 underline"
onclick={(event) => {
if (!confirm("Are you sure you want to delete this assistant?")) {
event.preventDefault();
}
}}
>
<CarbonTrash class="mr-1.5 inline text-xs" />Delete
</button>
</form>
{/if}
{#if assistant?.review === ReviewStatus.PRIVATE}
<form method="POST" action="?/approve" use:enhance>
<button type="submit" class="flex items-center text-green-600 underline">
<CarbonStar class="mr-1.5 inline text-xs" />Force feature</button
>
</form>
{/if}
{#if assistant?.review === ReviewStatus.PENDING}
<form method="POST" action="?/approve" use:enhance>
<button type="submit" class="flex items-center text-green-600 underline">
<CarbonStar class="mr-1.5 inline text-xs" />Approve</button
>
</form>
<form method="POST" action="?/deny" use:enhance>
<button type="submit" class="flex items-center text-red-600">
<span class="mr-1.5 font-light no-underline">X</span>
<span class="underline">Deny</span>
</button>
</form>
{/if}
{#if assistant?.review === ReviewStatus.APPROVED || assistant?.review === ReviewStatus.DENIED}
<form method="POST" action="?/unrequest" use:enhance>
<button type="submit" class="flex items-center text-red-600 underline">
<CarbonLock class="mr-1.5 inline text-xs " />Reset review</button
>
</form>
{/if}
{/if}
{#if assistant?.createdByMe && assistant?.review === ReviewStatus.PRIVATE}
<form
method="POST"
action="?/request"
use:enhance={async ({ cancel }) => {
const confirmed = confirm(
"Are you sure you want to request this assistant to be featured? Make sure you have tried the assistant and that it works as expected. "
);
if (!confirmed) {
cancel();
}
}}
>
<button type="submit" class="flex items-center underline">
<CarbonStar class="mr-1.5 inline text-xs" />Request to be featured</button
>
</form>
{/if}
</div>
</div>
</div>
<div>
<h2 class="text-lg font-semibold">Direct URL</h2>
<p class="pb-2 text-sm text-gray-500">Share this link for people to use your assistant.</p>
<div
class="flex flex-row gap-2 rounded-lg border-2 border-gray-200 bg-gray-100 py-2 pl-3 pr-1.5"
>
<input disabled class="flex-1 truncate bg-inherit" value={shareUrl} />
<CopyToClipBoardBtn
value={shareUrl}
classNames="!border-none !shadow-none !py-0 !px-1 !rounded-md"
>
<div class="flex items-center gap-1.5 text-gray-500 hover:underline">
<CarbonLink />Copy
</div>
</CopyToClipBoardBtn>
</div>
</div>
<!-- two columns for big screen, single column for small screen -->
<div class="mb-12 mt-3">
<h2 class="mb-2 inline font-semibold">System Instructions</h2>
<div
id="System Instructions"
class="overlow-y-auto mt-2 box-border h-fit max-h-[240px] w-full overflow-y-auto whitespace-pre-line rounded-lg border-2 border-gray-200 bg-gray-100 p-2 disabled:cursor-not-allowed 2xl:max-h-[310px]"
>
{#if assistant?.dynamicPrompt}
{#each prepromptTags as tag}
{#if (tag.startsWith("{{") && tag.endsWith("}}") && (tag.includes("get=") || tag.includes("post=") || tag.includes("url="))) || tag.includes("today")}
{@const url = tag.match(/(?:get|post|url)=(.*?)}}/)?.[1] ?? ""}
<a
target="_blank"
href={url.startsWith("http") ? url : `//${url}`}
class="break-words rounded-lg bg-blue-100 px-1 py-0.5 text-blue-800 hover:underline"
>
{tag}</a
>
{:else}
{tag}
{/if}
{/each}
{:else}
{assistant?.preprompt}
{/if}
</div>
{#if assistant?.tools?.length}
<div class="mt-4">
<div class="mb-1 flex items-center gap-1">
<span
class="inline-grid size-5 place-items-center rounded-full bg-purple-500/10"
title="This assistant uses the websearch."
>
<CarbonTools class="text-xs text-purple-600" />
</span>
<h2 class="font-semibold">Tools</h2>
</div>
<p class="w-full text-sm text-gray-500">
This Assistant has access to the following tools:
</p>
<ul class="mr-2 mt-2 flex flex-wrap gap-2.5 text-sm text-gray-800">
{#each assistant.tools as tool}
<ToolBadge toolId={tool} />
{/each}
</ul>
</div>
{/if}
{#if hasRag}
<div class="mt-4">
<div class="mb-1 flex items-center gap-1">
<span
class="inline-grid size-5 place-items-center rounded-full bg-blue-500/10"
title="This assistant uses the websearch."
>
<IconInternet classNames="text-sm text-blue-600" />
</span>
<h2 class=" font-semibold">Internet Access</h2>
</div>
{#if assistant?.rag?.allowAllDomains}
<p class="text-sm text-gray-500">
This Assistant uses Web Search to find information on the Internet.
</p>
{:else if !!assistant?.rag?.allowedDomains && assistant?.rag?.allowedDomains.length}
<p class="pb-4 text-sm text-gray-500">
This Assistant can use Web Search on the following domains:
</p>
<ul class="mr-2 flex flex-wrap gap-2.5 text-sm text-gray-800">
{#each assistant?.rag?.allowedDomains as domain}
<li
class="break-all rounded-lg border border-gray-200 bg-gray-100 px-2 py-0.5 leading-tight decoration-gray-400"
>
<a target="_blank" class="underline" href={domain}>{domain}</a>
</li>
{/each}
</ul>
{:else if !!assistant?.rag?.allowedLinks && assistant?.rag?.allowedLinks.length}
<p class="pb-4 text-sm text-gray-500">This Assistant can browse the following links:</p>
<ul class="mr-2 flex flex-wrap gap-2.5 text-sm text-gray-800">
{#each assistant?.rag?.allowedLinks as link}
<li
class="break-all rounded-lg border border-gray-200 bg-gray-100 px-2 py-0.5 leading-tight decoration-gray-400"
>
<a target="_blank" class="underline" href={link}>{link}</a>
</li>
{/each}
</ul>
{/if}
{#if assistant?.dynamicPrompt}
<p class="text-sm text-gray-500">
This Assistant has dynamic prompts enabled and can make requests to external services.
</p>
{/if}
</div>
{/if}
</div>
</div>
| chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.svelte/0 | {
"file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.svelte",
"repo_id": "chat-ui",
"token_count": 5426
} |
import { base } from "$app/paths";
import { collections } from "$lib/server/database.js";
import { toolFromConfigs } from "$lib/server/tools/index.js";
import { ReviewStatus } from "$lib/types/Review.js";
import { redirect } from "@sveltejs/kit";
import { ObjectId } from "mongodb";
export const load = async ({ params, locals }) => {
const tool = await collections.tools.findOne({ _id: new ObjectId(params.toolId) });
if (!tool) {
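// Not stored in the database: fall back to the built-in tools defined in the server config.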
const tool = toolFromConfigs.find((el) => el._id.toString() === params.toolId);
if (!tool) {
redirect(302, `${base}/tools`);
}
return {
tool: {
...tool,
_id: tool._id.toString(),
call: undefined,
createdById: null,
createdByName: null,
createdByMe: false,
reported: false,
review: ReviewStatus.APPROVED,
},
};
}
const reported = await collections.reports.findOne({
contentId: tool._id,
object: "tool",
});
return {
tool: {
...tool,
_id: tool._id.toString(),
call: undefined,
createdById: tool.createdById.toString(),
createdByMe:
tool.createdById.toString() === (locals.user?._id ?? locals.sessionId).toString(),
reported: !!reported,
},
};
};
| chat-ui/src/routes/tools/[toolId]/+layout.server.ts/0 | {
"file_path": "chat-ui/src/routes/tools/[toolId]/+layout.server.ts",
"repo_id": "chat-ui",
"token_count": 450
} |
<svg xmlns="http://www.w3.org/2000/svg" width="32" height="32" fill="none">
<path
fill="#FFD21E"
d="M4 15.55C4 9.72 8.72 5 14.55 5h4.11a9.34 9.34 0 1 1 0 18.68H7.58l-2.89 2.8a.41.41 0 0 1-.69-.3V15.55Z"
/>
<path
fill="#32343D"
d="M19.63 12.48c.37.14.52.9.9.7.71-.38.98-1.27.6-1.98a1.46 1.46 0 0 0-1.98-.61 1.47 1.47 0 0 0-.6 1.99c.17.34.74-.21 1.08-.1ZM12.72 12.48c-.37.14-.52.9-.9.7a1.47 1.47 0 0 1-.6-1.98 1.46 1.46 0 0 1 1.98-.61c.71.38.98 1.27.6 1.99-.18.34-.74-.21-1.08-.1ZM16.24 19.55c2.89 0 3.82-2.58 3.82-3.9 0-1.33-1.71.7-3.82.7-2.1 0-3.8-2.03-3.8-.7 0 1.32.92 3.9 3.8 3.9Z"
/>
<path
fill="#FF323D"
d="M18.56 18.8c-.57.44-1.33.75-2.32.75-.92 0-1.65-.27-2.2-.68.3-.63.87-1.11 1.55-1.32.12-.03.24.17.36.38.12.2.24.4.37.4s.26-.2.39-.4.26-.4.38-.36a2.56 2.56 0 0 1 1.47 1.23Z"
/>
</svg>
| chat-ui/static/huggingchat/logo.svg/0 | {
"file_path": "chat-ui/static/huggingchat/logo.svg",
"repo_id": "chat-ui",
"token_count": 523
} |
<p align="center">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-dark.svg">
<source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg">
<img alt="Hugging Face Datasets Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg" width="352" height="59" style="max-width: 100%;">
</picture>
<br/>
<br/>
</p>
<p align="center">
<a href="https://github.com/huggingface/datasets/actions/workflows/ci.yml?query=branch%3Amain"><img alt="Build" src="https://github.com/huggingface/datasets/actions/workflows/ci.yml/badge.svg?branch=main"></a>
<a href="https://github.com/huggingface/datasets/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue"></a>
<a href="https://huggingface.co/docs/datasets/index.html"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/datasets/index.html.svg?down_color=red&down_message=offline&up_message=online"></a>
<a href="https://github.com/huggingface/datasets/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/datasets.svg"></a>
<a href="https://huggingface.co/datasets/"><img alt="Number of datasets" src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen"></a>
<a href="CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg"></a>
<a href="https://zenodo.org/badge/latestdoi/250213286"><img src="https://zenodo.org/badge/250213286.svg" alt="DOI"></a>
</p>
🤗 Datasets is a lightweight library providing **two** main features:
- **one-line dataloaders for many public datasets**: one-liners to download and pre-process any of the major public datasets (image datasets, audio datasets, text datasets in 467 languages and dialects, etc.) provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). With a simple command like `squad_dataset = load_dataset("squad")`, get any of these datasets ready to use in a dataloader for training/evaluating an ML model (Numpy/Pandas/PyTorch/TensorFlow/JAX),
- **efficient data pre-processing**: simple, fast and reproducible data pre-processing for the public datasets as well as your own local datasets in CSV, JSON, text, PNG, JPEG, WAV, MP3, Parquet, etc. With simple commands like `processed_dataset = dataset.map(process_example)`, efficiently prepare the dataset for inspection and ML model evaluation and training.
[🎓 **Documentation**](https://huggingface.co/docs/datasets/) [🔎 **Find a dataset in the Hub**](https://huggingface.co/datasets) [🌟 **Share a dataset on the Hub**](https://huggingface.co/docs/datasets/share)
<h3 align="center">
<a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/datasets/main/docs/source/imgs/course_banner.png"></a>
</h3>
🤗 Datasets is designed to let the community easily add and share new datasets.
🤗 Datasets has many additional interesting features:
- Thrive on large datasets: 🤗 Datasets naturally frees the user from RAM limitations; all datasets are memory-mapped using an efficient zero-serialization cost backend (Apache Arrow).
- Smart caching: never wait for your data to process several times.
- Lightweight and fast with a transparent and pythonic API (multi-processing/caching/memory-mapping).
- Built-in interoperability with NumPy, pandas, PyTorch, TensorFlow 2 and JAX.
- Native support for audio and image data.
- Enable streaming mode to save disk space and start iterating over the dataset immediately.
🤗 Datasets originated from a fork of the awesome [TensorFlow Datasets](https://github.com/tensorflow/datasets) and the HuggingFace team want to deeply thank the TensorFlow Datasets team for building this amazing library. More details on the differences between 🤗 Datasets and `tfds` can be found in the section [Main differences between 🤗 Datasets and `tfds`](#main-differences-between--datasets-and-tfds).
# Installation
## With pip
🤗 Datasets can be installed from PyPI and should be installed in a virtual environment (venv or conda, for instance):
```bash
pip install datasets
```
## With conda
🤗 Datasets can be installed using conda as follows:
```bash
conda install -c huggingface -c conda-forge datasets
```
Follow the installation pages of TensorFlow and PyTorch to see how to install them with conda.
For more details on installation, check the installation page in the documentation: https://huggingface.co/docs/datasets/installation
## Installation to use with PyTorch/TensorFlow/pandas
If you plan to use 🤗 Datasets with PyTorch (1.0+), TensorFlow (2.2+) or pandas, you should also install PyTorch, TensorFlow or pandas.
For more details on using the library with NumPy, pandas, PyTorch or TensorFlow, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart
# Usage
🤗 Datasets is made to be very simple to use - the API is centered around a single function, `datasets.load_dataset(dataset_name, **kwargs)`, that instantiates a dataset.
This library can be used for text, image, audio, and other types of datasets. Here is a quick example that loads a text dataset:
```python
from datasets import load_dataset
# Print all the available datasets
from huggingface_hub import list_datasets
print([dataset.id for dataset in list_datasets()])
# Load a dataset and print the first example in the training set
squad_dataset = load_dataset('squad')
print(squad_dataset['train'][0])
# Process the dataset - add a column with the length of the context texts
dataset_with_length = squad_dataset.map(lambda x: {"length": len(x["context"])})
# Process the dataset - tokenize the context texts (using a tokenizer from the 🤗 Transformers library)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
tokenized_dataset = squad_dataset.map(lambda x: tokenizer(x['context']), batched=True)
```
If your dataset is bigger than your disk or if you don't want to wait to download the data, you can use streaming:
```python
# If you want to use the dataset immediately and efficiently stream the data as you iterate over the dataset
image_dataset = load_dataset('cifar100', streaming=True)
for example in image_dataset["train"]:
break
```
For more details on using the library, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart and the specific pages on:
- Loading a dataset: https://huggingface.co/docs/datasets/loading
- What's in a Dataset: https://huggingface.co/docs/datasets/access
- Processing data with 🤗 Datasets: https://huggingface.co/docs/datasets/process
- Processing audio data: https://huggingface.co/docs/datasets/audio_process
- Processing image data: https://huggingface.co/docs/datasets/image_process
- Processing text data: https://huggingface.co/docs/datasets/nlp_process
- Streaming a dataset: https://huggingface.co/docs/datasets/stream
- Writing your own dataset loading script: https://huggingface.co/docs/datasets/dataset_script
- etc.
# Add a new dataset to the Hub
We have a very detailed step-by-step guide to add a new dataset to the datasets already provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets).
You can find:
- [how to upload a dataset to the Hub using your web browser or Python](https://huggingface.co/docs/datasets/upload_dataset) and also
- [how to upload it using Git](https://huggingface.co/docs/datasets/share).
# Main differences between 🤗 Datasets and `tfds`
If you are familiar with the great TensorFlow Datasets, here are the main differences between 🤗 Datasets and `tfds`:
- the scripts in 🤗 Datasets are not provided within the library but are queried, downloaded/cached and dynamically loaded upon request
- the backend serialization of 🤗 Datasets is based on [Apache Arrow](https://arrow.apache.org/) instead of TF Records and leverages Python dataclasses for info and features, with some diverging features (we mostly don't do encoding and store the raw data as much as possible in the backend serialization cache).
- the user-facing dataset object of 🤗 Datasets is not a `tf.data.Dataset` but a built-in framework-agnostic dataset class with methods inspired by what we like in `tf.data` (like a `map()` method). It basically wraps a memory-mapped Arrow table cache, as sketched below.
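As a minimal sketch of how this framework-agnostic object behaves (the `squad` dataset is just an example), the same `Dataset` can return rows as plain Python objects or as NumPy arrays on top of the same memory-mapped Arrow table:
```python
from datasets import load_dataset

ds = load_dataset("squad", split="train")  # Arrow-backed, memory-mapped Dataset
print(type(ds[0]))                         # a plain Python dict for a single row

ds_np = ds.with_format("numpy")            # same data, NumPy-formatted on access
print(type(ds_np[:2]["id"]))               # numpy.ndarray
```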
# Disclaimers
🤗 Datasets may run Python code defined by the dataset authors to parse certain data formats or structures. For security reasons, we ask users to:
- check the dataset scripts they're going to run beforehand and
- pin the `revision` of the repositories they use.
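For example, a pinned load could look like this (an illustrative sketch; the repository name and revision are hypothetical):
```python
from datasets import load_dataset

# Pin the dataset repository to a specific tag, branch or commit sha
ds = load_dataset("username/some_dataset", revision="v1.0.0")
```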
If you're a dataset owner and wish to update any part of it (description, citation, license, etc.), or do not want your dataset to be included in the Hugging Face Hub, please get in touch by opening a discussion or a pull request in the Community tab of the dataset page. Thanks for your contribution to the ML community!
## BibTeX
If you want to cite our 🤗 Datasets library, you can use our [paper](https://arxiv.org/abs/2109.02846):
```bibtex
@inproceedings{lhoest-etal-2021-datasets,
title = "Datasets: A Community Library for Natural Language Processing",
author = "Lhoest, Quentin and
Villanova del Moral, Albert and
Jernite, Yacine and
Thakur, Abhishek and
von Platen, Patrick and
Patil, Suraj and
Chaumond, Julien and
Drame, Mariama and
Plu, Julien and
Tunstall, Lewis and
Davison, Joe and
{\v{S}}a{\v{s}}ko, Mario and
Chhablani, Gunjan and
Malik, Bhavitvya and
Brandeis, Simon and
Le Scao, Teven and
Sanh, Victor and
Xu, Canwen and
Patry, Nicolas and
McMillan-Major, Angelina and
Schmid, Philipp and
Gugger, Sylvain and
Delangue, Cl{\'e}ment and
Matussi{\`e}re, Th{\'e}o and
Debut, Lysandre and
Bekman, Stas and
Cistac, Pierric and
Goehringer, Thibault and
Mustar, Victor and
Lagunas, Fran{\c{c}}ois and
Rush, Alexander and
Wolf, Thomas",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-demo.21",
pages = "175--184",
abstract = "The scale, variety, and quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standardize end-user interfaces, versioning, and documentation, while providing a lightweight front-end that behaves similarly for small datasets as for internet-scale corpora. The design of the library incorporates a distributed, community-driven approach to adding datasets and documenting usage. After a year of development, the library now includes more than 650 unique datasets, has more than 250 contributors, and has helped support a variety of novel cross-dataset research projects and shared tasks. The library is available at https://github.com/huggingface/datasets.",
eprint={2109.02846},
archivePrefix={arXiv},
primaryClass={cs.CL},
}
```
If you need to cite a specific version of our 🤗 Datasets library for reproducibility, you can use the corresponding version Zenodo DOI from this [list](https://zenodo.org/search?q=conceptrecid:%224817768%22&sort=-version&all_versions=True).
| datasets/README.md/0 | {
"file_path": "datasets/README.md",
"repo_id": "datasets",
"token_count": 3918
} |
# docstyle-ignore
INSTALL_CONTENT = """
# Datasets installation
! pip install datasets transformers
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/datasets.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
default_branch_name = "main"
version_prefix = ""
| datasets/docs/source/_config.py/0 | {
"file_path": "datasets/docs/source/_config.py",
"repo_id": "datasets",
"token_count": 118
} |
# Load text data
This guide shows you how to load text datasets. To learn how to load any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./loading">general loading guide</a>.
Text files are one of the most common file types for storing a dataset. By default, 🤗 Datasets samples a text file line by line to build the dataset.
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("text", data_files={"train": ["my_text_1.txt", "my_text_2.txt"], "test": "my_test_file.txt"})
# Load from a directory
>>> dataset = load_dataset("text", data_dir="path/to/text/dataset")
```
To sample a text file by paragraph or even an entire document, use the `sample_by` parameter:
```py
# Sample by paragraph
>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="paragraph")
# Sample by document
>>> dataset = load_dataset("text", data_files={"train": "my_train_file.txt", "test": "my_test_file.txt"}, sample_by="document")
```
You can also use grep patterns to load specific files:
```py
>>> from datasets import load_dataset
>>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz")
```
To load remote text files via HTTP, pass the URLs instead:
```py
>>> dataset = load_dataset("text", data_files="https://huggingface.co/datasets/lhoestq/test/resolve/main/some_text.txt")
```
To load XML data, you can use the `"xml"` loader, which is equivalent to `"text"` with `sample_by="document"`:
```py
>>> from datasets import load_dataset
>>> dataset = load_dataset("xml", data_files={"train": ["my_xml_1.xml", "my_xml_2.xml"], "test": "my_xml_file.xml"})
# Load from a directory
>>> dataset = load_dataset("xml", data_dir="path/to/xml/dataset")
```
| datasets/docs/source/nlp_load.mdx/0 | {
"file_path": "datasets/docs/source/nlp_load.mdx",
"repo_id": "datasets",
"token_count": 613
} |
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "3.2.1.dev0"
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled
from .info import DatasetInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_default_config_name,
get_dataset_infos,
get_dataset_split_names,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .utils import *
from .utils import logging
| datasets/src/datasets/__init__.py/0 | {
"file_path": "datasets/src/datasets/__init__.py",
"repo_id": "datasets",
"token_count": 513
} |
from typing import TypeVar
from .arrow_dataset import Dataset, _split_by_node_map_style_dataset
from .iterable_dataset import IterableDataset, _split_by_node_iterable_dataset
DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def split_dataset_by_node(dataset: DatasetType, rank: int, world_size: int) -> DatasetType:
"""
Split a dataset for the node at rank `rank` in a pool of nodes of size `world_size`.
For map-style datasets:
Each node is assigned a chunk of data, e.g. rank 0 is given the first chunk of the dataset.
To maximize data loading throughput, chunks are made of contiguous data on disk if possible.
For iterable datasets:
If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.num_shards % world_size == 0`),
then the shards are evenly assigned across the nodes, which is the most optimized.
Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples.
Args:
dataset ([`Dataset`] or [`IterableDataset`]):
The dataset to split by node.
rank (`int`):
Rank of the current node.
world_size (`int`):
Total number of nodes.
Returns:
[`Dataset`] or [`IterableDataset`]: The dataset to be used on the node at rank `rank`.
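    Example (an illustrative sketch; the dataset name and the 2-node setup are hypothetical):
        >>> from datasets import load_dataset
        >>> from datasets.distributed import split_dataset_by_node
        >>> ds = load_dataset("squad", split="train")
        >>> ds_rank0 = split_dataset_by_node(ds, rank=0, world_size=2)
        >>> len(ds_rank0)  # roughly half of the full "train" split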
"""
if isinstance(dataset, Dataset):
return _split_by_node_map_style_dataset(dataset, rank=rank, world_size=world_size)
else:
return _split_by_node_iterable_dataset(dataset, rank=rank, world_size=world_size)
| datasets/src/datasets/distributed.py/0 | {
"file_path": "datasets/src/datasets/distributed.py",
"repo_id": "datasets",
"token_count": 582
} |
# Copyright 2020 The HuggingFace Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from collections.abc import Mapping, MutableMapping
from functools import partial
# Lint as: python3
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
import pyarrow as pa
from packaging import version
from .. import config
from ..features import Features
from ..features.features import _ArrayXDExtensionType, _is_zero_copy_only, decode_nested_example, pandas_types_mapper
from ..table import Table
from ..utils.py_utils import no_op_if_value_is_null
T = TypeVar("T")
RowFormat = TypeVar("RowFormat")
ColumnFormat = TypeVar("ColumnFormat")
BatchFormat = TypeVar("BatchFormat")
def _is_range_contiguous(key: range) -> bool:
return key.step == 1 and key.stop >= key.start
def _raise_bad_key_type(key: Any):
raise TypeError(
f"Wrong key type: '{key}' of type '{type(key)}'. Expected one of int, slice, range, str or Iterable."
)
def _query_table_with_indices_mapping(
table: Table, key: Union[int, slice, range, str, Iterable], indices: Table
) -> pa.Table:
"""
Query a pyarrow Table to extract the subtable that corresponds to the given key.
The :obj:`indices` parameter corresponds to the indices mapping in case we want to take into
account a shuffling or an indices selection, for example.
The indices table must contain one column named "indices" of type uint64.
"""
if isinstance(key, int):
key = indices.fast_slice(key % indices.num_rows, 1).column(0)[0].as_py()
return _query_table(table, key)
if isinstance(key, slice):
key = range(*key.indices(indices.num_rows))
if isinstance(key, range):
if _is_range_contiguous(key) and key.start >= 0:
return _query_table(
table, [i.as_py() for i in indices.fast_slice(key.start, key.stop - key.start).column(0)]
)
else:
pass # treat as an iterable
if isinstance(key, str):
table = table.select([key])
return _query_table(table, indices.column(0).to_pylist())
if isinstance(key, Iterable):
return _query_table(table, [indices.fast_slice(i, 1).column(0)[0].as_py() for i in key])
_raise_bad_key_type(key)
def _query_table(table: Table, key: Union[int, slice, range, str, Iterable]) -> pa.Table:
"""
Query a pyarrow Table to extract the subtable that corresponds to the given key.
"""
if isinstance(key, int):
return table.fast_slice(key % table.num_rows, 1)
if isinstance(key, slice):
key = range(*key.indices(table.num_rows))
if isinstance(key, range):
if _is_range_contiguous(key) and key.start >= 0:
return table.fast_slice(key.start, key.stop - key.start)
else:
pass # treat as an iterable
if isinstance(key, str):
return table.table.drop([column for column in table.column_names if column != key])
if isinstance(key, Iterable):
key = np.fromiter(key, np.int64)
if len(key) == 0:
return table.table.slice(0, 0)
# don't use pyarrow.Table.take even for pyarrow >=1.0 (see https://issues.apache.org/jira/browse/ARROW-9773)
return table.fast_gather(key % table.num_rows)
_raise_bad_key_type(key)
def _is_array_with_nulls(pa_array: pa.Array) -> bool:
return pa_array.null_count > 0
class BaseArrowExtractor(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
Arrow extractor are used to extract data from pyarrow tables.
It makes it possible to extract rows, columns and batches.
These three extractions types have to be implemented.
"""
def extract_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def extract_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def extract_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError
def _unnest(py_dict: Dict[str, List[T]]) -> Dict[str, T]:
"""Return the first element of a batch (dict) as a row (dict)"""
return {key: array[0] for key, array in py_dict.items()}
class SimpleArrowExtractor(BaseArrowExtractor[pa.Table, pa.Array, pa.Table]):
def extract_row(self, pa_table: pa.Table) -> pa.Table:
return pa_table
def extract_column(self, pa_table: pa.Table) -> pa.Array:
return pa_table.column(0)
def extract_batch(self, pa_table: pa.Table) -> pa.Table:
return pa_table
class PythonArrowExtractor(BaseArrowExtractor[dict, list, dict]):
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(pa_table.to_pydict())
def extract_column(self, pa_table: pa.Table) -> list:
return pa_table.column(0).to_pylist()
def extract_batch(self, pa_table: pa.Table) -> dict:
return pa_table.to_pydict()
class NumpyArrowExtractor(BaseArrowExtractor[dict, np.ndarray, dict]):
def __init__(self, **np_array_kwargs):
self.np_array_kwargs = np_array_kwargs
def extract_row(self, pa_table: pa.Table) -> dict:
return _unnest(self.extract_batch(pa_table))
def extract_column(self, pa_table: pa.Table) -> np.ndarray:
return self._arrow_array_to_numpy(pa_table[pa_table.column_names[0]])
def extract_batch(self, pa_table: pa.Table) -> dict:
return {col: self._arrow_array_to_numpy(pa_table[col]) for col in pa_table.column_names}
def _arrow_array_to_numpy(self, pa_array: pa.Array) -> np.ndarray:
if isinstance(pa_array, pa.ChunkedArray):
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and all(
not _is_array_with_nulls(chunk) for chunk in pa_array.chunks
)
array: List = [
row for chunk in pa_array.chunks for row in chunk.to_numpy(zero_copy_only=zero_copy_only)
]
else:
if isinstance(pa_array.type, _ArrayXDExtensionType):
# don't call to_pylist() to preserve dtype of the fixed-size array
zero_copy_only = _is_zero_copy_only(pa_array.type.storage_dtype, unnest=True)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only)
else:
zero_copy_only = _is_zero_copy_only(pa_array.type) and not _is_array_with_nulls(pa_array)
array: List = pa_array.to_numpy(zero_copy_only=zero_copy_only).tolist()
if len(array) > 0:
if any(
(isinstance(x, np.ndarray) and (x.dtype == object or x.shape != array[0].shape))
or (isinstance(x, float) and np.isnan(x))
for x in array
):
if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
return np.asarray(array, dtype=object)
return np.array(array, copy=False, dtype=object)
if np.lib.NumpyVersion(np.__version__) >= "2.0.0b1":
return np.asarray(array)
else:
return np.array(array, copy=False)
class PandasArrowExtractor(BaseArrowExtractor[pd.DataFrame, pd.Series, pd.DataFrame]):
def extract_row(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.slice(length=1).to_pandas(types_mapper=pandas_types_mapper)
def extract_column(self, pa_table: pa.Table) -> pd.Series:
return pa_table.select([0]).to_pandas(types_mapper=pandas_types_mapper)[pa_table.column_names[0]]
def extract_batch(self, pa_table: pa.Table) -> pd.DataFrame:
return pa_table.to_pandas(types_mapper=pandas_types_mapper)
class PythonFeaturesDecoder:
def __init__(
self, features: Optional[Features], token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None
):
self.features = features
self.token_per_repo_id = token_per_repo_id
def decode_row(self, row: dict) -> dict:
return self.features.decode_example(row, token_per_repo_id=self.token_per_repo_id) if self.features else row
def decode_column(self, column: list, column_name: str) -> list:
return self.features.decode_column(column, column_name) if self.features else column
def decode_batch(self, batch: dict) -> dict:
return self.features.decode_batch(batch) if self.features else batch
class PandasFeaturesDecoder:
def __init__(self, features: Optional[Features]):
self.features = features
def decode_row(self, row: pd.DataFrame) -> pd.DataFrame:
decode = (
{
column_name: no_op_if_value_is_null(partial(decode_nested_example, feature))
for column_name, feature in self.features.items()
if self.features._column_requires_decoding[column_name]
}
if self.features
else {}
)
if decode:
row[list(decode.keys())] = row.transform(decode)
return row
def decode_column(self, column: pd.Series, column_name: str) -> pd.Series:
decode = (
no_op_if_value_is_null(partial(decode_nested_example, self.features[column_name]))
if self.features and column_name in self.features and self.features._column_requires_decoding[column_name]
else None
)
if decode:
column = column.transform(decode)
return column
def decode_batch(self, batch: pd.DataFrame) -> pd.DataFrame:
return self.decode_row(batch)
class LazyDict(MutableMapping):
"""A dictionary backed by Arrow data. The values are formatted on-the-fly when accessing the dictionary."""
def __init__(self, pa_table: pa.Table, formatter: "Formatter"):
self.pa_table = pa_table
self.formatter = formatter
self.data = {key: None for key in pa_table.column_names}
self.keys_to_format = set(self.data.keys())
def __len__(self):
return len(self.data)
def __getitem__(self, key):
value = self.data[key]
if key in self.keys_to_format:
value = self.format(key)
self.data[key] = value
self.keys_to_format.remove(key)
return value
def __setitem__(self, key, value):
if key in self.keys_to_format:
self.keys_to_format.remove(key)
self.data[key] = value
def __delitem__(self, key) -> None:
if key in self.keys_to_format:
self.keys_to_format.remove(key)
del self.data[key]
def __iter__(self):
return iter(self.data)
def __contains__(self, key):
return key in self.data
def __repr__(self):
self._format_all()
return repr(self.data)
if config.PY_VERSION >= version.parse("3.9"):
# merging with the union ("|") operator is supported in Python 3.9+
def __or__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = inst.data | other.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = inst.data | other
return inst
return NotImplemented
def __ror__(self, other):
if isinstance(other, LazyDict):
inst = self.copy()
other = other.copy()
other._format_all()
inst.keys_to_format -= other.data.keys()
inst.data = other.data | inst.data
return inst
if isinstance(other, dict):
inst = self.copy()
inst.keys_to_format -= other.keys()
inst.data = other | inst.data
return inst
return NotImplemented
def __ior__(self, other):
if isinstance(other, LazyDict):
other = other.copy()
other._format_all()
self.keys_to_format -= other.data.keys()
self.data |= other.data
else:
self.keys_to_format -= other.keys()
self.data |= other
return self
def __copy__(self):
# Identical to `UserDict.__copy__`
inst = self.__class__.__new__(self.__class__)
inst.__dict__.update(self.__dict__)
# Create a copy and avoid triggering descriptors
inst.__dict__["data"] = self.__dict__["data"].copy()
inst.__dict__["keys_to_format"] = self.__dict__["keys_to_format"].copy()
return inst
def copy(self):
import copy
return copy.copy(self)
@classmethod
def fromkeys(cls, iterable, value=None):
raise NotImplementedError
def format(self, key):
raise NotImplementedError
def _format_all(self):
for key in self.keys_to_format:
self.data[key] = self.format(key)
self.keys_to_format.clear()
class LazyRow(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))[0]
class LazyBatch(LazyDict):
def format(self, key):
return self.formatter.format_column(self.pa_table.select([key]))
class Formatter(Generic[RowFormat, ColumnFormat, BatchFormat]):
"""
A formatter is an object that extracts and formats data from pyarrow tables.
It defines the formatting for rows, columns and batches.
"""
simple_arrow_extractor = SimpleArrowExtractor
python_arrow_extractor = PythonArrowExtractor
numpy_arrow_extractor = NumpyArrowExtractor
pandas_arrow_extractor = PandasArrowExtractor
def __init__(
self,
features: Optional[Features] = None,
token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None,
):
self.features = features
self.token_per_repo_id = token_per_repo_id
self.python_features_decoder = PythonFeaturesDecoder(self.features, self.token_per_repo_id)
self.pandas_features_decoder = PandasFeaturesDecoder(self.features)
def __call__(self, pa_table: pa.Table, query_type: str) -> Union[RowFormat, ColumnFormat, BatchFormat]:
if query_type == "row":
return self.format_row(pa_table)
elif query_type == "column":
return self.format_column(pa_table)
elif query_type == "batch":
return self.format_batch(pa_table)
def format_row(self, pa_table: pa.Table) -> RowFormat:
raise NotImplementedError
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
raise NotImplementedError
def format_batch(self, pa_table: pa.Table) -> BatchFormat:
raise NotImplementedError
class TensorFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
def recursive_tensorize(self, data_struct: dict):
raise NotImplementedError
class TableFormatter(Formatter[RowFormat, ColumnFormat, BatchFormat]):
table_type: str
column_type: str
class ArrowFormatter(TableFormatter[pa.Table, pa.Array, pa.Table]):
table_type = "arrow table"
column_type = "arrow array"
def format_row(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_row(pa_table)
def format_column(self, pa_table: pa.Table) -> pa.Array:
return self.simple_arrow_extractor().extract_column(pa_table)
def format_batch(self, pa_table: pa.Table) -> pa.Table:
return self.simple_arrow_extractor().extract_batch(pa_table)
class PythonFormatter(Formatter[Mapping, list, Mapping]):
def __init__(self, features=None, lazy=False, token_per_repo_id=None):
super().__init__(features, token_per_repo_id)
self.lazy = lazy
def format_row(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyRow(pa_table, self)
row = self.python_arrow_extractor().extract_row(pa_table)
row = self.python_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> list:
column = self.python_arrow_extractor().extract_column(pa_table)
column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> Mapping:
if self.lazy:
return LazyBatch(pa_table, self)
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return batch
class PandasFormatter(TableFormatter[pd.DataFrame, pd.Series, pd.DataFrame]):
table_type = "pandas dataframe"
column_type = "pandas series"
def format_row(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_row(pa_table)
row = self.pandas_features_decoder.decode_row(row)
return row
def format_column(self, pa_table: pa.Table) -> pd.Series:
column = self.pandas_arrow_extractor().extract_column(pa_table)
column = self.pandas_features_decoder.decode_column(column, pa_table.column_names[0])
return column
def format_batch(self, pa_table: pa.Table) -> pd.DataFrame:
row = self.pandas_arrow_extractor().extract_batch(pa_table)
row = self.pandas_features_decoder.decode_batch(row)
return row
class CustomFormatter(Formatter[dict, ColumnFormat, dict]):
"""
A user-defined custom formatter function defined by a ``transform``.
The transform must take as input a batch of data extracted from an arrow table using the python extractor,
and return a batch.
If the output batch is not a dict, then output_all_columns won't work.
If the output batch has several fields, then querying a single column won't work since we don't know which field
to return.
"""
def __init__(self, transform: Callable[[dict], dict], features=None, token_per_repo_id=None, **kwargs):
super().__init__(features=features, token_per_repo_id=token_per_repo_id)
self.transform = transform
def format_row(self, pa_table: pa.Table) -> dict:
formatted_batch = self.format_batch(pa_table)
try:
return _unnest(formatted_batch)
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict of sequences to be able to pick a row, but got {formatted_batch}"
) from exc
def format_column(self, pa_table: pa.Table) -> ColumnFormat:
formatted_batch = self.format_batch(pa_table)
if hasattr(formatted_batch, "keys"):
if len(formatted_batch.keys()) > 1:
raise TypeError(
"Tried to query a column but the custom formatting function returns too many columns. "
f"Only one column was expected but got columns {list(formatted_batch.keys())}."
)
else:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
)
try:
return formatted_batch[pa_table.column_names[0]]
except Exception as exc:
raise TypeError(
f"Custom formatting function must return a dict to be able to pick a row, but got {formatted_batch}"
) from exc
def format_batch(self, pa_table: pa.Table) -> dict:
batch = self.python_arrow_extractor().extract_batch(pa_table)
batch = self.python_features_decoder.decode_batch(batch)
return self.transform(batch)
def _check_valid_column_key(key: str, columns: List[str]) -> None:
if key not in columns:
raise KeyError(f"Column {key} not in the dataset. Current columns in the dataset: {columns}")
def _check_valid_index_key(key: Union[int, slice, range, Iterable], size: int) -> None:
if isinstance(key, int):
if (key < 0 and key + size < 0) or (key >= size):
raise IndexError(f"Invalid key: {key} is out of bounds for size {size}")
return
elif isinstance(key, slice):
pass
elif isinstance(key, range):
if len(key) > 0:
_check_valid_index_key(max(key), size=size)
_check_valid_index_key(min(key), size=size)
elif isinstance(key, Iterable):
if len(key) > 0:
_check_valid_index_key(int(max(key)), size=size)
_check_valid_index_key(int(min(key)), size=size)
else:
_raise_bad_key_type(key)
def key_to_query_type(key: Union[int, slice, range, str, Iterable]) -> str:
if isinstance(key, int):
return "row"
elif isinstance(key, str):
return "column"
elif isinstance(key, (slice, range, Iterable)):
return "batch"
_raise_bad_key_type(key)
def query_table(
table: Table,
key: Union[int, slice, range, str, Iterable],
indices: Optional[Table] = None,
) -> pa.Table:
"""
Query a Table to extract the subtable that corresponds to the given key.
Args:
table (``datasets.table.Table``): The input Table to query from
key (``Union[int, slice, range, str, Iterable]``): The key can be of different types:
- an integer i: the subtable containing only the i-th row
- a slice [i:j:k]: the subtable containing the rows that correspond to this slice
- a range(i, j, k): the subtable containing the rows that correspond to this range
- a string c: the subtable containing all the rows but only the column c
- an iterable l: the subtable that is the concatenation of all the i-th rows for all i in the iterable
indices (Optional ``datasets.table.Table``): If not None, it is used to re-map the given key to the table rows.
The indices table must contain one column named "indices" of type uint64.
This is used in case of shuffling or rows selection.
Returns:
``pyarrow.Table``: the result of the query on the input table
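    Example (an illustrative sketch; the in-memory table below is hypothetical):
        >>> from datasets.table import InMemoryTable
        >>> t = InMemoryTable.from_pydict({"a": [1, 2, 3], "b": ["x", "y", "z"]})
        >>> query_table(t, 0).to_pydict()  # a single row
        {'a': [1], 'b': ['x']}
        >>> query_table(t, "b").to_pydict()  # a single column
        {'b': ['x', 'y', 'z']}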
"""
# Check if key is valid
if not isinstance(key, (int, slice, range, str, Iterable)):
try:
key = operator.index(key)
except TypeError:
_raise_bad_key_type(key)
if isinstance(key, str):
_check_valid_column_key(key, table.column_names)
else:
size = indices.num_rows if indices is not None else table.num_rows
_check_valid_index_key(key, size)
# Query the main table
if indices is None:
pa_subtable = _query_table(table, key)
else:
pa_subtable = _query_table_with_indices_mapping(table, key, indices=indices)
return pa_subtable
def format_table(
table: Table,
key: Union[int, slice, range, str, Iterable],
formatter: Formatter,
format_columns: Optional[list] = None,
output_all_columns=False,
):
"""
Format a Table depending on the key that was used and a Formatter object.
Args:
table (``datasets.table.Table``): The input Table to format
key (``Union[int, slice, range, str, Iterable]``): Depending on the key that was used, the formatter formats
the table as either a row, a column or a batch.
formatter (``datasets.formatting.formatting.Formatter``): Any subclass of a Formatter such as
PythonFormatter, NumpyFormatter, etc.
format_columns (:obj:`List[str]`, optional): if not None, it defines the columns that will be formatted using the
given formatter. Other columns are discarded (unless ``output_all_columns`` is True)
output_all_columns (:obj:`bool`, defaults to False). If True, the formatted output is completed using the columns
that are not in the ``format_columns`` list. For these columns, the PythonFormatter is used.
Returns:
A row, column or batch formatted object defined by the Formatter:
- the PythonFormatter returns a dictionary for a row or a batch, and a list for a column.
- the NumpyFormatter returns a dictionary for a row or a batch, and a np.array for a column.
- the PandasFormatter returns a pd.DataFrame for a row or a batch, and a pd.Series for a column.
- the TorchFormatter returns a dictionary for a row or a batch, and a torch.Tensor for a column.
- the TFFormatter returns a dictionary for a row or a batch, and a tf.Tensor for a column.
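    Example (an illustrative sketch; the table, key and formatter below are hypothetical):
        >>> from datasets.table import InMemoryTable
        >>> from datasets.formatting import PythonFormatter
        >>> t = InMemoryTable.from_pydict({"a": [1, 2, 3]})
        >>> pa_subtable = query_table(t, slice(0, 2))
        >>> format_table(pa_subtable, key=slice(0, 2), formatter=PythonFormatter())
        {'a': [1, 2]}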
"""
if isinstance(table, Table):
pa_table = table.table
else:
pa_table = table
query_type = key_to_query_type(key)
python_formatter = PythonFormatter(features=formatter.features)
if format_columns is None:
return formatter(pa_table, query_type=query_type)
elif query_type == "column":
if key in format_columns:
return formatter(pa_table, query_type)
else:
return python_formatter(pa_table, query_type=query_type)
else:
pa_table_to_format = pa_table.drop(col for col in pa_table.column_names if col not in format_columns)
formatted_output = formatter(pa_table_to_format, query_type=query_type)
if output_all_columns:
if isinstance(formatted_output, MutableMapping):
pa_table_with_remaining_columns = pa_table.drop(
col for col in pa_table.column_names if col in format_columns
)
remaining_columns_dict = python_formatter(pa_table_with_remaining_columns, query_type=query_type)
formatted_output.update(remaining_columns_dict)
else:
raise TypeError(
f"Custom formatting function must return a dict to work with output_all_columns=True, but got {formatted_output}"
)
return formatted_output
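# Hedged usage sketch (added for illustration, not part of the original module):
# the usual pattern is to query the table first and then format the returned
# pyarrow subtable; `InMemoryTable` from `datasets.table` is assumed for
# building a small in-memory table.
def _example_format_table_usage():
    from datasets.table import InMemoryTable

    table = InMemoryTable.from_pydict({"text": ["a", "b"], "label": [0, 1]})
    formatter = PythonFormatter()
    row = format_table(query_table(table, 0), key=0, formatter=formatter)
    # row is a dict, e.g. {"text": "a", "label": 0}
    column = format_table(query_table(table, "text"), key="text", formatter=formatter)
    # column is a list, e.g. ["a", "b"]
    return row, column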
| datasets/src/datasets/formatting/formatting.py/0 | {
"file_path": "datasets/src/datasets/formatting/formatting.py",
"repo_id": "datasets",
"token_count": 11243
} |
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import tqdm as hf_tqdm
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
def __init__(
self,
sql: Union[str, "sqlalchemy.sql.Selectable"],
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
features: Optional[Features] = None,
cache_dir: str = None,
keep_in_memory: bool = False,
**kwargs,
):
super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
self.builder = Sql(
cache_dir=cache_dir,
features=features,
sql=sql,
con=con,
**kwargs,
)
def read(self):
download_config = None
download_mode = None
verification_mode = None
base_path = None
self.builder.download_and_prepare(
download_config=download_config,
download_mode=download_mode,
verification_mode=verification_mode,
base_path=base_path,
)
# Build dataset for splits
dataset = self.builder.as_dataset(
split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
)
return dataset
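# Hedged usage sketch (added for illustration, not part of the original module):
# reads the result of a SQL query into a Dataset with the reader above. The
# query and the connection URI are placeholders.
def _example_read_sql() -> Dataset:
    reader = SqlDatasetReader(
        sql="SELECT * FROM my_table",     # hypothetical table
        con="sqlite:///my_database.db",   # hypothetical connection URI
        keep_in_memory=True,
    )
    return reader.read()  # a datasets.Dataset built from the "train" split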
class SqlDatasetWriter:
def __init__(
self,
dataset: Dataset,
name: str,
con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
batch_size: Optional[int] = None,
num_proc: Optional[int] = None,
**to_sql_kwargs,
):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
self.dataset = dataset
self.name = name
self.con = con
self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
self.num_proc = num_proc
self.to_sql_kwargs = to_sql_kwargs
def write(self) -> int:
_ = self.to_sql_kwargs.pop("sql", None)
_ = self.to_sql_kwargs.pop("con", None)
index = self.to_sql_kwargs.pop("index", False)
written = self._write(index=index, **self.to_sql_kwargs)
return written
def _batch_sql(self, args):
offset, index, to_sql_kwargs = args
to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
batch = query_table(
table=self.dataset.data,
key=slice(offset, offset + self.batch_size),
indices=self.dataset._indices,
)
df = batch.to_pandas()
num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
return num_rows or len(df)
def _write(self, index, **to_sql_kwargs) -> int:
"""Writes the pyarrow table as SQL to a database.
Caller is responsible for opening and closing the SQL connection.
"""
written = 0
if self.num_proc is None or self.num_proc == 1:
for offset in hf_tqdm(
range(0, len(self.dataset), self.batch_size),
unit="ba",
desc="Creating SQL from Arrow format",
):
written += self._batch_sql((offset, index, to_sql_kwargs))
else:
num_rows, batch_size = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for num_rows in hf_tqdm(
pool.imap(
self._batch_sql,
[(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
),
total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
unit="ba",
desc="Creating SQL from Arrow format",
):
written += num_rows
return written
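# Hedged usage sketch (added for illustration, not part of the original module):
# writes a Dataset to a SQL table with the writer above. The table name and
# connection URI are placeholders; extra keyword arguments such as `if_exists`
# are forwarded to `pandas.DataFrame.to_sql` by `_batch_sql`.
def _example_write_sql(dataset: Dataset) -> int:
    writer = SqlDatasetWriter(
        dataset,
        name="my_table",                  # hypothetical destination table
        con="sqlite:///my_database.db",   # hypothetical connection URI
        batch_size=1000,
        if_exists="replace",              # first batch replaces, later batches append
    )
    return writer.write()  # total number of rows written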
| datasets/src/datasets/io/sql.py/0 | {
"file_path": "datasets/src/datasets/io/sql.py",
"repo_id": "datasets",
"token_count": 2007
} |
import collections
import itertools
import os
from dataclasses import dataclass
from typing import List, Optional, Tuple, Type
import pandas as pd
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.features.features import FeatureType
logger = datasets.utils.logging.get_logger(__name__)
def count_path_segments(path):
return path.replace("\\", "/").count("/")
@dataclass
class FolderBasedBuilderConfig(datasets.BuilderConfig):
"""BuilderConfig for AutoFolder."""
features: Optional[datasets.Features] = None
drop_labels: bool = None
drop_metadata: bool = None
def __post_init__(self):
super().__post_init__()
class FolderBasedBuilder(datasets.GeneratorBasedBuilder):
"""
Base class for generic data loaders for image and audio data.
Abstract class attributes to be overridden by a child class:
BASE_FEATURE: feature object to decode data (i.e. datasets.Image, datasets.Audio, ...)
BASE_COLUMN_NAME: string key name of a base feature (i.e. "image", "audio", ...)
BUILDER_CONFIG_CLASS: builder config inherited from `folder_based_builder.FolderBasedBuilderConfig`
EXTENSIONS: list of allowed extensions (only files with these extensions and METADATA_FILENAME files
will be included in a dataset)
"""
BASE_FEATURE: Type[FeatureType]
BASE_COLUMN_NAME: str
BUILDER_CONFIG_CLASS: FolderBasedBuilderConfig
EXTENSIONS: List[str]
METADATA_FILENAMES: List[str] = ["metadata.csv", "metadata.jsonl"]
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
# Do an early pass if:
# * `drop_labels` is None (default) or False, to infer the class labels
# * `drop_metadata` is None (default) or False, to find the metadata files
do_analyze = not self.config.drop_labels or not self.config.drop_metadata
labels, path_depths = set(), set()
metadata_files = collections.defaultdict(set)
def analyze(files_or_archives, downloaded_files_or_dirs, split):
if len(downloaded_files_or_dirs) == 0:
return
# The files are separated from the archives at this point, so check the first sample
# to see if it's a file or a directory and iterate accordingly
if os.path.isfile(downloaded_files_or_dirs[0]):
original_files, downloaded_files = files_or_archives, downloaded_files_or_dirs
for original_file, downloaded_file in zip(original_files, downloaded_files):
original_file, downloaded_file = str(original_file), str(downloaded_file)
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(original_file)))
path_depths.add(count_path_segments(original_file))
elif os.path.basename(original_file) in self.METADATA_FILENAMES:
metadata_files[split].add((original_file, downloaded_file))
else:
original_file_name = os.path.basename(original_file)
logger.debug(
f"The file '{original_file_name}' was ignored: it is not an image, and is not {self.METADATA_FILENAMES} either."
)
else:
archives, downloaded_dirs = files_or_archives, downloaded_files_or_dirs
for archive, downloaded_dir in zip(archives, downloaded_dirs):
archive, downloaded_dir = str(archive), str(downloaded_dir)
for downloaded_dir_file in dl_manager.iter_files(downloaded_dir):
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
if not self.config.drop_labels:
labels.add(os.path.basename(os.path.dirname(downloaded_dir_file)))
path_depths.add(count_path_segments(downloaded_dir_file))
elif os.path.basename(downloaded_dir_file) in self.METADATA_FILENAMES:
metadata_files[split].add((None, downloaded_dir_file))
else:
archive_file_name = os.path.basename(archive)
original_file_name = os.path.basename(downloaded_dir_file)
logger.debug(
f"The file '{original_file_name}' from the archive '{archive_file_name}' was ignored: it is not an {self.BASE_COLUMN_NAME}, and is not {self.METADATA_FILENAMES} either."
)
data_files = self.config.data_files
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files, archives = self._split_files_and_archives(files)
downloaded_files = dl_manager.download(files)
downloaded_dirs = dl_manager.download_and_extract(archives)
if do_analyze: # drop_metadata is None or False, drop_labels is None or False
logger.info(f"Searching for labels and/or metadata files in {split_name} data files...")
analyze(files, downloaded_files, split_name)
analyze(archives, downloaded_dirs, split_name)
if metadata_files:
# add metadata if `metadata_files` are found and `drop_metadata` is None (default) or False
add_metadata = not self.config.drop_metadata
# if `metadata_files` are found, add labels only if
# `drop_labels` is set up to False explicitly (not-default behavior)
add_labels = self.config.drop_labels is False
else:
# if `metadata_files` are not found, don't add metadata
add_metadata = False
# if `metadata_files` are not found and `drop_labels` is None (default) -
# add labels if files are on the same level in directory hierarchy and there is more than one label
add_labels = (
(len(labels) > 1 and len(path_depths) == 1)
if self.config.drop_labels is None
else not self.config.drop_labels
)
if add_labels:
logger.info("Adding the labels inferred from data directories to the dataset's features...")
if add_metadata:
logger.info("Adding metadata to the dataset...")
else:
add_labels, add_metadata, metadata_files = False, False, {}
splits.append(
datasets.SplitGenerator(
name=split_name,
gen_kwargs={
"files": list(zip(files, downloaded_files))
+ [(None, dl_manager.iter_files(downloaded_dir)) for downloaded_dir in downloaded_dirs],
"metadata_files": metadata_files,
"split_name": split_name,
"add_labels": add_labels,
"add_metadata": add_metadata,
},
)
)
if add_metadata:
# Verify that:
# * all metadata files have the same set of features
# * the `file_name` key is one of the metadata keys and is of type string
features_per_metadata_file: List[Tuple[str, datasets.Features]] = []
# Check that all metadata files share the same format
metadata_ext = {
os.path.splitext(original_metadata_file)[-1]
for original_metadata_file, _ in itertools.chain.from_iterable(metadata_files.values())
}
if len(metadata_ext) > 1:
raise ValueError(f"Found metadata files with different extensions: {list(metadata_ext)}")
metadata_ext = metadata_ext.pop()
for _, downloaded_metadata_file in itertools.chain.from_iterable(metadata_files.values()):
pa_metadata_table = self._read_metadata(downloaded_metadata_file, metadata_ext=metadata_ext)
features_per_metadata_file.append(
(downloaded_metadata_file, datasets.Features.from_arrow_schema(pa_metadata_table.schema))
)
for downloaded_metadata_file, metadata_features in features_per_metadata_file:
if metadata_features != features_per_metadata_file[0][1]:
raise ValueError(
f"Metadata files {downloaded_metadata_file} and {features_per_metadata_file[0][0]} have different features: {features_per_metadata_file[0]} != {metadata_features}"
)
metadata_features = features_per_metadata_file[0][1]
if "file_name" not in metadata_features:
raise ValueError("`file_name` must be present as dictionary key in metadata files")
if metadata_features["file_name"] != datasets.Value("string"):
raise ValueError("`file_name` key must be a string")
del metadata_features["file_name"]
else:
metadata_features = None
# Normally, we would do this in _info, but we need to know the labels and/or metadata
# before building the features
if self.config.features is None:
if add_labels:
self.info.features = datasets.Features(
{
self.BASE_COLUMN_NAME: self.BASE_FEATURE(),
"label": datasets.ClassLabel(names=sorted(labels)),
}
)
else:
self.info.features = datasets.Features({self.BASE_COLUMN_NAME: self.BASE_FEATURE()})
if add_metadata:
# Warn if there are duplicated keys in metadata compared to the existing features
# (`BASE_COLUMN_NAME`, optionally "label")
duplicated_keys = set(self.info.features) & set(metadata_features)
if duplicated_keys:
logger.warning(
f"Ignoring metadata columns {list(duplicated_keys)} as they are already present in "
f"the features dictionary."
)
# skip metadata duplicated keys
self.info.features.update(
{
feature: metadata_features[feature]
for feature in metadata_features
if feature not in duplicated_keys
}
)
return splits
def _split_files_and_archives(self, data_files):
files, archives = [], []
for data_file in data_files:
_, data_file_ext = os.path.splitext(data_file)
if data_file_ext.lower() in self.EXTENSIONS:
files.append(data_file)
elif os.path.basename(data_file) in self.METADATA_FILENAMES:
files.append(data_file)
else:
archives.append(data_file)
return files, archives
def _read_metadata(self, metadata_file, metadata_ext: str = ""):
if metadata_ext == ".csv":
# Use `pd.read_csv` (although slower) instead of `pyarrow.csv.read_csv` for reading CSV files for consistency with the CSV packaged module
return pa.Table.from_pandas(pd.read_csv(metadata_file))
else:
with open(metadata_file, "rb") as f:
return paj.read_json(f)
def _generate_examples(self, files, metadata_files, split_name, add_metadata, add_labels):
split_metadata_files = metadata_files.get(split_name, [])
sample_empty_metadata = (
{k: None for k in self.info.features if k != self.BASE_COLUMN_NAME} if self.info.features else {}
)
last_checked_dir = None
metadata_dir = None
metadata_dict = None
downloaded_metadata_file = None
metadata_ext = ""
if split_metadata_files:
metadata_ext = {
os.path.splitext(original_metadata_file)[-1] for original_metadata_file, _ in split_metadata_files
}
metadata_ext = metadata_ext.pop()
file_idx = 0
for original_file, downloaded_file_or_dir in files:
if original_file is not None:
_, original_file_ext = os.path.splitext(original_file)
if original_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
# If the file is a file of a needed type, and we've just entered a new directory,
# find the nearest metadata file (by counting path segments) for the directory
current_dir = os.path.dirname(original_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(original_file, os.path.dirname(metadata_file_candidate)),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is not None # ignore metadata_files that are inside archives
and not os.path.relpath(
original_file, os.path.dirname(metadata_file_candidate)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(
downloaded_metadata_file, metadata_ext=metadata_ext
)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
file_relpath = os.path.relpath(original_file, metadata_dir)
file_relpath = file_relpath.replace("\\", "/")
if file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[file_relpath]
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_file_or_dir}."
)
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(original_file))}
else:
sample_label = {}
yield (
file_idx,
{
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_file_or_dir,
**sample_metadata,
**sample_label,
},
)
file_idx += 1
else:
for downloaded_dir_file in downloaded_file_or_dir:
_, downloaded_dir_file_ext = os.path.splitext(downloaded_dir_file)
if downloaded_dir_file_ext.lower() in self.EXTENSIONS:
if add_metadata:
current_dir = os.path.dirname(downloaded_dir_file)
if last_checked_dir is None or last_checked_dir != current_dir:
last_checked_dir = current_dir
metadata_file_candidates = [
(
os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
),
metadata_file_candidate,
downloaded_metadata_file,
)
for metadata_file_candidate, downloaded_metadata_file in split_metadata_files
if metadata_file_candidate
is None # ignore metadata_files that are not inside archives
and not os.path.relpath(
downloaded_dir_file, os.path.dirname(downloaded_metadata_file)
).startswith("..")
]
if metadata_file_candidates:
_, metadata_file, downloaded_metadata_file = min(
metadata_file_candidates, key=lambda x: count_path_segments(x[0])
)
pa_metadata_table = self._read_metadata(
downloaded_metadata_file, metadata_ext=metadata_ext
)
pa_file_name_array = pa_metadata_table["file_name"]
pa_metadata_table = pa_metadata_table.drop(["file_name"])
metadata_dir = os.path.dirname(downloaded_metadata_file)
metadata_dict = {
os.path.normpath(file_name).replace("\\", "/"): sample_metadata
for file_name, sample_metadata in zip(
pa_file_name_array.to_pylist(), pa_metadata_table.to_pylist()
)
}
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
if metadata_dir is not None and downloaded_metadata_file is not None:
downloaded_dir_file_relpath = os.path.relpath(downloaded_dir_file, metadata_dir)
downloaded_dir_file_relpath = downloaded_dir_file_relpath.replace("\\", "/")
if downloaded_dir_file_relpath not in metadata_dict:
raise ValueError(
f"{self.BASE_COLUMN_NAME} at {downloaded_dir_file_relpath} doesn't have metadata in {downloaded_metadata_file}."
)
sample_metadata = metadata_dict[downloaded_dir_file_relpath]
else:
raise ValueError(
f"One or several metadata{metadata_ext} were found, but not in the same directory or in a parent directory of {downloaded_dir_file}."
)
else:
sample_metadata = {}
if add_labels:
sample_label = {"label": os.path.basename(os.path.dirname(downloaded_dir_file))}
else:
sample_label = {}
yield (
file_idx,
{
**sample_empty_metadata,
self.BASE_COLUMN_NAME: downloaded_dir_file,
**sample_metadata,
**sample_label,
},
)
file_idx += 1
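# Hedged sketch (added for illustration, not part of the original module): a
# minimal concrete builder wiring up the abstract class attributes listed in
# the FolderBasedBuilder docstring. `datasets.Image` is assumed as the base
# feature; the extension list is illustrative, not exhaustive.
class _ExampleImageFolderBuilder(FolderBasedBuilder):
    BASE_FEATURE = datasets.Image
    BASE_COLUMN_NAME = "image"
    BUILDER_CONFIG_CLASS = FolderBasedBuilderConfig
    EXTENSIONS = [".jpg", ".jpeg", ".png"]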
| datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/folder_based_builder/folder_based_builder.py",
"repo_id": "datasets",
"token_count": 11915
} |
import itertools
from dataclasses import dataclass
from io import StringIO
from typing import Optional
import pyarrow as pa
import datasets
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class TextConfig(datasets.BuilderConfig):
"""BuilderConfig for text files."""
features: Optional[datasets.Features] = None
encoding: str = "utf-8"
encoding_errors: Optional[str] = None
chunksize: int = 10 << 20 # 10MB
keep_linebreaks: bool = False
sample_by: str = "line"
class Text(datasets.ArrowBasedBuilder):
BUILDER_CONFIG_CLASS = TextConfig
def _info(self):
return datasets.DatasetInfo(features=self.config.features)
def _split_generators(self, dl_manager):
"""The `data_files` kwarg in load_dataset() can be a str, List[str], Dict[str,str], or Dict[str,List[str]].
If str or List[str], then the dataset returns only the 'train' split.
If dict, then keys should be from the `datasets.Split` enum.
"""
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
dl_manager.download_config.extract_on_the_fly = True
data_files = dl_manager.download_and_extract(self.config.data_files)
splits = []
for split_name, files in data_files.items():
if isinstance(files, str):
files = [files]
files = [dl_manager.iter_files(file) for file in files]
splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
return splits
def _cast_table(self, pa_table: pa.Table) -> pa.Table:
if self.config.features is not None:
schema = self.config.features.arrow_schema
if all(not require_storage_cast(feature) for feature in self.config.features.values()):
# cheaper cast
pa_table = pa_table.cast(schema)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
pa_table = table_cast(pa_table, schema)
return pa_table
else:
return pa_table.cast(pa.schema({"text": pa.string()}))
def _generate_tables(self, files):
pa_table_names = list(self.config.features) if self.config.features is not None else ["text"]
for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
# open in text mode, by default translates universal newlines ("\n", "\r\n" and "\r") into "\n"
with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
if self.config.sample_by == "line":
batch_idx = 0
while True:
batch = f.read(self.config.chunksize)
if not batch:
break
batch += f.readline() # finish current line
# StringIO.readlines, by default splits only on "\n" (and keeps line breaks)
batch = StringIO(batch).readlines()
if not self.config.keep_linebreaks:
batch = [line.rstrip("\n") for line in batch]
pa_table = pa.Table.from_arrays([pa.array(batch)], names=pa_table_names)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
elif self.config.sample_by == "paragraph":
batch_idx = 0
batch = ""
while True:
new_batch = f.read(self.config.chunksize)
if not new_batch:
break
batch += new_batch
batch += f.readline() # finish current line
batch = batch.split("\n\n")
pa_table = pa.Table.from_arrays(
[pa.array([example for example in batch[:-1] if example])], names=pa_table_names
)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(pa_table)
batch_idx += 1
batch = batch[-1]
if batch:
pa_table = pa.Table.from_arrays([pa.array([batch])], names=pa_table_names)
yield (file_idx, batch_idx), self._cast_table(pa_table)
elif self.config.sample_by == "document":
text = f.read()
pa_table = pa.Table.from_arrays([pa.array([text])], names=pa_table_names)
yield file_idx, self._cast_table(pa_table)
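# Hedged usage sketch (added for illustration, not part of the original module):
# the "text" packaged module is normally reached through `datasets.load_dataset`,
# with TextConfig fields such as `sample_by` passed as keyword arguments. The
# file path is a placeholder.
def _example_load_text():
    # One example per line (the default), with trailing newlines stripped:
    per_line = datasets.load_dataset("text", data_files={"train": "corpus.txt"})
    # One example per blank-line-separated paragraph:
    per_paragraph = datasets.load_dataset(
        "text", data_files={"train": "corpus.txt"}, sample_by="paragraph"
    )
    return per_line["train"], per_paragraph["train"]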
| datasets/src/datasets/packaged_modules/text/text.py/0 | {
"file_path": "datasets/src/datasets/packaged_modules/text/text.py",
"repo_id": "datasets",
"token_count": 2703
} |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extends `dill` to support pickling more types and produce more consistent dumps."""
import os
import sys
from io import BytesIO
from types import CodeType, FunctionType
import dill
from packaging import version
from .. import config
class Pickler(dill.Pickler):
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
_legacy_no_dict_keys_sorting = False
def save(self, obj, save_persistent_id=True):
obj_type = type(obj)
if obj_type not in self.dispatch:
if "regex" in sys.modules:
import regex # type: ignore
if obj_type is regex.Pattern:
pklregister(obj_type)(_save_regexPattern)
if "spacy" in sys.modules:
import spacy # type: ignore
if issubclass(obj_type, spacy.Language):
pklregister(obj_type)(_save_spacyLanguage)
if "tiktoken" in sys.modules:
import tiktoken # type: ignore
if obj_type is tiktoken.Encoding:
pklregister(obj_type)(_save_tiktokenEncoding)
if "torch" in sys.modules:
import torch # type: ignore
if issubclass(obj_type, torch.Tensor):
pklregister(obj_type)(_save_torchTensor)
if obj_type is torch.Generator:
pklregister(obj_type)(_save_torchGenerator)
# Unwrap `torch.compile`-ed modules
if issubclass(obj_type, torch.nn.Module):
obj = getattr(obj, "_orig_mod", obj)
if "transformers" in sys.modules:
import transformers # type: ignore
if issubclass(obj_type, transformers.PreTrainedTokenizerBase):
pklregister(obj_type)(_save_transformersPreTrainedTokenizerBase)
# Unwrap `torch.compile`-ed functions
if obj_type is FunctionType:
obj = getattr(obj, "_torchdynamo_orig_callable", obj)
dill.Pickler.save(self, obj, save_persistent_id=save_persistent_id)
def _batch_setitems(self, items):
if self._legacy_no_dict_keys_sorting:
return super()._batch_setitems(items)
# Ignore the order of keys in a dict
try:
# Faster, but fails for unorderable elements
items = sorted(items)
except Exception: # TypeError, decimal.InvalidOperation, etc.
from datasets.fingerprint import Hasher
items = sorted(items, key=lambda x: Hasher.hash(x[0]))
dill.Pickler._batch_setitems(self, items)
def memoize(self, obj):
# Don't memoize strings since two identical strings can have different Python ids
if type(obj) is not str: # noqa: E721
dill.Pickler.memoize(self, obj)
def pklregister(t):
"""Register a custom reducer for the type."""
def proxy(func):
Pickler.dispatch[t] = func
return func
return proxy
def dump(obj, file):
"""Pickle an object to a file."""
Pickler(file, recurse=True).dump(obj)
def dumps(obj):
"""Pickle an object to a string."""
file = BytesIO()
dump(obj, file)
return file.getvalue()
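# Hedged usage sketch (added for illustration, not part of the original module):
# registering a reducer for a user-defined type with `pklregister`, then
# producing a dump with `dumps`. The `_ExampleInterval` class is hypothetical.
class _ExampleInterval:
    def __init__(self, low, high):
        self.low, self.high = low, high

@pklregister(_ExampleInterval)
def _save_exampleInterval(pickler, obj):
    # Reduce to the constructor and its arguments so equal objects dump identically.
    pickler.save_reduce(_ExampleInterval, (obj.low, obj.high), obj=obj)

def _example_dumps_interval() -> bytes:
    return dumps(_ExampleInterval(low=0, high=10))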
if config.DILL_VERSION < version.parse("0.3.6"):
def log(pickler, msg):
dill._dill.log.info(msg)
elif config.DILL_VERSION.release[:3] in [
version.parse("0.3.6").release,
version.parse("0.3.7").release,
version.parse("0.3.8").release,
]:
def log(pickler, msg):
dill._dill.logger.trace(pickler, msg)
@pklregister(set)
def _save_set(pickler, obj):
log(pickler, f"Se: {obj}")
try:
# Faster, but fails for unorderable elements
args = (sorted(obj),)
except Exception: # TypeError, decimal.InvalidOperation, etc.
from datasets.fingerprint import Hasher
args = (sorted(obj, key=Hasher.hash),)
pickler.save_reduce(set, args, obj=obj)
log(pickler, "# Se")
def _save_regexPattern(pickler, obj):
import regex # type: ignore
log(pickler, f"Re: {obj}")
args = (obj.pattern, obj.flags)
pickler.save_reduce(regex.compile, args, obj=obj)
log(pickler, "# Re")
def _save_tiktokenEncoding(pickler, obj):
import tiktoken # type: ignore
log(pickler, f"Enc: {obj}")
args = (obj.name, obj._pat_str, obj._mergeable_ranks, obj._special_tokens)
pickler.save_reduce(tiktoken.Encoding, args, obj=obj)
log(pickler, "# Enc")
def _save_torchTensor(pickler, obj):
import torch # type: ignore
# `torch.from_numpy` is not picklable in `torch>=1.11.0`
def create_torchTensor(np_array, dtype=None):
tensor = torch.from_numpy(np_array)
if dtype:
tensor = tensor.type(dtype)
return tensor
log(pickler, f"To: {obj}")
if obj.dtype == torch.bfloat16:
args = (obj.detach().to(torch.float).cpu().numpy(), torch.bfloat16)
else:
args = (obj.detach().cpu().numpy(),)
pickler.save_reduce(create_torchTensor, args, obj=obj)
log(pickler, "# To")
def _save_torchGenerator(pickler, obj):
import torch # type: ignore
def create_torchGenerator(state):
generator = torch.Generator()
generator.set_state(state)
return generator
log(pickler, f"Ge: {obj}")
args = (obj.get_state(),)
pickler.save_reduce(create_torchGenerator, args, obj=obj)
log(pickler, "# Ge")
def _save_spacyLanguage(pickler, obj):
import spacy # type: ignore
def create_spacyLanguage(config, bytes):
lang_cls = spacy.util.get_lang_class(config["nlp"]["lang"])
lang_inst = lang_cls.from_config(config)
return lang_inst.from_bytes(bytes)
log(pickler, f"Sp: {obj}")
args = (obj.config, obj.to_bytes())
pickler.save_reduce(create_spacyLanguage, args, obj=obj)
log(pickler, "# Sp")
def _save_transformersPreTrainedTokenizerBase(pickler, obj):
log(pickler, f"Tok: {obj}")
# Ignore the `cache` attribute
state = obj.__dict__
if "cache" in state and isinstance(state["cache"], dict):
state["cache"] = {}
pickler.save_reduce(type(obj), (), state=state, obj=obj)
log(pickler, "# Tok")
if config.DILL_VERSION < version.parse("0.3.6"):
@pklregister(CodeType)
def _save_code(pickler, obj):
"""
From dill._dill.save_code
This is a modified version that removes the origin (filename + line no.)
of functions created in notebooks or shells for example.
"""
dill._dill.log.info(f"Co: {obj}")
# The filename of a function is the .py file where it is defined.
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
# Filenames of functions created in ipykernel look like
# f"{tempdir}/ipykernel_{id1}/{id2}.py"
# Moreover lambda functions have a special name: '<lambda>'
# ex: (lambda x: x).__code__.co_name == "<lambda>" # True
#
# For the hashing mechanism we ignore where the function has been defined
# More specifically:
# - we ignore the filename of special functions (filename starts with '<')
# - we always ignore the line number
# - we only use the base name of the file instead of the whole path,
# to be robust in case a script is moved for example.
#
# Only those two lines are different from the original implementation:
co_filename = (
""
if obj.co_filename.startswith("<")
or (
len(obj.co_filename.split(os.path.sep)) > 1
and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
)
or obj.co_name == "<lambda>"
else os.path.basename(obj.co_filename)
)
co_firstlineno = 1
# The rest is the same as in the original dill implementation
if dill._dill.PY3:
if hasattr(obj, "co_posonlyargcount"):
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else:
args = (
obj.co_argcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename,
obj.co_name,
co_firstlineno,
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(CodeType, args, obj=obj)
dill._dill.log.info("# Co")
return
elif config.DILL_VERSION.release[:3] in [
version.parse("0.3.6").release,
version.parse("0.3.7").release,
version.parse("0.3.8").release,
]:
# From: https://github.com/uqfoundation/dill/blob/dill-0.3.6/dill/_dill.py#L1104
@pklregister(CodeType)
def save_code(pickler, obj):
dill._dill.logger.trace(pickler, "Co: %s", obj)
############################################################################################################
# Modification here for huggingface/datasets
# The filename of a function is the .py file where it is defined.
# Filenames of functions created in notebooks or shells start with '<'
# ex: <ipython-input-13-9ed2afe61d25> for ipython, and <stdin> for shell
# Filenames of functions created in ipykernel look like
# f"{tempdir}/ipykernel_{id1}/{id2}.py"
# Moreover lambda functions have a special name: '<lambda>'
# ex: (lambda x: x).__code__.co_name == "<lambda>" # True
#
# For the hashing mechanism we ignore where the function has been defined
# More specifically:
# - we ignore the filename of special functions (filename starts with '<')
# - we always ignore the line number
# - we only use the base name of the file instead of the whole path,
# to be robust in case a script is moved for example.
#
# Only those two lines are different from the original implementation:
co_filename = (
""
if obj.co_filename.startswith("<")
or (
len(obj.co_filename.split(os.path.sep)) > 1
and obj.co_filename.split(os.path.sep)[-2].startswith("ipykernel_")
)
or obj.co_name == "<lambda>"
else os.path.basename(obj.co_filename)
)
co_firstlineno = 1
# The rest is the same as in the original dill implementation, except for the replacements:
# - obj.co_filename => co_filename
# - obj.co_firstlineno => co_firstlineno
############################################################################################################
if hasattr(obj, "co_endlinetable"): # python 3.11a (20 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
obj.co_qualname,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_endlinetable,
obj.co_columntable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_exceptiontable"): # python 3.11 (18 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
obj.co_qualname,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_exceptiontable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_linetable"): # python 3.10 (16 args)
args = (
obj.co_lnotab, # for < python 3.10 [not counted in args]
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_linetable,
obj.co_freevars,
obj.co_cellvars,
)
elif hasattr(obj, "co_posonlyargcount"): # python 3.8 (16 args)
args = (
obj.co_argcount,
obj.co_posonlyargcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
else: # python 3.7 (15 args)
args = (
obj.co_argcount,
obj.co_kwonlyargcount,
obj.co_nlocals,
obj.co_stacksize,
obj.co_flags,
obj.co_code,
obj.co_consts,
obj.co_names,
obj.co_varnames,
co_filename, # Modification for huggingface/datasets ############################################
obj.co_name,
co_firstlineno, # Modification for huggingface/datasets #########################################
obj.co_lnotab,
obj.co_freevars,
obj.co_cellvars,
)
pickler.save_reduce(dill._dill._create_code, args, obj=obj)
dill._dill.logger.trace(pickler, "# Co")
return
| datasets/src/datasets/utils/_dill.py/0 | {
"file_path": "datasets/src/datasets/utils/_dill.py",
"repo_id": "datasets",
"token_count": 8494
} |
{
"code": "Programming language (C++, Java, Javascript, Python, etc.)",
"aa": "Afar",
"aaa": "Ghotuo",
"aab": "Alumu-Tesu",
"aac": "Ari",
"aad": "Amal",
"aae": "Arbëreshë Albanian",
"aaf": "Aranadan",
"aag": "Ambrak",
"aah": "Abu' Arapesh",
"aai": "Arifama-Miniafia",
"aak": "Ankave",
"aal": "Afade",
"aan": "Anambé",
"aao": "Algerian Saharan Arabic",
"aap": "Pará Arára",
"aaq": "Eastern Abnaki",
"aas": "Aasáx",
"aat": "Arvanitika Albanian",
"aau": "Abau",
"aav": "Austro-Asiatic languages",
"aaw": "Solong",
"aax": "Mandobo Atas",
"aaz": "Amarasi",
"ab": "Abkhazian",
"aba": "Abé",
"abb": "Bankon",
"abc": "Ambala Ayta",
"abd": "Manide",
"abe": "Western Abnaki",
"abf": "Abai Sungai",
"abg": "Abaga",
"abh": "Tajiki Arabic",
"abi": "Abidji",
"abj": "Aka-Bea",
"abl": "Lampung Nyo",
"abm": "Abanyom",
"abn": "Abua",
"abo": "Abon",
"abp": "Abellen Ayta",
"abq": "Abaza",
"abr": "Abron",
"abs": "Ambonese Malay",
"abt": "Ambulas",
"abu": "Abure",
"abv": "Baharna Arabic",
"abw": "Pal",
"abx": "Inabaknon",
"aby": "Aneme Wake",
"abz": "Abui",
"aca": "Achagua",
"acb": "Áncá",
"acd": "Gikyode",
"ace": "Achinese",
"acf": "Saint Lucian Creole French",
"ach": "Acoli",
"aci": "Aka-Cari",
"ack": "Aka-Kora",
"acl": "Akar-Bale",
"acm": "Mesopotamian Arabic",
"acn": "Achang",
"acp": "Eastern Acipa",
"acq": "Ta'izzi-Adeni Arabic",
"acr": "Achi",
"acs": "Acroá",
"act": "Achterhoeks",
"acu": "Achuar-Shiwiar",
"acv": "Achumawi",
"acw": "Hijazi Arabic",
"acx": "Omani Arabic",
"acy": "Cypriot Arabic",
"acz": "Acheron",
"ada": "Adangme",
"adb": "Atauran",
"add": "Lidzonka; Dzodinka",
"ade": "Adele",
"adf": "Dhofari Arabic",
"adg": "Andegerebinha",
"adh": "Adhola",
"adi": "Adi",
"adj": "Adioukrou",
"adl": "Galo",
"adn": "Adang",
"ado": "Abu",
"adq": "Adangbe",
"adr": "Adonara",
"ads": "Adamorobe Sign Language",
"adt": "Adnyamathanha",
"adu": "Aduge",
"adw": "Amundava",
"adx": "Amdo Tibetan",
"ady": "Adyghe; Adygei",
"adz": "Adzera",
"ae": "Avestan",
"aea": "Areba",
"aeb": "Tunisian Arabic",
"aec": "Saidi Arabic",
"aed": "Argentine Sign Language",
"aee": "Northeast Pashai; Northeast Pashayi",
"aek": "Haeke",
"ael": "Ambele",
"aem": "Arem",
"aen": "Armenian Sign Language",
"aeq": "Aer",
"aer": "Eastern Arrernte",
"aes": "Alsea",
"aeu": "Akeu",
"aew": "Ambakich",
"aey": "Amele",
"aez": "Aeka",
"af": "Afrikaans",
"afa": "Afro-Asiatic languages",
"afb": "Gulf Arabic",
"afd": "Andai",
"afe": "Putukwam",
"afg": "Afghan Sign Language",
"afh": "Afrihili",
"afi": "Akrukay; Chini",
"afk": "Nanubae",
"afn": "Defaka",
"afo": "Eloyi",
"afp": "Tapei",
"afs": "Afro-Seminole Creole",
"aft": "Afitti",
"afu": "Awutu",
"afz": "Obokuitai",
"aga": "Aguano",
"agb": "Legbo",
"agc": "Agatu",
"agd": "Agarabi",
"age": "Angal",
"agf": "Arguni",
"agg": "Angor",
"agh": "Ngelima",
"agi": "Agariya",
"agj": "Argobba",
"agk": "Isarog Agta",
"agl": "Fembe",
"agm": "Angaataha",
"agn": "Agutaynen",
"ago": "Tainae",
"agq": "Aghem",
"agr": "Aguaruna",
"ags": "Esimbi",
"agt": "Central Cagayan Agta",
"agu": "Aguacateco",
"agv": "Remontado Dumagat",
"agw": "Kahua",
"agx": "Aghul",
"agy": "Southern Alta",
"agz": "Mt. Iriga Agta",
"aha": "Ahanta",
"ahb": "Axamb",
"ahg": "Qimant",
"ahh": "Aghu",
"ahi": "Tiagbamrin Aizi",
"ahk": "Akha",
"ahl": "Igo",
"ahm": "Mobumrin Aizi",
"ahn": "Àhàn",
"aho": "Ahom",
"ahp": "Aproumu Aizi",
"ahr": "Ahirani",
"ahs": "Ashe",
"aht": "Ahtena",
"aia": "Arosi",
"aib": "Ainu (China)",
"aic": "Ainbai",
"aid": "Alngith",
"aie": "Amara",
"aif": "Agi",
"aig": "Antigua and Barbuda Creole English",
"aih": "Ai-Cham",
"aii": "Assyrian Neo-Aramaic",
"aij": "Lishanid Noshan",
"aik": "Ake",
"ail": "Aimele",
"aim": "Aimol",
"ain": "Ainu (Japan)",
"aio": "Aiton",
"aip": "Burumakok",
"aiq": "Aimaq",
"air": "Airoran",
"ait": "Arikem",
"aiw": "Aari",
"aix": "Aighon",
"aiy": "Ali",
"aja": "Aja (South Sudan)",
"ajg": "Aja (Benin)",
"aji": "Ajië",
"ajn": "Andajin",
"ajp": "South Levantine Arabic",
"ajs": "Algerian Jewish Sign Language",
"aju": "Judeo-Moroccan Arabic",
"ajw": "Ajawa",
"ajz": "Amri Karbi",
"ak": "Akan",
"akb": "Batak Angkola",
"akc": "Mpur",
"akd": "Ukpet-Ehom",
"ake": "Akawaio",
"akf": "Akpa",
"akg": "Anakalangu",
"akh": "Angal Heneng",
"aki": "Aiome",
"akj": "Aka-Jeru",
"akk": "Akkadian",
"akl": "Aklanon",
"akm": "Aka-Bo",
"ako": "Akurio",
"akp": "Siwu",
"akq": "Ak",
"akr": "Araki",
"aks": "Akaselem",
"akt": "Akolet",
"aku": "Akum",
"akv": "Akhvakh",
"akw": "Akwa",
"akx": "Aka-Kede",
"aky": "Aka-Kol",
"akz": "Alabama",
"ala": "Alago",
"alc": "Qawasqar",
"ald": "Alladian",
"ale": "Aleut",
"alf": "Alege",
"alg": "Algonquian languages",
"alh": "Alawa",
"ali": "Amaimon",
"alj": "Alangan",
"alk": "Alak",
"all": "Allar",
"alm": "Amblong",
"aln": "Gheg Albanian",
"alo": "Larike-Wakasihu",
"alp": "Alune",
"alq": "Algonquin",
"alr": "Alutor",
"als": "Tosk Albanian",
"alt": "Southern Altai",
"alu": "'Are'are",
"alv": "Atlantic-Congo languages",
"alw": "Alaba-K’abeena; Wanbasana",
"alx": "Amol",
"aly": "Alyawarr",
"alz": "Alur",
"am": "Amharic",
"ama": "Amanayé",
"amb": "Ambo",
"amc": "Amahuaca",
"ame": "Yanesha'",
"amf": "Hamer-Banna",
"amg": "Amurdak",
"ami": "Amis",
"amj": "Amdang",
"amk": "Ambai",
"aml": "War-Jaintia",
"amm": "Ama (Papua New Guinea)",
"amn": "Amanab",
"amo": "Amo",
"amp": "Alamblak",
"amq": "Amahai",
"amr": "Amarakaeri",
"ams": "Southern Amami-Oshima",
"amt": "Amto",
"amu": "Guerrero Amuzgo",
"amv": "Ambelau",
"amw": "Western Neo-Aramaic",
"amx": "Anmatyerre",
"amy": "Ami",
"amz": "Atampaya",
"an": "Aragonese",
"ana": "Andaqui",
"anb": "Andoa",
"anc": "Ngas",
"and": "Ansus",
"ane": "Xârâcùù",
"anf": "Animere",
"ang": "Old English (ca. 450-1100)",
"anh": "Nend",
"ani": "Andi",
"anj": "Anor",
"ank": "Goemai",
"anl": "Anu-Hkongso Chin",
"anm": "Anal",
"ann": "Obolo",
"ano": "Andoque",
"anp": "Angika",
"anq": "Jarawa (India)",
"anr": "Andh",
"ans": "Anserma",
"ant": "Antakarinya; Antikarinya",
"anu": "Anuak",
"anv": "Denya",
"anw": "Anaang",
"anx": "Andra-Hus",
"any": "Anyin",
"anz": "Anem",
"aoa": "Angolar",
"aob": "Abom",
"aoc": "Pemon",
"aod": "Andarum",
"aoe": "Angal Enen",
"aof": "Bragat",
"aog": "Angoram",
"aoi": "Anindilyakwa",
"aoj": "Mufian",
"aok": "Arhö",
"aol": "Alor",
"aom": "Ömie",
"aon": "Bumbita Arapesh",
"aor": "Aore",
"aos": "Taikat",
"aot": "Atong (India); A'tong",
"aou": "A'ou",
"aox": "Atorada",
"aoz": "Uab Meto",
"apa": "Apache languages",
"apb": "Sa'a",
"apc": "North Levantine Arabic",
"apd": "Sudanese Arabic",
"ape": "Bukiyip",
"apf": "Pahanan Agta",
"apg": "Ampanang",
"aph": "Athpariya",
"api": "Apiaká",
"apj": "Jicarilla Apache",
"apk": "Kiowa Apache",
"apl": "Lipan Apache",
"apm": "Mescalero-Chiricahua Apache",
"apn": "Apinayé",
"apo": "Ambul",
"app": "Apma",
"apq": "A-Pucikwar",
"apr": "Arop-Lokep",
"aps": "Arop-Sissano",
"apt": "Apatani",
"apu": "Apurinã",
"apv": "Alapmunte",
"apw": "Western Apache",
"apx": "Aputai",
"apy": "Apalaí",
"apz": "Safeyoka",
"aqa": "Alacalufan languages",
"aqc": "Archi",
"aqd": "Ampari Dogon",
"aqg": "Arigidi",
"aqk": "Aninka",
"aql": "Algic languages",
"aqm": "Atohwaim",
"aqn": "Northern Alta",
"aqp": "Atakapa",
"aqr": "Arhâ",
"aqt": "Angaité",
"aqz": "Akuntsu",
"ar": "Arabic",
"arb": "Standard Arabic",
"arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)",
"ard": "Arabana",
"are": "Western Arrarnta",
"arh": "Arhuaco",
"ari": "Arikara",
"arj": "Arapaso",
"ark": "Arikapú",
"arl": "Arabela",
"arn": "Mapudungun; Mapuche",
"aro": "Araona",
"arp": "Arapaho",
"arq": "Algerian Arabic",
"arr": "Karo (Brazil)",
"ars": "Najdi Arabic",
"art": "Artificial languages",
"aru": "Aruá (Amazonas State); Arawá",
"arv": "Arbore",
"arw": "Arawak",
"arx": "Aruá (Rodonia State)",
"ary": "Moroccan Arabic",
"arz": "Egyptian Arabic",
"as": "Assamese",
"asa": "Asu (Tanzania)",
"asb": "Assiniboine",
"asc": "Casuarina Coast Asmat",
"ase": "American Sign Language",
"asf": "Auslan; Australian Sign Language",
"asg": "Cishingini",
"ash": "Abishira",
"asi": "Buruwai",
"asj": "Sari",
"ask": "Ashkun",
"asl": "Asilulu",
"asn": "Xingú Asuriní",
"aso": "Dano",
"asp": "Algerian Sign Language",
"asq": "Austrian Sign Language",
"asr": "Asuri",
"ass": "Ipulo",
"ast": "Asturian; Asturleonese; Bable; Leonese",
"asu": "Tocantins Asurini",
"asv": "Asoa",
"asw": "Australian Aborigines Sign Language",
"asx": "Muratayak",
"asy": "Yaosakor Asmat",
"asz": "As",
"ata": "Pele-Ata",
"atb": "Zaiwa",
"atc": "Atsahuaca",
"atd": "Ata Manobo",
"ate": "Atemble",
"atg": "Ivbie North-Okpela-Arhe",
"ath": "Athapascan languages",
"ati": "Attié",
"atj": "Atikamekw",
"atk": "Ati",
"atl": "Mt. Iraya Agta",
"atm": "Ata",
"atn": "Ashtiani",
"ato": "Atong (Cameroon)",
"atp": "Pudtol Atta",
"atq": "Aralle-Tabulahan",
"atr": "Waimiri-Atroari",
"ats": "Gros Ventre",
"att": "Pamplona Atta",
"atu": "Reel",
"atv": "Northern Altai",
"atw": "Atsugewi",
"atx": "Arutani",
"aty": "Aneityum",
"atz": "Arta",
"aua": "Asumboa",
"aub": "Alugu",
"auc": "Waorani",
"aud": "Anuta",
"auf": "Arauan languages",
"aug": "Aguna",
"auh": "Aushi",
"aui": "Anuki",
"auj": "Awjilah",
"auk": "Heyo",
"aul": "Aulua",
"aum": "Asu (Nigeria)",
"aun": "Molmo One",
"auo": "Auyokawa",
"aup": "Makayam",
"auq": "Anus; Korur",
"aur": "Aruek",
"aus": "Australian languages",
"aut": "Austral",
"auu": "Auye",
"auw": "Awyi",
"aux": "Aurá",
"auy": "Awiyaana",
"auz": "Uzbeki Arabic",
"av": "Avaric",
"avb": "Avau",
"avd": "Alviri-Vidari",
"avi": "Avikam",
"avk": "Kotava",
"avl": "Eastern Egyptian Bedawi Arabic",
"avm": "Angkamuthi",
"avn": "Avatime",
"avo": "Agavotaguerra",
"avs": "Aushiri",
"avt": "Au",
"avu": "Avokaya",
"avv": "Avá-Canoeiro",
"awa": "Awadhi",
"awb": "Awa (Papua New Guinea)",
"awc": "Cicipu",
"awd": "Arawakan languages",
"awe": "Awetí",
"awg": "Anguthimri",
"awh": "Awbono",
"awi": "Aekyom",
"awk": "Awabakal",
"awm": "Arawum",
"awn": "Awngi",
"awo": "Awak",
"awr": "Awera",
"aws": "South Awyu",
"awt": "Araweté",
"awu": "Central Awyu",
"awv": "Jair Awyu",
"aww": "Awun",
"awx": "Awara",
"awy": "Edera Awyu",
"axb": "Abipon",
"axe": "Ayerrerenge",
"axg": "Mato Grosso Arára",
"axk": "Yaka (Central African Republic)",
"axl": "Lower Southern Aranda",
"axm": "Middle Armenian",
"axx": "Xârâgurè",
"ay": "Aymara",
"aya": "Awar",
"ayb": "Ayizo Gbe",
"ayc": "Southern Aymara",
"ayd": "Ayabadhu",
"aye": "Ayere",
"ayg": "Ginyanga",
"ayh": "Hadrami Arabic",
"ayi": "Leyigha",
"ayk": "Akuku",
"ayl": "Libyan Arabic",
"ayn": "Sanaani Arabic",
"ayo": "Ayoreo",
"ayp": "North Mesopotamian Arabic",
"ayq": "Ayi (Papua New Guinea)",
"ayr": "Central Aymara",
"ays": "Sorsogon Ayta",
"ayt": "Magbukun Ayta",
"ayu": "Ayu",
"ayz": "Mai Brat",
"az": "Azerbaijani",
"aza": "Azha",
"azb": "South Azerbaijani",
"azc": "Uto-Aztecan languages",
"azd": "Eastern Durango Nahuatl",
"azg": "San Pedro Amuzgos Amuzgo",
"azj": "North Azerbaijani",
"azm": "Ipalapa Amuzgo",
"azn": "Western Durango Nahuatl",
"azo": "Awing",
"azt": "Faire Atta",
"azz": "Highland Puebla Nahuatl",
"ba": "Bashkir",
"baa": "Babatana",
"bab": "Bainouk-Gunyuño",
"bac": "Badui",
"bad": "Banda languages",
"bae": "Baré",
"baf": "Nubaca",
"bag": "Tuki",
"bah": "Bahamas Creole English",
"bai": "Bamileke languages",
"baj": "Barakai",
"bal": "Baluchi",
"ban": "Balinese",
"bao": "Waimaha",
"bap": "Bantawa",
"bar": "Bavarian",
"bas": "Basa (Cameroon)",
"bat": "Baltic languages",
"bau": "Bada (Nigeria)",
"bav": "Vengo",
"baw": "Bambili-Bambui",
"bax": "Bamun",
"bay": "Batuley",
"bba": "Baatonum",
"bbb": "Barai",
"bbc": "Batak Toba",
"bbd": "Bau",
"bbe": "Bangba",
"bbf": "Baibai",
"bbg": "Barama",
"bbh": "Bugan",
"bbi": "Barombi",
"bbj": "Ghomálá'",
"bbk": "Babanki",
"bbl": "Bats",
"bbm": "Babango",
"bbn": "Uneapa",
"bbo": "Northern Bobo Madaré; Konabéré",
"bbp": "West Central Banda",
"bbq": "Bamali",
"bbr": "Girawa",
"bbs": "Bakpinka",
"bbt": "Mburku",
"bbu": "Kulung (Nigeria)",
"bbv": "Karnai",
"bbw": "Baba",
"bbx": "Bubia",
"bby": "Befang",
"bca": "Central Bai",
"bcb": "Bainouk-Samik",
"bcc": "Southern Balochi",
"bcd": "North Babar",
"bce": "Bamenyam",
"bcf": "Bamu",
"bcg": "Baga Pokur",
"bch": "Bariai",
"bci": "Baoulé",
"bcj": "Bardi",
"bck": "Bunuba",
"bcl": "Central Bikol",
"bcm": "Bannoni",
"bcn": "Bali (Nigeria)",
"bco": "Kaluli",
"bcp": "Bali (Democratic Republic of Congo)",
"bcq": "Bench",
"bcr": "Babine",
"bcs": "Kohumono",
"bct": "Bendi",
"bcu": "Awad Bing",
"bcv": "Shoo-Minda-Nye",
"bcw": "Bana",
"bcy": "Bacama",
"bcz": "Bainouk-Gunyaamolo",
"bda": "Bayot",
"bdb": "Basap",
"bdc": "Emberá-Baudó",
"bdd": "Bunama",
"bde": "Bade",
"bdf": "Biage",
"bdg": "Bonggi",
"bdh": "Baka (South Sudan)",
"bdi": "Burun",
"bdj": "Bai (South Sudan); Bai",
"bdk": "Budukh",
"bdl": "Indonesian Bajau",
"bdm": "Buduma",
"bdn": "Baldemu",
"bdo": "Morom",
"bdp": "Bende",
"bdq": "Bahnar",
"bdr": "West Coast Bajau",
"bds": "Burunge",
"bdt": "Bokoto",
"bdu": "Oroko",
"bdv": "Bodo Parja",
"bdw": "Baham",
"bdx": "Budong-Budong",
"bdy": "Bandjalang",
"bdz": "Badeshi",
"be": "Belarusian",
"bea": "Beaver",
"beb": "Bebele",
"bec": "Iceve-Maci",
"bed": "Bedoanas",
"bee": "Byangsi",
"bef": "Benabena",
"beg": "Belait",
"beh": "Biali",
"bei": "Bekati'",
"bej": "Beja; Bedawiyet",
"bek": "Bebeli",
"bem": "Bemba (Zambia)",
"beo": "Beami",
"bep": "Besoa",
"beq": "Beembe",
"ber": "Berber languages",
"bes": "Besme",
"bet": "Guiberoua Béte",
"beu": "Blagar",
"bev": "Daloa Bété",
"bew": "Betawi",
"bex": "Jur Modo",
"bey": "Beli (Papua New Guinea)",
"bez": "Bena (Tanzania)",
"bfa": "Bari",
"bfb": "Pauri Bareli",
"bfc": "Panyi Bai; Northern Bai",
"bfd": "Bafut",
"bfe": "Betaf; Tena",
"bff": "Bofi",
"bfg": "Busang Kayan",
"bfh": "Blafe",
"bfi": "British Sign Language",
"bfj": "Bafanji",
"bfk": "Ban Khor Sign Language",
"bfl": "Banda-Ndélé",
"bfm": "Mmen",
"bfn": "Bunak",
"bfo": "Malba Birifor",
"bfp": "Beba",
"bfq": "Badaga",
"bfr": "Bazigar",
"bfs": "Southern Bai",
"bft": "Balti",
"bfu": "Gahri",
"bfw": "Bondo",
"bfx": "Bantayanon",
"bfy": "Bagheli",
"bfz": "Mahasu Pahari",
"bg": "Bulgarian",
"bga": "Gwamhi-Wuri",
"bgb": "Bobongko",
"bgc": "Haryanvi",
"bgd": "Rathwi Bareli",
"bge": "Bauria",
"bgf": "Bangandu",
"bgg": "Bugun",
"bgi": "Giangan",
"bgj": "Bangolan",
"bgk": "Bit; Buxinhua",
"bgl": "Bo (Laos)",
"bgn": "Western Balochi",
"bgo": "Baga Koga",
"bgp": "Eastern Balochi",
"bgq": "Bagri",
"bgr": "Bawm Chin",
"bgs": "Tagabawa",
"bgt": "Bughotu",
"bgu": "Mbongno",
"bgv": "Warkay-Bipim",
"bgw": "Bhatri",
"bgx": "Balkan Gagauz Turkish",
"bgy": "Benggoi",
"bgz": "Banggai",
"bh": "Bihari languages",
"bha": "Bharia",
"bhb": "Bhili",
"bhc": "Biga",
"bhd": "Bhadrawahi",
"bhe": "Bhaya",
"bhf": "Odiai",
"bhg": "Binandere",
"bhh": "Bukharic",
"bhi": "Bhilali",
"bhj": "Bahing",
"bhl": "Bimin",
"bhm": "Bathari",
"bhn": "Bohtan Neo-Aramaic",
"bho": "Bhojpuri",
"bhp": "Bima",
"bhq": "Tukang Besi South",
"bhr": "Bara Malagasy",
"bhs": "Buwal",
"bht": "Bhattiyali",
"bhu": "Bhunjia",
"bhv": "Bahau",
"bhw": "Biak",
"bhx": "Bhalay",
"bhy": "Bhele",
"bhz": "Bada (Indonesia)",
"bi": "Bislama",
"bia": "Badimaya",
"bib": "Bissa; Bisa",
"bid": "Bidiyo",
"bie": "Bepour",
"bif": "Biafada",
"big": "Biangai",
"bik": "Bikol",
"bil": "Bile",
"bim": "Bimoba",
"bin": "Bini; Edo",
"bio": "Nai",
"bip": "Bila",
"biq": "Bipi",
"bir": "Bisorio",
"bit": "Berinomo",
"biu": "Biete",
"biv": "Southern Birifor",
"biw": "Kol (Cameroon)",
"bix": "Bijori",
"biy": "Birhor",
"biz": "Baloi",
"bja": "Budza",
"bjb": "Banggarla",
"bjc": "Bariji",
"bje": "Biao-Jiao Mien",
"bjf": "Barzani Jewish Neo-Aramaic",
"bjg": "Bidyogo",
"bjh": "Bahinemo",
"bji": "Burji",
"bjj": "Kanauji",
"bjk": "Barok",
"bjl": "Bulu (Papua New Guinea)",
"bjm": "Bajelani",
"bjn": "Banjar",
"bjo": "Mid-Southern Banda",
"bjp": "Fanamaket",
"bjr": "Binumarien",
"bjs": "Bajan",
"bjt": "Balanta-Ganja",
"bju": "Busuu",
"bjv": "Bedjond",
"bjw": "Bakwé",
"bjx": "Banao Itneg",
"bjy": "Bayali",
"bjz": "Baruga",
"bka": "Kyak",
"bkc": "Baka (Cameroon)",
"bkd": "Binukid; Talaandig",
"bkf": "Beeke",
"bkg": "Buraka",
"bkh": "Bakoko",
"bki": "Baki",
"bkj": "Pande",
"bkk": "Brokskat",
"bkl": "Berik",
"bkm": "Kom (Cameroon)",
"bkn": "Bukitan",
"bko": "Kwa'",
"bkp": "Boko (Democratic Republic of Congo)",
"bkq": "Bakairí",
"bkr": "Bakumpai",
"bks": "Northern Sorsoganon",
"bkt": "Boloki",
"bku": "Buhid",
"bkv": "Bekwarra",
"bkw": "Bekwel",
"bkx": "Baikeno",
"bky": "Bokyi",
"bkz": "Bungku",
"bla": "Siksika",
"blb": "Bilua",
"blc": "Bella Coola",
"bld": "Bolango",
"ble": "Balanta-Kentohe",
"blf": "Buol",
"blh": "Kuwaa",
"bli": "Bolia",
"blj": "Bolongan",
"blk": "Pa'o Karen; Pa'O",
"bll": "Biloxi",
"blm": "Beli (South Sudan)",
"bln": "Southern Catanduanes Bikol",
"blo": "Anii",
"blp": "Blablanga",
"blq": "Baluan-Pam",
"blr": "Blang",
"bls": "Balaesang",
"blt": "Tai Dam",
"blv": "Kibala; Bolo",
"blw": "Balangao",
"blx": "Mag-Indi Ayta",
"bly": "Notre",
"blz": "Balantak",
"bm": "Bambara",
"bma": "Lame",
"bmb": "Bembe",
"bmc": "Biem",
"bmd": "Baga Manduri",
"bme": "Limassa",
"bmf": "Bom-Kim",
"bmg": "Bamwe",
"bmh": "Kein",
"bmi": "Bagirmi",
"bmj": "Bote-Majhi",
"bmk": "Ghayavi",
"bml": "Bomboli",
"bmm": "Northern Betsimisaraka Malagasy",
"bmn": "Bina (Papua New Guinea)",
"bmo": "Bambalang",
"bmp": "Bulgebi",
"bmq": "Bomu",
"bmr": "Muinane",
"bms": "Bilma Kanuri",
"bmt": "Biao Mon",
"bmu": "Somba-Siawari",
"bmv": "Bum",
"bmw": "Bomwali",
"bmx": "Baimak",
"bmz": "Baramu",
"bn": "Bengali; Bangla",
"bna": "Bonerate",
"bnb": "Bookan",
"bnc": "Bontok",
"bnd": "Banda (Indonesia)",
"bne": "Bintauna",
"bnf": "Masiwang",
"bng": "Benga",
"bni": "Bangi",
"bnj": "Eastern Tawbuid",
"bnk": "Bierebo",
"bnl": "Boon",
"bnm": "Batanga",
"bnn": "Bunun",
"bno": "Bantoanon",
"bnp": "Bola",
"bnq": "Bantik",
"bnr": "Butmas-Tur",
"bns": "Bundeli",
"bnt": "Bantu languages",
"bnu": "Bentong",
"bnv": "Bonerif; Beneraf; Edwas",
"bnw": "Bisis",
"bnx": "Bangubangu",
"bny": "Bintulu",
"bnz": "Beezen",
"bo": "Tibetan",
"boa": "Bora",
"bob": "Aweer",
"boe": "Mundabli",
"bof": "Bolon",
"bog": "Bamako Sign Language",
"boh": "Boma",
"boi": "Barbareño",
"boj": "Anjam",
"bok": "Bonjo",
"bol": "Bole",
"bom": "Berom",
"bon": "Bine",
"boo": "Tiemacèwè Bozo",
"bop": "Bonkiman",
"boq": "Bogaya",
"bor": "Borôro",
"bot": "Bongo",
"bou": "Bondei",
"bov": "Tuwuli",
"bow": "Rema",
"box": "Buamu",
"boy": "Bodo (Central African Republic)",
"boz": "Tiéyaxo Bozo",
"bpa": "Daakaka",
"bpc": "Mbuk",
"bpd": "Banda-Banda",
"bpe": "Bauni",
"bpg": "Bonggo",
"bph": "Botlikh",
"bpi": "Bagupi",
"bpj": "Binji",
"bpk": "Orowe; 'Ôrôê",
"bpl": "Broome Pearling Lugger Pidgin",
"bpm": "Biyom",
"bpn": "Dzao Min",
"bpo": "Anasi",
"bpp": "Kaure",
"bpq": "Banda Malay",
"bpr": "Koronadal Blaan",
"bps": "Sarangani Blaan",
"bpt": "Barrow Point",
"bpu": "Bongu",
"bpv": "Bian Marind",
"bpw": "Bo (Papua New Guinea)",
"bpx": "Palya Bareli",
"bpy": "Bishnupriya",
"bpz": "Bilba",
"bqa": "Tchumbuli",
"bqb": "Bagusa",
"bqc": "Boko (Benin); Boo",
"bqd": "Bung",
"bqf": "Baga Kaloum",
"bqg": "Bago-Kusuntu",
"bqh": "Baima",
"bqi": "Bakhtiari",
"bqj": "Bandial",
"bqk": "Banda-Mbrès",
"bql": "Bilakura",
"bqm": "Wumboko",
"bqn": "Bulgarian Sign Language",
"bqo": "Balo",
"bqp": "Busa",
"bqq": "Biritai",
"bqr": "Burusu",
"bqs": "Bosngun",
"bqt": "Bamukumbit",
"bqu": "Boguru",
"bqv": "Koro Wachi; Begbere-Ejar",
"bqw": "Buru (Nigeria)",
"bqx": "Baangi",
"bqy": "Bengkala Sign Language",
"bqz": "Bakaka",
"br": "Breton",
"bra": "Braj",
"brb": "Brao; Lave",
"brc": "Berbice Creole Dutch",
"brd": "Baraamu",
"brf": "Bira",
"brg": "Baure",
"brh": "Brahui",
"bri": "Mokpwe",
"brj": "Bieria",
"brk": "Birked",
"brl": "Birwa",
"brm": "Barambu",
"brn": "Boruca",
"bro": "Brokkat",
"brp": "Barapasi",
"brq": "Breri",
"brr": "Birao",
"brs": "Baras",
"brt": "Bitare",
"bru": "Eastern Bru",
"brv": "Western Bru",
"brw": "Bellari",
"brx": "Bodo (India)",
"bry": "Burui",
"brz": "Bilbil",
"bs": "Bosnian",
"bsa": "Abinomn",
"bsb": "Brunei Bisaya",
"bsc": "Bassari; Oniyan",
"bse": "Wushi",
"bsf": "Bauchi",
"bsg": "Bashkardi",
"bsh": "Kati",
"bsi": "Bassossi",
"bsj": "Bangwinji",
"bsk": "Burushaski",
"bsl": "Basa-Gumna",
"bsm": "Busami",
"bsn": "Barasana-Eduria",
"bso": "Buso",
"bsp": "Baga Sitemu",
"bsq": "Bassa",
"bsr": "Bassa-Kontagora",
"bss": "Akoose",
"bst": "Basketo",
"bsu": "Bahonsuai",
"bsv": "Baga Sobané",
"bsw": "Baiso",
"bsx": "Yangkam",
"bsy": "Sabah Bisaya",
"bta": "Bata",
"btc": "Bati (Cameroon)",
"btd": "Batak Dairi",
"bte": "Gamo-Ningi",
"btf": "Birgit",
"btg": "Gagnoa Bété",
"bth": "Biatah Bidayuh",
"bti": "Burate",
"btj": "Bacanese Malay",
"btk": "Batak languages",
"btm": "Batak Mandailing",
"btn": "Ratagnon",
"bto": "Rinconada Bikol",
"btp": "Budibud",
"btq": "Batek",
"btr": "Baetora",
"bts": "Batak Simalungun",
"btt": "Bete-Bendi",
"btu": "Batu",
"btv": "Bateri",
"btw": "Butuanon",
"btx": "Batak Karo",
"bty": "Bobot",
"btz": "Batak Alas-Kluet",
"bua": "Buriat",
"bub": "Bua",
"buc": "Bushi",
"bud": "Ntcham",
"bue": "Beothuk",
"buf": "Bushoong",
"bug": "Buginese",
"buh": "Younuo Bunu",
"bui": "Bongili",
"buj": "Basa-Gurmana",
"buk": "Bugawac",
"bum": "Bulu (Cameroon)",
"bun": "Sherbro",
"buo": "Terei",
"bup": "Busoa",
"buq": "Brem",
"bus": "Bokobaru",
"but": "Bungain",
"buu": "Budu",
"buv": "Bun",
"buw": "Bubi",
"bux": "Boghom",
"buy": "Bullom So",
"buz": "Bukwen",
"bva": "Barein",
"bvb": "Bube",
"bvc": "Baelelea",
"bvd": "Baeggu",
"bve": "Berau Malay",
"bvf": "Boor",
"bvg": "Bonkeng",
"bvh": "Bure",
"bvi": "Belanda Viri",
"bvj": "Baan",
"bvk": "Bukat",
"bvl": "Bolivian Sign Language",
"bvm": "Bamunka",
"bvn": "Buna",
"bvo": "Bolgo",
"bvp": "Bumang",
"bvq": "Birri",
"bvr": "Burarra",
"bvt": "Bati (Indonesia)",
"bvu": "Bukit Malay",
"bvv": "Baniva",
"bvw": "Boga",
"bvx": "Dibole",
"bvy": "Baybayanon",
"bvz": "Bauzi",
"bwa": "Bwatoo",
"bwb": "Namosi-Naitasiri-Serua",
"bwc": "Bwile",
"bwd": "Bwaidoka",
"bwe": "Bwe Karen",
"bwf": "Boselewa",
"bwg": "Barwe",
"bwh": "Bishuo",
"bwi": "Baniwa",
"bwj": "Láá Láá Bwamu",
"bwk": "Bauwaki",
"bwl": "Bwela",
"bwm": "Biwat",
"bwn": "Wunai Bunu",
"bwo": "Boro (Ethiopia); Borna (Ethiopia)",
"bwp": "Mandobo Bawah",
"bwq": "Southern Bobo Madaré",
"bwr": "Bura-Pabir",
"bws": "Bomboma",
"bwt": "Bafaw-Balong",
"bwu": "Buli (Ghana)",
"bww": "Bwa",
"bwx": "Bu-Nao Bunu",
"bwy": "Cwi Bwamu",
"bwz": "Bwisi",
"bxa": "Tairaha",
"bxb": "Belanda Bor",
"bxc": "Molengue",
"bxd": "Pela",
"bxe": "Birale",
"bxf": "Bilur; Minigir",
"bxg": "Bangala",
"bxh": "Buhutu",
"bxi": "Pirlatapa",
"bxj": "Bayungu",
"bxk": "Bukusu; Lubukusu",
"bxl": "Jalkunan",
"bxm": "Mongolia Buriat",
"bxn": "Burduna",
"bxo": "Barikanchi",
"bxp": "Bebil",
"bxq": "Beele",
"bxr": "Russia Buriat",
"bxs": "Busam",
"bxu": "China Buriat",
"bxv": "Berakou",
"bxw": "Bankagooma",
"bxz": "Binahari",
"bya": "Batak",
"byb": "Bikya",
"byc": "Ubaghara",
"byd": "Benyadu'",
"bye": "Pouye",
"byf": "Bete",
"byg": "Baygo",
"byh": "Bhujel",
"byi": "Buyu",
"byj": "Bina (Nigeria)",
"byk": "Biao",
"byl": "Bayono",
"bym": "Bidjara",
"byn": "Bilin; Blin",
"byo": "Biyo",
"byp": "Bumaji",
"byq": "Basay",
"byr": "Baruya; Yipma",
"bys": "Burak",
"byt": "Berti",
"byv": "Medumba",
"byw": "Belhariya",
"byx": "Qaqet",
"byz": "Banaro",
"bza": "Bandi",
"bzb": "Andio",
"bzc": "Southern Betsimisaraka Malagasy",
"bzd": "Bribri",
"bze": "Jenaama Bozo",
"bzf": "Boikin",
"bzg": "Babuza",
"bzh": "Mapos Buang",
"bzi": "Bisu",
"bzj": "Belize Kriol English",
"bzk": "Nicaragua Creole English",
"bzl": "Boano (Sulawesi)",
"bzm": "Bolondo",
"bzn": "Boano (Maluku)",
"bzo": "Bozaba",
"bzp": "Kemberano",
"bzq": "Buli (Indonesia)",
"bzr": "Biri",
"bzs": "Brazilian Sign Language",
"bzt": "Brithenig",
"bzu": "Burmeso",
"bzv": "Naami",
"bzw": "Basa (Nigeria)",
"bzx": "Kɛlɛngaxo Bozo",
"bzy": "Obanliku",
"bzz": "Evant",
"ca": "Catalan; Valencian",
"caa": "Chortí",
"cab": "Garifuna",
"cac": "Chuj",
"cad": "Caddo",
"cae": "Lehar; Laalaa",
"caf": "Southern Carrier",
"cag": "Nivaclé",
"cah": "Cahuarano",
"cai": "Central American Indian languages",
"caj": "Chané",
"cak": "Kaqchikel; Cakchiquel",
"cal": "Carolinian",
"cam": "Cemuhî",
"can": "Chambri",
"cao": "Chácobo",
"cap": "Chipaya",
"caq": "Car Nicobarese",
"car": "Galibi Carib",
"cas": "Tsimané",
"cau": "Caucasian languages",
"cav": "Cavineña",
"caw": "Callawalla",
"cax": "Chiquitano",
"cay": "Cayuga",
"caz": "Canichana",
"cba": "Chibchan languages",
"cbb": "Cabiyarí",
"cbc": "Carapana",
"cbd": "Carijona",
"cbg": "Chimila",
"cbi": "Chachi",
"cbj": "Ede Cabe",
"cbk": "Chavacano",
"cbl": "Bualkhaw Chin",
"cbn": "Nyahkur",
"cbo": "Izora",
"cbq": "Tsucuba; Cuba",
"cbr": "Cashibo-Cacataibo",
"cbs": "Cashinahua",
"cbt": "Chayahuita",
"cbu": "Candoshi-Shapra",
"cbv": "Cacua",
"cbw": "Kinabalian",
"cby": "Carabayo",
"ccc": "Chamicuro",
"ccd": "Cafundo Creole",
"cce": "Chopi",
"ccg": "Samba Daka",
"cch": "Atsam",
"ccj": "Kasanga",
"ccl": "Cutchi-Swahili",
"ccm": "Malaccan Creole Malay",
"ccn": "North Caucasian languages",
"cco": "Comaltepec Chinantec",
"ccp": "Chakma",
"ccr": "Cacaopera",
"ccs": "South Caucasian languages",
"cda": "Choni",
"cdc": "Chadic languages",
"cdd": "Caddoan languages",
"cde": "Chenchu",
"cdf": "Chiru",
"cdh": "Chambeali",
"cdi": "Chodri",
"cdj": "Churahi",
"cdm": "Chepang",
"cdn": "Chaudangsi",
"cdo": "Min Dong Chinese",
"cdr": "Cinda-Regi-Tiyal",
"cds": "Chadian Sign Language",
"cdy": "Chadong",
"cdz": "Koda",
"ce": "Chechen",
"cea": "Lower Chehalis",
"ceb": "Cebuano",
"ceg": "Chamacoco",
"cek": "Eastern Khumi Chin",
"cel": "Celtic languages",
"cen": "Cen",
"cet": "Centúúm",
"cey": "Ekai Chin",
"cfa": "Dijim-Bwilim",
"cfd": "Cara",
"cfg": "Como Karim",
"cfm": "Falam Chin",
"cga": "Changriwa",
"cgc": "Kagayanen",
"cgg": "Chiga",
"cgk": "Chocangacakha",
"ch": "Chamorro",
"chb": "Chibcha",
"chc": "Catawba",
"chd": "Highland Oaxaca Chontal",
"chf": "Tabasco Chontal",
"chg": "Chagatai",
"chh": "Chinook",
"chj": "Ojitlán Chinantec",
"chk": "Chuukese",
"chl": "Cahuilla",
"chm": "Mari (Russia)",
"chn": "Chinook jargon",
"cho": "Choctaw",
"chp": "Chipewyan; Dene Suline",
"chq": "Quiotepec Chinantec",
"chr": "Cherokee",
"cht": "Cholón",
"chw": "Chuwabu",
"chx": "Chantyal",
"chy": "Cheyenne",
"chz": "Ozumacín Chinantec",
"cia": "Cia-Cia",
"cib": "Ci Gbe",
"cic": "Chickasaw",
"cid": "Chimariko",
"cie": "Cineni",
"cih": "Chinali",
"cik": "Chitkuli Kinnauri",
"cim": "Cimbrian",
"cin": "Cinta Larga",
"cip": "Chiapanec",
"cir": "Tiri; Haméa; Méa",
"ciw": "Chippewa",
"ciy": "Chaima",
"cja": "Western Cham",
"cje": "Chru",
"cjh": "Upper Chehalis",
"cji": "Chamalal",
"cjk": "Chokwe",
"cjm": "Eastern Cham",
"cjn": "Chenapian",
"cjo": "Ashéninka Pajonal",
"cjp": "Cabécar",
"cjs": "Shor",
"cjv": "Chuave",
"cjy": "Jinyu Chinese",
"ckb": "Central Kurdish",
"ckh": "Chak",
"ckl": "Cibak",
"ckm": "Chakavian",
"ckn": "Kaang Chin",
"cko": "Anufo",
"ckq": "Kajakse",
"ckr": "Kairak",
"cks": "Tayo",
"ckt": "Chukot",
"cku": "Koasati",
"ckv": "Kavalan",
"ckx": "Caka",
"cky": "Cakfem-Mushere",
"ckz": "Cakchiquel-Quiché Mixed Language",
"cla": "Ron",
"clc": "Chilcotin",
"cld": "Chaldean Neo-Aramaic",
"cle": "Lealao Chinantec",
"clh": "Chilisso",
"cli": "Chakali",
"clj": "Laitu Chin",
"clk": "Idu-Mishmi",
"cll": "Chala",
"clm": "Clallam",
"clo": "Lowland Oaxaca Chontal",
"clt": "Lautu Chin",
"clu": "Caluyanun",
"clw": "Chulym",
"cly": "Eastern Highland Chatino",
"cma": "Maa",
"cmc": "Chamic languages",
"cme": "Cerma",
"cmg": "Classical Mongolian",
"cmi": "Emberá-Chamí",
"cml": "Campalagian",
"cmm": "Michigamea",
"cmn": "Mandarin Chinese",
"cmo": "Central Mnong",
"cmr": "Mro-Khimi Chin",
"cms": "Messapic",
"cmt": "Camtho",
"cna": "Changthang",
"cnb": "Chinbon Chin",
"cnc": "Côông",
"cng": "Northern Qiang",
"cnh": "Hakha Chin; Haka Chin",
"cni": "Asháninka",
"cnk": "Khumi Chin",
"cnl": "Lalana Chinantec",
"cno": "Con",
"cnp": "Northern Ping Chinese; Northern Pinghua",
"cnq": "Chung",
"cnr": "Montenegrin",
"cns": "Central Asmat",
"cnt": "Tepetotutla Chinantec",
"cnu": "Chenoua",
"cnw": "Ngawn Chin",
"cnx": "Middle Cornish",
"co": "Corsican",
"coa": "Cocos Islands Malay",
"cob": "Chicomuceltec",
"coc": "Cocopa",
"cod": "Cocama-Cocamilla",
"coe": "Koreguaje",
"cof": "Colorado",
"cog": "Chong",
"coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma",
"coj": "Cochimi",
"cok": "Santa Teresa Cora",
"col": "Columbia-Wenatchi",
"com": "Comanche",
"con": "Cofán",
"coo": "Comox",
"cop": "Coptic",
"coq": "Coquille",
"cot": "Caquinte",
"cou": "Wamey",
"cov": "Cao Miao",
"cow": "Cowlitz",
"cox": "Nanti",
"coz": "Chochotec",
"cpa": "Palantla Chinantec",
"cpb": "Ucayali-Yurúa Ashéninka",
"cpc": "Ajyíninka Apurucayali",
"cpe": "English-based creoles and pidgins",
"cpf": "French-based creoles and pidgins",
"cpg": "Cappadocian Greek",
"cpi": "Chinese Pidgin English",
"cpn": "Cherepon",
"cpo": "Kpeego",
"cpp": "Portuguese-based creoles and pidgins",
"cps": "Capiznon",
"cpu": "Pichis Ashéninka",
"cpx": "Pu-Xian Chinese",
"cpy": "South Ucayali Ashéninka",
"cqd": "Chuanqiandian Cluster Miao",
"cr": "Cree",
"cra": "Chara",
"crb": "Island Carib",
"crc": "Lonwolwol",
"crd": "Coeur d'Alene",
"crf": "Caramanta",
"crg": "Michif",
"crh": "Crimean Tatar; Crimean Turkish",
"cri": "Sãotomense",
"crj": "Southern East Cree",
"crk": "Plains Cree",
"crl": "Northern East Cree",
"crm": "Moose Cree",
"crn": "El Nayar Cora",
"cro": "Crow",
"crp": "Creoles and pidgins",
"crq": "Iyo'wujwa Chorote",
"crr": "Carolina Algonquian",
"crs": "Seselwa Creole French",
"crt": "Iyojwa'ja Chorote",
"crv": "Chaura",
"crw": "Chrau",
"crx": "Carrier",
"cry": "Cori",
"crz": "Cruzeño",
"cs": "Czech",
"csa": "Chiltepec Chinantec",
"csb": "Kashubian",
"csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana",
"csd": "Chiangmai Sign Language",
"cse": "Czech Sign Language",
"csf": "Cuba Sign Language",
"csg": "Chilean Sign Language",
"csh": "Asho Chin",
"csi": "Coast Miwok",
"csj": "Songlai Chin",
"csk": "Jola-Kasa",
"csl": "Chinese Sign Language",
"csm": "Central Sierra Miwok",
"csn": "Colombian Sign Language",
"cso": "Sochiapam Chinantec; Sochiapan Chinantec",
"csp": "Southern Ping Chinese; Southern Pinghua",
"csq": "Croatia Sign Language",
"csr": "Costa Rican Sign Language",
"css": "Southern Ohlone",
"cst": "Northern Ohlone",
"csu": "Central Sudanic languages",
"csv": "Sumtu Chin",
"csw": "Swampy Cree",
"csx": "Cambodian Sign Language",
"csy": "Siyin Chin",
"csz": "Coos",
"cta": "Tataltepec Chatino",
"ctc": "Chetco",
"ctd": "Tedim Chin",
"cte": "Tepinapa Chinantec",
"ctg": "Chittagonian",
"cth": "Thaiphum Chin",
"ctl": "Tlacoatzintepec Chinantec",
"ctm": "Chitimacha",
"ctn": "Chhintange",
"cto": "Emberá-Catío",
"ctp": "Western Highland Chatino",
"cts": "Northern Catanduanes Bikol",
"ctt": "Wayanad Chetti",
"ctu": "Chol",
"cty": "Moundadan Chetty",
"ctz": "Zacatepec Chatino",
"cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic",
"cua": "Cua",
"cub": "Cubeo",
"cuc": "Usila Chinantec",
"cuh": "Chuka; Gichuka",
"cui": "Cuiba",
"cuj": "Mashco Piro",
"cuk": "San Blas Kuna",
"cul": "Culina; Kulina",
"cuo": "Cumanagoto",
"cup": "Cupeño",
"cuq": "Cun",
"cur": "Chhulung",
"cus": "Cushitic languages",
"cut": "Teutila Cuicatec",
"cuu": "Tai Ya",
"cuv": "Cuvok",
"cuw": "Chukwa",
"cux": "Tepeuxila Cuicatec",
"cuy": "Cuitlatec",
"cv": "Chuvash",
"cvg": "Chug",
"cvn": "Valle Nacional Chinantec",
"cwa": "Kabwa",
"cwb": "Maindo",
"cwd": "Woods Cree",
"cwe": "Kwere",
"cwg": "Chewong; Cheq Wong",
"cwt": "Kuwaataay",
"cy": "Welsh",
"cya": "Nopala Chatino",
"cyb": "Cayubaba",
"cyo": "Cuyonon",
"czh": "Huizhou Chinese",
"czk": "Knaanic",
"czn": "Zenzontepec Chatino",
"czo": "Min Zhong Chinese",
"czt": "Zotung Chin",
"da": "Danish",
"daa": "Dangaléat",
"dac": "Dambi",
"dad": "Marik",
"dae": "Duupa",
"dag": "Dagbani",
"dah": "Gwahatike",
"dai": "Day",
"daj": "Dar Fur Daju",
"dak": "Dakota",
"dal": "Dahalo",
"dam": "Damakawa",
"dao": "Daai Chin",
"daq": "Dandami Maria",
"dar": "Dargwa",
"das": "Daho-Doo",
"dau": "Dar Sila Daju",
"dav": "Taita; Dawida",
"daw": "Davawenyo",
"dax": "Dayi",
"day": "Land Dayak languages",
"daz": "Dao",
"dba": "Bangime",
"dbb": "Deno",
"dbd": "Dadiya",
"dbe": "Dabe",
"dbf": "Edopi",
"dbg": "Dogul Dom Dogon",
"dbi": "Doka",
"dbj": "Ida'an",
"dbl": "Dyirbal",
"dbm": "Duguri",
"dbn": "Duriankere",
"dbo": "Dulbu",
"dbp": "Duwai",
"dbq": "Daba",
"dbr": "Dabarre",
"dbt": "Ben Tey Dogon",
"dbu": "Bondum Dom Dogon",
"dbv": "Dungu",
"dbw": "Bankan Tey Dogon",
"dby": "Dibiyaso",
"dcc": "Deccan",
"dcr": "Negerhollands",
"dda": "Dadi Dadi",
"ddd": "Dongotono",
"dde": "Doondo",
"ddg": "Fataluku",
"ddi": "West Goodenough",
"ddj": "Jaru",
"ddn": "Dendi (Benin)",
"ddo": "Dido",
"ddr": "Dhudhuroa",
"dds": "Donno So Dogon",
"ddw": "Dawera-Daweloor",
"de": "German",
"dec": "Dagik",
"ded": "Dedua",
"dee": "Dewoin",
"def": "Dezfuli",
"deg": "Degema",
"deh": "Dehwari",
"dei": "Demisa",
"dek": "Dek",
"del": "Delaware",
"dem": "Dem",
"den": "Slave (Athapascan)",
"dep": "Pidgin Delaware",
"deq": "Dendi (Central African Republic)",
"der": "Deori",
"des": "Desano",
"dev": "Domung",
"dez": "Dengese",
"dga": "Southern Dagaare",
"dgb": "Bunoge Dogon",
"dgc": "Casiguran Dumagat Agta",
"dgd": "Dagaari Dioula",
"dge": "Degenan",
"dgg": "Doga",
"dgh": "Dghwede",
"dgi": "Northern Dagara",
"dgk": "Dagba",
"dgl": "Andaandi; Dongolawi",
"dgn": "Dagoman",
"dgo": "Dogri (individual language)",
"dgr": "Dogrib; Tłı̨chǫ",
"dgs": "Dogoso",
"dgt": "Ndra'ngith",
"dgw": "Daungwurrung",
"dgx": "Doghoro",
"dgz": "Daga",
"dhd": "Dhundari",
"dhg": "Dhangu-Djangu; Dhangu; Djangu",
"dhi": "Dhimal",
"dhl": "Dhalandji",
"dhm": "Zemba",
"dhn": "Dhanki",
"dho": "Dhodia",
"dhr": "Dhargari",
"dhs": "Dhaiso",
"dhu": "Dhurga",
"dhv": "Dehu; Drehu",
"dhw": "Dhanwar (Nepal)",
"dhx": "Dhungaloo",
"dia": "Dia",
"dib": "South Central Dinka",
"dic": "Lakota Dida",
"did": "Didinga",
"dif": "Dieri; Diyari",
"dig": "Digo; Chidigo",
"dih": "Kumiai",
"dii": "Dimbong",
"dij": "Dai",
"dik": "Southwestern Dinka",
"dil": "Dilling",
"dim": "Dime",
"din": "Dinka",
"dio": "Dibo",
"dip": "Northeastern Dinka",
"diq": "Dimli (individual language)",
"dir": "Dirim",
"dis": "Dimasa",
"diu": "Diriku",
"diw": "Northwestern Dinka",
"dix": "Dixon Reef",
"diy": "Diuwe",
"diz": "Ding",
"dja": "Djadjawurrung",
"djb": "Djinba",
"djc": "Dar Daju Daju",
"djd": "Djamindjung; Ngaliwurru",
"dje": "Zarma",
"djf": "Djangun",
"dji": "Djinang",
"djj": "Djeebbana",
"djk": "Eastern Maroon Creole; Businenge Tongo; Nenge",
"djm": "Jamsay Dogon",
"djn": "Jawoyn; Djauan",
"djo": "Jangkang",
"djr": "Djambarrpuyngu",
"dju": "Kapriman",
"djw": "Djawi",
"dka": "Dakpakha",
"dkg": "Kadung",
"dkk": "Dakka",
"dkr": "Kuijau",
"dks": "Southeastern Dinka",
"dkx": "Mazagway",
"dlg": "Dolgan",
"dlk": "Dahalik",
"dlm": "Dalmatian",
"dln": "Darlong",
"dma": "Duma",
"dmb": "Mombo Dogon",
"dmc": "Gavak",
"dmd": "Madhi Madhi",
"dme": "Dugwor",
"dmf": "Medefaidrin",
"dmg": "Upper Kinabatangan",
"dmk": "Domaaki",
"dml": "Dameli",
"dmm": "Dama",
"dmn": "Mande languages",
"dmo": "Kemedzung",
"dmr": "East Damar",
"dms": "Dampelas",
"dmu": "Dubu; Tebi",
"dmv": "Dumpas",
"dmw": "Mudburra",
"dmx": "Dema",
"dmy": "Demta; Sowari",
"dna": "Upper Grand Valley Dani",
"dnd": "Daonda",
"dne": "Ndendeule",
"dng": "Dungan",
"dni": "Lower Grand Valley Dani",
"dnj": "Dan",
"dnk": "Dengka",
"dnn": "Dzùùngoo",
"dno": "Ndrulo; Northern Lendu",
"dnr": "Danaru",
"dnt": "Mid Grand Valley Dani",
"dnu": "Danau",
"dnv": "Danu",
"dnw": "Western Dani",
"dny": "Dení",
"doa": "Dom",
"dob": "Dobu",
"doc": "Northern Dong",
"doe": "Doe",
"dof": "Domu",
"doh": "Dong",
"doi": "Dogri (macrolanguage)",
"dok": "Dondo",
"dol": "Doso",
"don": "Toura (Papua New Guinea)",
"doo": "Dongo",
"dop": "Lukpa",
"doq": "Dominican Sign Language",
"dor": "Dori'o",
"dos": "Dogosé",
"dot": "Dass",
"dov": "Dombe",
"dow": "Doyayo",
"dox": "Bussa",
"doy": "Dompo",
"doz": "Dorze",
"dpp": "Papar",
"dra": "Dravidian languages",
"drb": "Dair",
"drc": "Minderico",
"drd": "Darmiya",
"dre": "Dolpo",
"drg": "Rungus",
"dri": "C'Lela",
"drl": "Paakantyi",
"drn": "West Damar",
"dro": "Daro-Matu Melanau",
"drq": "Dura",
"drs": "Gedeo",
"drt": "Drents",
"dru": "Rukai",
"dry": "Darai",
"dsb": "Lower Sorbian",
"dse": "Dutch Sign Language",
"dsh": "Daasanach",
"dsi": "Disa",
"dsl": "Danish Sign Language",
"dsn": "Dusner",
"dso": "Desiya",
"dsq": "Tadaksahak",
"dsz": "Mardin Sign Language",
"dta": "Daur",
"dtb": "Labuk-Kinabatangan Kadazan",
"dtd": "Ditidaht",
"dth": "Adithinngithigh",
"dti": "Ana Tinga Dogon",
"dtk": "Tene Kan Dogon",
"dtm": "Tomo Kan Dogon",
"dtn": "Daatsʼíin",
"dto": "Tommo So Dogon",
"dtp": "Kadazan Dusun; Central Dusun",
"dtr": "Lotud",
"dts": "Toro So Dogon",
"dtt": "Toro Tegu Dogon",
"dtu": "Tebul Ure Dogon",
"dty": "Dotyali",
"dua": "Duala",
"dub": "Dubli",
"duc": "Duna",
"due": "Umiray Dumaget Agta",
"duf": "Dumbea; Drubea",
"dug": "Duruma; Chiduruma",
"duh": "Dungra Bhil",
"dui": "Dumun",
"duk": "Uyajitaya",
"dul": "Alabat Island Agta",
"dum": "Middle Dutch (ca. 1050-1350)",
"dun": "Dusun Deyah",
"duo": "Dupaninan Agta",
"dup": "Duano",
"duq": "Dusun Malang",
"dur": "Dii",
"dus": "Dumi",
"duu": "Drung",
"duv": "Duvle",
"duw": "Dusun Witu",
"dux": "Duungooma",
"duy": "Dicamay Agta",
"duz": "Duli-Gey",
"dv": "Dhivehi; Divehi; Maldivian",
"dva": "Duau",
"dwa": "Diri",
"dwk": "Dawik Kui",
"dwr": "Dawro",
"dws": "Dutton World Speedwords",
"dwu": "Dhuwal",
"dww": "Dawawa",
"dwy": "Dhuwaya",
"dwz": "Dewas Rai",
"dya": "Dyan",
"dyb": "Dyaberdyaber",
"dyd": "Dyugun",
"dyg": "Villa Viciosa Agta",
"dyi": "Djimini Senoufo",
"dym": "Yanda Dom Dogon",
"dyn": "Dyangadi; Dhanggatti",
"dyo": "Jola-Fonyi",
"dyu": "Dyula",
"dyy": "Djabugay; Dyaabugay",
"dz": "Dzongkha",
"dza": "Tunzu",
"dze": "Djiwarli",
"dzg": "Dazaga",
"dzl": "Dzalakha",
"dzn": "Dzando",
"eaa": "Karenggapa",
"ebc": "Beginci",
"ebg": "Ebughu",
"ebk": "Eastern Bontok",
"ebo": "Teke-Ebo",
"ebr": "Ebrié",
"ebu": "Embu; Kiembu",
"ecr": "Eteocretan",
"ecs": "Ecuadorian Sign Language",
"ecy": "Eteocypriot",
"ee": "Ewe",
"eee": "E",
"efa": "Efai",
"efe": "Efe",
"efi": "Efik",
"ega": "Ega",
"egl": "Emilian",
"egm": "Benamanga",
"ego": "Eggon",
"egx": "Egyptian languages",
"egy": "Egyptian (Ancient)",
"ehs": "Miyakubo Sign Language",
"ehu": "Ehueun",
"eip": "Eipomek",
"eit": "Eitiep",
"eiv": "Askopan",
"eja": "Ejamat",
"eka": "Ekajuk",
"eke": "Ekit",
"ekg": "Ekari",
"eki": "Eki",
"ekk": "Standard Estonian",
"ekl": "Kol (Bangladesh); Kol",
"ekm": "Elip",
"eko": "Koti",
"ekp": "Ekpeye",
"ekr": "Yace",
"eky": "Eastern Kayah",
"el": "Modern Greek (1453-)",
"ele": "Elepi",
"elh": "El Hugeirat",
"eli": "Nding",
"elk": "Elkei",
"elm": "Eleme",
"elo": "El Molo",
"elu": "Elu",
"elx": "Elamite",
"ema": "Emai-Iuleha-Ora",
"emb": "Embaloh",
"eme": "Emerillon",
"emg": "Eastern Meohang",
"emi": "Mussau-Emira",
"emk": "Eastern Maninkakan",
"emm": "Mamulique",
"emn": "Eman",
"emp": "Northern Emberá",
"emq": "Eastern Minyag",
"ems": "Pacific Gulf Yupik",
"emu": "Eastern Muria",
"emw": "Emplawas",
"emx": "Erromintxela",
"emy": "Epigraphic Mayan",
"emz": "Mbessa",
"en": "English",
"ena": "Apali",
"enb": "Markweeta",
"enc": "En",
"end": "Ende",
"enf": "Forest Enets",
"enh": "Tundra Enets",
"enl": "Enlhet",
"enm": "Middle English (1100-1500)",
"enn": "Engenni",
"eno": "Enggano",
"enq": "Enga",
"enr": "Emumu; Emem",
"enu": "Enu",
"env": "Enwan (Edo State)",
"enw": "Enwan (Akwa Ibom State)",
"enx": "Enxet",
"eo": "Esperanto",
"eot": "Beti (Côte d'Ivoire)",
"epi": "Epie",
"era": "Eravallan",
"erg": "Sie",
"erh": "Eruwa",
"eri": "Ogea",
"erk": "South Efate",
"ero": "Horpa",
"err": "Erre",
"ers": "Ersu",
"ert": "Eritai",
"erw": "Erokwanas",
"es": "Spanish; Castilian",
"ese": "Ese Ejja",
"esg": "Aheri Gondi",
"esh": "Eshtehardi",
"esi": "North Alaskan Inupiatun",
"esk": "Northwest Alaska Inupiatun",
"esl": "Egypt Sign Language",
"esm": "Esuma",
"esn": "Salvadoran Sign Language",
"eso": "Estonian Sign Language",
"esq": "Esselen",
"ess": "Central Siberian Yupik",
"esu": "Central Yupik",
"esx": "Eskimo-Aleut languages",
"esy": "Eskayan",
"et": "Estonian",
"etb": "Etebi",
"etc": "Etchemin",
"eth": "Ethiopian Sign Language",
"etn": "Eton (Vanuatu)",
"eto": "Eton (Cameroon)",
"etr": "Edolo",
"ets": "Yekhee",
"ett": "Etruscan",
"etu": "Ejagham",
"etx": "Eten",
"etz": "Semimi",
"eu": "Basque",
"euq": "Basque (family)",
"eve": "Even",
"evh": "Uvbie",
"evn": "Evenki",
"ewo": "Ewondo",
"ext": "Extremaduran",
"eya": "Eyak",
"eyo": "Keiyo",
"eza": "Ezaa",
"eze": "Uzekwe",
"fa": "Persian",
"faa": "Fasu",
"fab": "Fa d'Ambu",
"fad": "Wagi",
"faf": "Fagani",
"fag": "Finongan",
"fah": "Baissa Fali",
"fai": "Faiwol",
"faj": "Faita",
"fak": "Fang (Cameroon)",
"fal": "South Fali",
"fam": "Fam",
"fan": "Fang (Equatorial Guinea)",
"fap": "Paloor",
"far": "Fataleka",
"fat": "Fanti",
"fau": "Fayu",
"fax": "Fala",
"fay": "Southwestern Fars",
"faz": "Northwestern Fars",
"fbl": "West Albay Bikol",
"fcs": "Quebec Sign Language",
"fer": "Feroge",
"ff": "Fulah",
"ffi": "Foia Foia",
"ffm": "Maasina Fulfulde",
"fgr": "Fongoro",
"fi": "Finnish",
"fia": "Nobiin",
"fie": "Fyer",
"fif": "Faifi",
"fil": "Filipino; Pilipino",
"fip": "Fipa",
"fir": "Firan",
"fit": "Tornedalen Finnish; Meänkieli",
"fiu": "Finno-Ugrian languages",
"fiw": "Fiwaga",
"fj": "Fijian",
"fkk": "Kirya-Konzəl",
"fkv": "Kven Finnish",
"fla": "Kalispel-Pend d'Oreille",
"flh": "Foau",
"fli": "Fali",
"fll": "North Fali",
"fln": "Flinders Island",
"flr": "Fuliiru",
"fly": "Flaaitaal; Tsotsitaal",
"fmp": "Fe'fe'",
"fmu": "Far Western Muria",
"fnb": "Fanbak",
"fng": "Fanagalo",
"fni": "Fania",
"fo": "Faroese",
"fod": "Foodo",
"foi": "Foi",
"fom": "Foma",
"fon": "Fon",
"for": "Fore",
"fos": "Siraya",
"fox": "Formosan languages",
"fpe": "Fernando Po Creole English",
"fqs": "Fas",
"fr": "French",
"frc": "Cajun French",
"frd": "Fordata",
"frk": "Frankish",
"frm": "Middle French (ca. 1400-1600)",
"fro": "Old French (842-ca. 1400)",
"frp": "Arpitan; Francoprovençal",
"frq": "Forak",
"frr": "Northern Frisian",
"frs": "Eastern Frisian",
"frt": "Fortsenal",
"fse": "Finnish Sign Language",
"fsl": "French Sign Language",
"fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli",
"fub": "Adamawa Fulfulde",
"fuc": "Pulaar",
"fud": "East Futuna",
"fue": "Borgu Fulfulde",
"fuf": "Pular",
"fuh": "Western Niger Fulfulde",
"fui": "Bagirmi Fulfulde",
"fuj": "Ko",
"fum": "Fum",
"fun": "Fulniô",
"fuq": "Central-Eastern Niger Fulfulde",
"fur": "Friulian",
"fut": "Futuna-Aniwa",
"fuu": "Furu",
"fuv": "Nigerian Fulfulde",
"fuy": "Fuyug",
"fvr": "Fur",
"fwa": "Fwâi",
"fwe": "Fwe",
"fy": "Western Frisian",
"ga": "Irish",
"gaa": "Ga",
"gab": "Gabri",
"gac": "Mixed Great Andamanese",
"gad": "Gaddang",
"gae": "Guarequena",
"gaf": "Gende",
"gag": "Gagauz",
"gah": "Alekano",
"gai": "Borei",
"gaj": "Gadsup",
"gak": "Gamkonora",
"gal": "Galolen",
"gam": "Kandawo",
"gan": "Gan Chinese",
"gao": "Gants",
"gap": "Gal",
"gaq": "Gata'",
"gar": "Galeya",
"gas": "Adiwasi Garasia",
"gat": "Kenati",
"gau": "Mudhili Gadaba",
"gaw": "Nobonob",
"gax": "Borana-Arsi-Guji Oromo",
"gay": "Gayo",
"gaz": "West Central Oromo",
"gba": "Gbaya (Central African Republic)",
"gbb": "Kaytetye",
"gbd": "Karajarri",
"gbe": "Niksek",
"gbf": "Gaikundi",
"gbg": "Gbanziri",
"gbh": "Defi Gbe",
"gbi": "Galela",
"gbj": "Bodo Gadaba",
"gbk": "Gaddi",
"gbl": "Gamit",
"gbm": "Garhwali",
"gbn": "Mo'da",
"gbo": "Northern Grebo",
"gbp": "Gbaya-Bossangoa",
"gbq": "Gbaya-Bozoum",
"gbr": "Gbagyi",
"gbs": "Gbesi Gbe",
"gbu": "Gagadu",
"gbv": "Gbanu",
"gbw": "Gabi-Gabi",
"gbx": "Eastern Xwla Gbe",
"gby": "Gbari",
"gbz": "Zoroastrian Dari",
"gcc": "Mali",
"gcd": "Ganggalida",
"gce": "Galice",
"gcf": "Guadeloupean Creole French",
"gcl": "Grenadian Creole English",
"gcn": "Gaina",
"gcr": "Guianese Creole French",
"gct": "Colonia Tovar German",
"gd": "Scottish Gaelic; Gaelic",
"gda": "Gade Lohar",
"gdb": "Pottangi Ollar Gadaba",
"gdc": "Gugu Badhun",
"gdd": "Gedaged",
"gde": "Gude",
"gdf": "Guduf-Gava",
"gdg": "Ga'dang",
"gdh": "Gadjerawang; Gajirrabeng",
"gdi": "Gundi",
"gdj": "Gurdjar",
"gdk": "Gadang",
"gdl": "Dirasha",
"gdm": "Laal",
"gdn": "Umanakaina",
"gdo": "Ghodoberi",
"gdq": "Mehri",
"gdr": "Wipi",
"gds": "Ghandruk Sign Language",
"gdt": "Kungardutyi",
"gdu": "Gudu",
"gdx": "Godwari",
"gea": "Geruma",
"geb": "Kire",
"gec": "Gboloo Grebo",
"ged": "Gade",
"gef": "Gerai",
"geg": "Gengle",
"geh": "Hutterite German; Hutterisch",
"gei": "Gebe",
"gej": "Gen",
"gek": "Ywom",
"gel": "ut-Ma'in",
"gem": "Germanic languages",
"geq": "Geme",
"ges": "Geser-Gorom",
"gev": "Eviya",
"gew": "Gera",
"gex": "Garre",
"gey": "Enya",
"gez": "Geez",
"gfk": "Patpatar",
"gft": "Gafat",
"gga": "Gao",
"ggb": "Gbii",
"ggd": "Gugadj",
"gge": "Gurr-goni",
"ggg": "Gurgula",
"ggk": "Kungarakany",
"ggl": "Ganglau",
"ggt": "Gitua",
"ggu": "Gagu; Gban",
"ggw": "Gogodala",
"gha": "Ghadamès",
"ghc": "Hiberno-Scottish Gaelic",
"ghe": "Southern Ghale",
"ghh": "Northern Ghale",
"ghk": "Geko Karen",
"ghl": "Ghulfan",
"ghn": "Ghanongga",
"gho": "Ghomara",
"ghr": "Ghera",
"ghs": "Guhu-Samane",
"ght": "Kuke; Kutang Ghale",
"gia": "Kija",
"gib": "Gibanawa",
"gic": "Gail",
"gid": "Gidar",
"gie": "Gaɓogbo; Guébie",
"gig": "Goaria",
"gih": "Githabul",
"gii": "Girirra",
"gil": "Gilbertese",
"gim": "Gimi (Eastern Highlands)",
"gin": "Hinukh",
"gip": "Gimi (West New Britain)",
"giq": "Green Gelao",
"gir": "Red Gelao",
"gis": "North Giziga",
"git": "Gitxsan",
"giu": "Mulao",
"giw": "White Gelao",
"gix": "Gilima",
"giy": "Giyug",
"giz": "South Giziga",
"gjk": "Kachi Koli",
"gjm": "Gunditjmara",
"gjn": "Gonja",
"gjr": "Gurindji Kriol",
"gju": "Gujari",
"gka": "Guya",
"gkd": "Magɨ (Madang Province)",
"gke": "Ndai",
"gkn": "Gokana",
"gko": "Kok-Nar",
"gkp": "Guinea Kpelle",
"gku": "ǂUngkue",
"gl": "Galician",
"glb": "Belning",
"glc": "Bon Gula",
"gld": "Nanai",
"glh": "Northwest Pashai; Northwest Pashayi",
"glj": "Gula Iro",
"glk": "Gilaki",
"gll": "Garlali",
"glo": "Galambu",
"glr": "Glaro-Twabo",
"glu": "Gula (Chad)",
"glw": "Glavda",
"gly": "Gule",
"gma": "Gambera",
"gmb": "Gula'alaa",
"gmd": "Mághdì",
"gme": "East Germanic languages",
"gmg": "Magɨyi",
"gmh": "Middle High German (ca. 1050-1500)",
"gml": "Middle Low German",
"gmm": "Gbaya-Mbodomo",
"gmn": "Gimnime",
"gmq": "North Germanic languages",
"gmr": "Mirning; Mirniny",
"gmu": "Gumalu",
"gmv": "Gamo",
"gmw": "West Germanic languages",
"gmx": "Magoma",
"gmy": "Mycenaean Greek",
"gmz": "Mgbolizhia",
"gn": "Guarani",
"gna": "Kaansa",
"gnb": "Gangte",
"gnc": "Guanche",
"gnd": "Zulgo-Gemzek",
"gne": "Ganang",
"gng": "Ngangam",
"gnh": "Lere",
"gni": "Gooniyandi",
"gnj": "Ngen",
"gnk": "ǁGana",
"gnl": "Gangulu",
"gnm": "Ginuman",
"gnn": "Gumatj",
"gno": "Northern Gondi",
"gnq": "Gana",
"gnr": "Gureng Gureng",
"gnt": "Guntai",
"gnu": "Gnau",
"gnw": "Western Bolivian Guaraní",
"gnz": "Ganzi",
"goa": "Guro",
"gob": "Playero",
"goc": "Gorakor",
"god": "Godié",
"goe": "Gongduk",
"gof": "Gofa",
"gog": "Gogo",
"goh": "Old High German (ca. 750-1050)",
"goi": "Gobasi",
"goj": "Gowlan",
"gok": "Gowli",
"gol": "Gola",
"gom": "Goan Konkani",
"gon": "Gondi",
"goo": "Gone Dau",
"gop": "Yeretuar",
"goq": "Gorap",
"gor": "Gorontalo",
"gos": "Gronings",
"got": "Gothic",
"gou": "Gavar",
"gov": "Goo",
"gow": "Gorowa",
"gox": "Gobu",
"goy": "Goundo",
"goz": "Gozarkhani",
"gpa": "Gupa-Abawa",
"gpe": "Ghanaian Pidgin English",
"gpn": "Taiap",
"gqa": "Ga'anda",
"gqi": "Guiqiong",
"gqn": "Guana (Brazil)",
"gqr": "Gor",
"gqu": "Qau",
"gra": "Rajput Garasia",
"grb": "Grebo",
"grc": "Ancient Greek (to 1453)",
"grd": "Guruntum-Mbaaru",
"grg": "Madi",
"grh": "Gbiri-Niragu",
"gri": "Ghari",
"grj": "Southern Grebo",
"grk": "Greek languages",
"grm": "Kota Marudu Talantang",
"gro": "Groma",
"grq": "Gorovu",
"grr": "Taznatit",
"grs": "Gresi",
"grt": "Garo",
"gru": "Kistane",
"grv": "Central Grebo",
"grw": "Gweda",
"grx": "Guriaso",
"gry": "Barclayville Grebo",
"grz": "Guramalum",
"gse": "Ghanaian Sign Language",
"gsg": "German Sign Language",
"gsl": "Gusilay",
"gsm": "Guatemalan Sign Language",
"gsn": "Nema; Gusan",
"gso": "Southwest Gbaya",
"gsp": "Wasembo",
"gss": "Greek Sign Language",
"gsw": "Swiss German; Alemannic; Alsatian",
"gta": "Guató",
"gtu": "Aghu-Tharnggala",
"gu": "Gujarati",
"gua": "Shiki",
"gub": "Guajajára",
"guc": "Wayuu",
"gud": "Yocoboué Dida",
"gue": "Gurindji",
"guf": "Gupapuyngu",
"gug": "Paraguayan Guaraní",
"guh": "Guahibo",
"gui": "Eastern Bolivian Guaraní",
"guk": "Gumuz",
"gul": "Sea Island Creole English",
"gum": "Guambiano",
"gun": "Mbyá Guaraní",
"guo": "Guayabero",
"gup": "Gunwinggu",
"guq": "Aché",
"gur": "Farefare",
"gus": "Guinean Sign Language",
"gut": "Maléku Jaíka",
"guu": "Yanomamö",
"guw": "Gun",
"gux": "Gourmanchéma",
"guz": "Gusii; Ekegusii",
"gv": "Manx",
"gva": "Guana (Paraguay)",
"gvc": "Guanano",
"gve": "Duwet",
"gvf": "Golin",
"gvj": "Guajá",
"gvl": "Gulay",
"gvm": "Gurmana",
"gvn": "Kuku-Yalanji",
"gvo": "Gavião Do Jiparaná",
"gvp": "Pará Gavião",
"gvr": "Gurung",
"gvs": "Gumawana",
"gvy": "Guyani",
"gwa": "Mbato",
"gwb": "Gwa",
"gwc": "Gawri; Kalami",
"gwd": "Gawwada",
"gwe": "Gweno",
"gwf": "Gowro",
"gwg": "Moo",
"gwi": "Gwichʼin",
"gwj": "ǀGwi",
"gwm": "Awngthim",
"gwn": "Gwandara",
"gwr": "Gwere",
"gwt": "Gawar-Bati",
"gwu": "Guwamu",
"gww": "Kwini",
"gwx": "Gua",
"gxx": "Wè Southern",
"gya": "Northwest Gbaya",
"gyb": "Garus",
"gyd": "Kayardild",
"gye": "Gyem",
"gyf": "Gungabula",
"gyg": "Gbayi",
"gyi": "Gyele",
"gyl": "Gayil",
"gym": "Ngäbere",
"gyn": "Guyanese Creole English",
"gyo": "Gyalsumdo",
"gyr": "Guarayu",
"gyy": "Gunya",
"gyz": "Geji; Gyaazi",
"gza": "Ganza",
"gzi": "Gazi",
"gzn": "Gane",
"ha": "Hausa",
"haa": "Han",
"hab": "Hanoi Sign Language",
"hac": "Gurani",
"had": "Hatam",
"hae": "Eastern Oromo",
"haf": "Haiphong Sign Language",
"hag": "Hanga",
"hah": "Hahon",
"hai": "Haida",
"haj": "Hajong",
"hak": "Hakka Chinese",
"hal": "Halang",
"ham": "Hewa",
"han": "Hangaza",
"hao": "Hakö",
"hap": "Hupla",
"haq": "Ha",
"har": "Harari",
"has": "Haisla",
"hav": "Havu",
"haw": "Hawaiian",
"hax": "Southern Haida",
"hay": "Haya",
"haz": "Hazaragi",
"hba": "Hamba",
"hbb": "Huba",
"hbn": "Heiban",
"hbo": "Ancient Hebrew",
"hbu": "Habu",
"hca": "Andaman Creole Hindi",
"hch": "Huichol",
"hdn": "Northern Haida",
"hds": "Honduras Sign Language",
"hdy": "Hadiyya",
"he": "Hebrew",
"hea": "Northern Qiandong Miao",
"hed": "Herdé",
"heg": "Helong",
"heh": "Hehe",
"hei": "Heiltsuk",
"hem": "Hemba",
"hgm": "Haiǁom",
"hgw": "Haigwai",
"hhi": "Hoia Hoia",
"hhr": "Kerak",
"hhy": "Hoyahoya",
"hi": "Hindi",
"hia": "Lamang",
"hib": "Hibito",
"hid": "Hidatsa",
"hif": "Fiji Hindi",
"hig": "Kamwe",
"hih": "Pamosu",
"hii": "Hinduri",
"hij": "Hijuk",
"hik": "Seit-Kaitetu",
"hil": "Hiligaynon",
"him": "Himachali languages; Western Pahari languages",
"hio": "Tsoa",
"hir": "Himarimã",
"hit": "Hittite",
"hiw": "Hiw",
"hix": "Hixkaryána",
"hji": "Haji",
"hka": "Kahe",
"hke": "Hunde",
"hkh": "Khah; Poguli",
"hkk": "Hunjara-Kaina Ke",
"hkn": "Mel-Khaonh",
"hks": "Hong Kong Sign Language; Heung Kong Sau Yue",
"hla": "Halia",
"hlb": "Halbi",
"hld": "Halang Doan",
"hle": "Hlersu",
"hlt": "Matu Chin",
"hlu": "Hieroglyphic Luwian",
"hma": "Southern Mashan Hmong; Southern Mashan Miao",
"hmb": "Humburi Senni Songhay",
"hmc": "Central Huishui Hmong; Central Huishui Miao",
"hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao",
"hme": "Eastern Huishui Hmong; Eastern Huishui Miao",
"hmf": "Hmong Don",
"hmg": "Southwestern Guiyang Hmong",
"hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao",
"hmi": "Northern Huishui Hmong; Northern Huishui Miao",
"hmj": "Ge; Gejia",
"hmk": "Maek",
"hml": "Luopohe Hmong; Luopohe Miao",
"hmm": "Central Mashan Hmong; Central Mashan Miao",
"hmn": "Hmong; Mong",
"hmp": "Northern Mashan Hmong; Northern Mashan Miao",
"hmq": "Eastern Qiandong Miao",
"hmr": "Hmar",
"hms": "Southern Qiandong Miao",
"hmt": "Hamtai",
"hmu": "Hamap",
"hmv": "Hmong Dô",
"hmw": "Western Mashan Hmong; Western Mashan Miao",
"hmx": "Hmong-Mien languages",
"hmy": "Southern Guiyang Hmong; Southern Guiyang Miao",
"hmz": "Hmong Shua; Sinicized Miao",
"hna": "Mina (Cameroon)",
"hnd": "Southern Hindko",
"hne": "Chhattisgarhi",
"hng": "Hungu",
"hnh": "ǁAni",
"hni": "Hani",
"hnj": "Hmong Njua; Mong Leng; Mong Njua",
"hnn": "Hanunoo",
"hno": "Northern Hindko",
"hns": "Caribbean Hindustani",
"hnu": "Hung",
"ho": "Hiri Motu",
"hoa": "Hoava",
"hob": "Mari (Madang Province)",
"hoc": "Ho",
"hod": "Holma",
"hoe": "Horom",
"hoh": "Hobyót",
"hoi": "Holikachuk",
"hoj": "Hadothi; Haroti",
"hok": "Hokan languages",
"hol": "Holu",
"hom": "Homa",
"hoo": "Holoholo",
"hop": "Hopi",
"hor": "Horo",
"hos": "Ho Chi Minh City Sign Language",
"hot": "Hote; Malê",
"hov": "Hovongan",
"how": "Honi",
"hoy": "Holiya",
"hoz": "Hozo",
"hpo": "Hpon",
"hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language",
"hr": "Croatian",
"hra": "Hrangkhol",
"hrc": "Niwer Mil",
"hre": "Hre",
"hrk": "Haruku",
"hrm": "Horned Miao",
"hro": "Haroi",
"hrp": "Nhirrpi",
"hrt": "Hértevin",
"hru": "Hruso",
"hrw": "Warwar Feni",
"hrx": "Hunsrik",
"hrz": "Harzani",
"hsb": "Upper Sorbian",
"hsh": "Hungarian Sign Language",
"hsl": "Hausa Sign Language",
"hsn": "Xiang Chinese",
"hss": "Harsusi",
"ht": "Haitian; Haitian Creole",
"hti": "Hoti",
"hto": "Minica Huitoto",
"hts": "Hadza",
"htu": "Hitu",
"htx": "Middle Hittite",
"hu": "Hungarian",
"hub": "Huambisa",
"huc": "ǂHua; ǂʼAmkhoe",
"hud": "Huaulu",
"hue": "San Francisco Del Mar Huave",
"huf": "Humene",
"hug": "Huachipaeri",
"huh": "Huilliche",
"hui": "Huli",
"huj": "Northern Guiyang Hmong; Northern Guiyang Miao",
"huk": "Hulung",
"hul": "Hula",
"hum": "Hungana",
"huo": "Hu",
"hup": "Hupa",
"huq": "Tsat",
"hur": "Halkomelem",
"hus": "Huastec",
"hut": "Humla",
"huu": "Murui Huitoto",
"huv": "San Mateo Del Mar Huave",
"huw": "Hukumina",
"hux": "Nüpode Huitoto",
"huy": "Hulaulá",
"huz": "Hunzib",
"hvc": "Haitian Vodoun Culture Language",
"hve": "San Dionisio Del Mar Huave",
"hvk": "Haveke",
"hvn": "Sabu",
"hvv": "Santa María Del Mar Huave",
"hwa": "Wané",
"hwc": "Hawai'i Creole English; Hawai'i Pidgin",
"hwo": "Hwana",
"hy": "Armenian",
"hya": "Hya",
"hyw": "Western Armenian",
"hyx": "Armenian (family)",
"hz": "Herero",
"ia": "Interlingua (International Auxiliary Language Association)",
"iai": "Iaai",
"ian": "Iatmul",
"iar": "Purari",
"iba": "Iban",
"ibb": "Ibibio",
"ibd": "Iwaidja",
"ibe": "Akpes",
"ibg": "Ibanag",
"ibh": "Bih",
"ibl": "Ibaloi",
"ibm": "Agoi",
"ibn": "Ibino",
"ibr": "Ibuoro",
"ibu": "Ibu",
"iby": "Ibani",
"ica": "Ede Ica",
"ich": "Etkywan",
"icl": "Icelandic Sign Language",
"icr": "Islander Creole English",
"id": "Indonesian",
"ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi",
"idb": "Indo-Portuguese",
"idc": "Idon; Ajiya",
"idd": "Ede Idaca",
"ide": "Idere",
"idi": "Idi",
"idr": "Indri",
"ids": "Idesa",
"idt": "Idaté",
"idu": "Idoma",
"ie": "Interlingue; Occidental",
"ifa": "Amganad Ifugao",
"ifb": "Batad Ifugao; Ayangan Ifugao",
"ife": "Ifè",
"iff": "Ifo",
"ifk": "Tuwali Ifugao",
"ifm": "Teke-Fuumu",
"ifu": "Mayoyao Ifugao",
"ify": "Keley-I Kallahan",
"ig": "Igbo",
"igb": "Ebira",
"ige": "Igede",
"igg": "Igana",
"igl": "Igala",
"igm": "Kanggape",
"ign": "Ignaciano",
"igo": "Isebe",
"igs": "Interglossa",
"igw": "Igwe",
"ihb": "Iha Based Pidgin",
"ihi": "Ihievbe",
"ihp": "Iha",
"ihw": "Bidhawal",
"ii": "Sichuan Yi; Nuosu",
"iin": "Thiin",
"iir": "Indo-Iranian languages",
"ijc": "Izon",
"ije": "Biseni",
"ijj": "Ede Ije",
"ijn": "Kalabari",
"ijo": "Ijo languages",
"ijs": "Southeast Ijo",
"ik": "Inupiaq",
"ike": "Eastern Canadian Inuktitut",
"iki": "Iko",
"ikk": "Ika",
"ikl": "Ikulu",
"iko": "Olulumo-Ikom",
"ikp": "Ikpeshi",
"ikr": "Ikaranggal",
"iks": "Inuit Sign Language",
"ikt": "Inuinnaqtun; Western Canadian Inuktitut",
"ikv": "Iku-Gora-Ankwa",
"ikw": "Ikwere",
"ikx": "Ik",
"ikz": "Ikizu",
"ila": "Ile Ape",
"ilb": "Ila",
"ilg": "Garig-Ilgar",
"ili": "Ili Turki",
"ilk": "Ilongot",
"ilm": "Iranun (Malaysia)",
"ilo": "Iloko",
"ilp": "Iranun (Philippines)",
"ils": "International Sign",
"ilu": "Ili'uun",
"ilv": "Ilue",
"ima": "Mala Malasar",
"imi": "Anamgura",
"iml": "Miluk",
"imn": "Imonda",
"imo": "Imbongu",
"imr": "Imroing",
"ims": "Marsian",
"imt": "Imotong",
"imy": "Milyan",
"inb": "Inga",
"inc": "Indic languages",
"ine": "Indo-European languages",
"ing": "Degexit'an",
"inh": "Ingush",
"inj": "Jungle Inga",
"inl": "Indonesian Sign Language",
"inm": "Minaean",
"inn": "Isinai",
"ino": "Inoke-Yate",
"inp": "Iñapari",
"ins": "Indian Sign Language",
"int": "Intha",
"inz": "Ineseño",
"io": "Ido",
"ior": "Inor",
"iou": "Tuma-Irumu",
"iow": "Iowa-Oto",
"ipi": "Ipili",
"ipo": "Ipiko",
"iqu": "Iquito",
"iqw": "Ikwo",
"ira": "Iranian languages",
"ire": "Iresim",
"irh": "Irarutu",
"iri": "Rigwe; Irigwe",
"irk": "Iraqw",
"irn": "Irántxe",
"iro": "Iroquoian languages",
"irr": "Ir",
"iru": "Irula",
"irx": "Kamberau",
"iry": "Iraya",
"is": "Icelandic",
"isa": "Isabi",
"isc": "Isconahua",
"isd": "Isnag",
"ise": "Italian Sign Language",
"isg": "Irish Sign Language",
"ish": "Esan",
"isi": "Nkem-Nkum",
"isk": "Ishkashimi",
"ism": "Masimasi",
"isn": "Isanzu",
"iso": "Isoko",
"isr": "Israeli Sign Language",
"ist": "Istriot",
"isu": "Isu (Menchum Division)",
"it": "Italian",
"itb": "Binongan Itneg",
"itc": "Italic languages",
"itd": "Southern Tidung",
"ite": "Itene",
"iti": "Inlaod Itneg",
"itk": "Judeo-Italian",
"itl": "Itelmen",
"itm": "Itu Mbon Uzo",
"ito": "Itonama",
"itr": "Iteri",
"its": "Isekiri",
"itt": "Maeng Itneg",
"itv": "Itawit",
"itw": "Ito",
"itx": "Itik",
"ity": "Moyadan Itneg",
"itz": "Itzá",
"iu": "Inuktitut",
"ium": "Iu Mien",
"ivb": "Ibatan",
"ivv": "Ivatan",
"iwk": "I-Wak",
"iwm": "Iwam",
"iwo": "Iwur",
"iws": "Sepik Iwam",
"ixc": "Ixcatec",
"ixl": "Ixil",
"iya": "Iyayu",
"iyo": "Mesaka",
"iyx": "Yaka (Congo)",
"izh": "Ingrian",
"izr": "Izere",
"izz": "Izii",
"ja": "Japanese",
"jaa": "Jamamadí",
"jab": "Hyam",
"jac": "Popti'; Jakalteko",
"jad": "Jahanka",
"jae": "Yabem",
"jaf": "Jara",
"jah": "Jah Hut",
"jaj": "Zazao",
"jak": "Jakun",
"jal": "Yalahatan",
"jam": "Jamaican Creole English",
"jan": "Jandai",
"jao": "Yanyuwa",
"jaq": "Yaqay",
"jas": "New Caledonian Javanese",
"jat": "Jakati",
"jau": "Yaur",
"jax": "Jambi Malay",
"jay": "Yan-nhangu; Nhangu",
"jaz": "Jawe",
"jbe": "Judeo-Berber",
"jbi": "Badjiri",
"jbj": "Arandai",
"jbk": "Barikewa",
"jbm": "Bijim",
"jbn": "Nafusi",
"jbo": "Lojban",
"jbr": "Jofotek-Bromnya",
"jbt": "Jabutí",
"jbu": "Jukun Takum",
"jbw": "Yawijibaya",
"jcs": "Jamaican Country Sign Language",
"jct": "Krymchak",
"jda": "Jad",
"jdg": "Jadgali",
"jdt": "Judeo-Tat",
"jeb": "Jebero",
"jee": "Jerung",
"jeh": "Jeh",
"jei": "Yei",
"jek": "Jeri Kuo",
"jel": "Yelmek",
"jen": "Dza",
"jer": "Jere",
"jet": "Manem",
"jeu": "Jonkor Bourmataguil",
"jgb": "Ngbee",
"jge": "Judeo-Georgian",
"jgk": "Gwak",
"jgo": "Ngomba",
"jhi": "Jehai",
"jhs": "Jhankot Sign Language",
"jia": "Jina",
"jib": "Jibu",
"jic": "Tol",
"jid": "Bu (Kaduna State)",
"jie": "Jilbe",
"jig": "Jingulu; Djingili",
"jih": "sTodsde; Shangzhai",
"jii": "Jiiddu",
"jil": "Jilim",
"jim": "Jimi (Cameroon)",
"jio": "Jiamao",
"jiq": "Guanyinqiao; Lavrung",
"jit": "Jita",
"jiu": "Youle Jinuo",
"jiv": "Shuar",
"jiy": "Buyuan Jinuo",
"jje": "Jejueo",
"jjr": "Bankal",
"jka": "Kaera",
"jkm": "Mobwa Karen",
"jko": "Kubo",
"jkp": "Paku Karen",
"jkr": "Koro (India)",
"jks": "Amami Koniya Sign Language",
"jku": "Labir",
"jle": "Ngile",
"jls": "Jamaican Sign Language",
"jma": "Dima",
"jmb": "Zumbun",
"jmc": "Machame",
"jmd": "Yamdena",
"jmi": "Jimi (Nigeria)",
"jml": "Jumli",
"jmn": "Makuri Naga",
"jmr": "Kamara",
"jms": "Mashi (Nigeria)",
"jmw": "Mouwase",
"jmx": "Western Juxtlahuaca Mixtec",
"jna": "Jangshung",
"jnd": "Jandavra",
"jng": "Yangman",
"jni": "Janji",
"jnj": "Yemsa",
"jnl": "Rawat",
"jns": "Jaunsari",
"job": "Joba",
"jod": "Wojenaka",
"jog": "Jogi",
"jor": "Jorá",
"jos": "Jordanian Sign Language",
"jow": "Jowulu",
"jpa": "Jewish Palestinian Aramaic",
"jpr": "Judeo-Persian",
"jpx": "Japanese (family)",
"jqr": "Jaqaru",
"jra": "Jarai",
"jrb": "Judeo-Arabic",
"jrr": "Jiru",
"jrt": "Jakattoe",
"jru": "Japrería",
"jsl": "Japanese Sign Language",
"jua": "Júma",
"jub": "Wannu",
"juc": "Jurchen",
"jud": "Worodougou",
"juh": "Hõne",
"jui": "Ngadjuri",
"juk": "Wapan",
"jul": "Jirel",
"jum": "Jumjum",
"jun": "Juang",
"juo": "Jiba",
"jup": "Hupdë",
"jur": "Jurúna",
"jus": "Jumla Sign Language",
"jut": "Jutish",
"juu": "Ju",
"juw": "Wãpha",
"juy": "Juray",
"jv": "Javanese",
"jvd": "Javindo",
"jvn": "Caribbean Javanese",
"jwi": "Jwira-Pepesa",
"jya": "Jiarong",
"jye": "Judeo-Yemeni Arabic",
"jyy": "Jaya",
"ka": "Georgian",
"kaa": "Kara-Kalpak; Karakalpak",
"kab": "Kabyle",
"kac": "Kachin; Jingpho",
"kad": "Adara",
"kae": "Ketangalan",
"kaf": "Katso",
"kag": "Kajaman",
"kah": "Kara (Central African Republic)",
"kai": "Karekare",
"kaj": "Jju",
"kak": "Kalanguya; Kayapa Kallahan",
"kam": "Kamba (Kenya)",
"kao": "Xaasongaxango",
"kap": "Bezhta",
"kaq": "Capanahua",
"kar": "Karen languages",
"kav": "Katukína",
"kaw": "Kawi",
"kax": "Kao",
"kay": "Kamayurá",
"kba": "Kalarko",
"kbb": "Kaxuiâna",
"kbc": "Kadiwéu",
"kbd": "Kabardian",
"kbe": "Kanju",
"kbg": "Khamba",
"kbh": "Camsá",
"kbi": "Kaptiau",
"kbj": "Kari",
"kbk": "Grass Koiari",
"kbl": "Kanembu",
"kbm": "Iwal",
"kbn": "Kare (Central African Republic)",
"kbo": "Keliko",
"kbp": "Kabiyè",
"kbq": "Kamano",
"kbr": "Kafa",
"kbs": "Kande",
"kbt": "Abadi",
"kbu": "Kabutra",
"kbv": "Dera (Indonesia)",
"kbw": "Kaiep",
"kbx": "Ap Ma",
"kby": "Manga Kanuri",
"kbz": "Duhwa",
"kca": "Khanty",
"kcb": "Kawacha",
"kcc": "Lubila",
"kcd": "Ngkâlmpw Kanum",
"kce": "Kaivi",
"kcf": "Ukaan",
"kcg": "Tyap",
"kch": "Vono",
"kci": "Kamantan",
"kcj": "Kobiana",
"kck": "Kalanga",
"kcl": "Kela (Papua New Guinea); Kala",
"kcm": "Gula (Central African Republic)",
"kcn": "Nubi",
"kco": "Kinalakna",
"kcp": "Kanga",
"kcq": "Kamo",
"kcr": "Katla",
"kcs": "Koenoem",
"kct": "Kaian",
"kcu": "Kami (Tanzania)",
"kcv": "Kete",
"kcw": "Kabwari",
"kcx": "Kachama-Ganjule",
"kcy": "Korandje",
"kcz": "Konongo",
"kda": "Worimi",
"kdc": "Kutu",
"kdd": "Yankunytjatjara",
"kde": "Makonde",
"kdf": "Mamusi",
"kdg": "Seba",
"kdh": "Tem",
"kdi": "Kumam",
"kdj": "Karamojong",
"kdk": "Numèè; Kwényi",
"kdl": "Tsikimba",
"kdm": "Kagoma",
"kdn": "Kunda",
"kdo": "Kordofanian languages",
"kdp": "Kaningdon-Nindem",
"kdq": "Koch",
"kdr": "Karaim",
"kdt": "Kuy",
"kdu": "Kadaru",
"kdw": "Koneraw",
"kdx": "Kam",
"kdy": "Keder; Keijar",
"kdz": "Kwaja",
"kea": "Kabuverdianu",
"keb": "Kélé",
"kec": "Keiga",
"ked": "Kerewe",
"kee": "Eastern Keres",
"kef": "Kpessi",
"keg": "Tese",
"keh": "Keak",
"kei": "Kei",
"kej": "Kadar",
"kek": "Kekchí",
"kel": "Kela (Democratic Republic of Congo)",
"kem": "Kemak",
"ken": "Kenyang",
"keo": "Kakwa",
"kep": "Kaikadi",
"keq": "Kamar",
"ker": "Kera",
"kes": "Kugbo",
"ket": "Ket",
"keu": "Akebu",
"kev": "Kanikkaran",
"kew": "West Kewa",
"kex": "Kukna",
"key": "Kupia",
"kez": "Kukele",
"kfa": "Kodava",
"kfb": "Northwestern Kolami",
"kfc": "Konda-Dora",
"kfd": "Korra Koraga",
"kfe": "Kota (India)",
"kff": "Koya",
"kfg": "Kudiya",
"kfh": "Kurichiya",
"kfi": "Kannada Kurumba",
"kfj": "Kemiehua",
"kfk": "Kinnauri",
"kfl": "Kung",
"kfm": "Khunsari",
"kfn": "Kuk",
"kfo": "Koro (Côte d'Ivoire)",
"kfp": "Korwa",
"kfq": "Korku",
"kfr": "Kachhi; Kutchi",
"kfs": "Bilaspuri",
"kft": "Kanjari",
"kfu": "Katkari",
"kfv": "Kurmukar",
"kfw": "Kharam Naga",
"kfx": "Kullu Pahari",
"kfy": "Kumaoni",
"kfz": "Koromfé",
"kg": "Kongo",
"kga": "Koyaga",
"kgb": "Kawe",
"kge": "Komering",
"kgf": "Kube",
"kgg": "Kusunda",
"kgi": "Selangor Sign Language",
"kgj": "Gamale Kham",
"kgk": "Kaiwá",
"kgl": "Kunggari",
"kgm": "Karipúna",
"kgn": "Karingani",
"kgo": "Krongo",
"kgp": "Kaingang",
"kgq": "Kamoro",
"kgr": "Abun",
"kgs": "Kumbainggar",
"kgt": "Somyev",
"kgu": "Kobol",
"kgv": "Karas",
"kgw": "Karon Dori",
"kgx": "Kamaru",
"kgy": "Kyerung",
"kha": "Khasi",
"khb": "Lü",
"khc": "Tukang Besi North",
"khd": "Bädi Kanum",
"khe": "Korowai",
"khf": "Khuen",
"khg": "Khams Tibetan",
"khh": "Kehu",
"khi": "Khoisan languages",
"khj": "Kuturmi",
"khk": "Halh Mongolian",
"khl": "Lusi",
"khn": "Khandesi",
"kho": "Khotanese; Sakan",
"khp": "Kapori; Kapauri",
"khq": "Koyra Chiini Songhay",
"khr": "Kharia",
"khs": "Kasua",
"kht": "Khamti",
"khu": "Nkhumbi",
"khv": "Khvarshi",
"khw": "Khowar",
"khx": "Kanu",
"khy": "Kele (Democratic Republic of Congo)",
"khz": "Keapara",
"ki": "Kikuyu; Gikuyu",
"kia": "Kim",
"kib": "Koalib",
"kic": "Kickapoo",
"kid": "Koshin",
"kie": "Kibet",
"kif": "Eastern Parbate Kham",
"kig": "Kimaama; Kimaghima",
"kih": "Kilmeri",
"kii": "Kitsai",
"kij": "Kilivila",
"kil": "Kariya",
"kim": "Karagas",
"kio": "Kiowa",
"kip": "Sheshi Kham",
"kiq": "Kosadle; Kosare",
"kis": "Kis",
"kit": "Agob",
"kiu": "Kirmanjki (individual language)",
"kiv": "Kimbu",
"kiw": "Northeast Kiwai",
"kix": "Khiamniungan Naga",
"kiy": "Kirikiri",
"kiz": "Kisi",
"kj": "Kuanyama; Kwanyama",
"kja": "Mlap",
"kjb": "Q'anjob'al; Kanjobal",
"kjc": "Coastal Konjo",
"kjd": "Southern Kiwai",
"kje": "Kisar",
"kjg": "Khmu",
"kjh": "Khakas",
"kji": "Zabana",
"kjj": "Khinalugh",
"kjk": "Highland Konjo",
"kjl": "Western Parbate Kham",
"kjm": "Kháng",
"kjn": "Kunjen",
"kjo": "Harijan Kinnauri",
"kjp": "Pwo Eastern Karen",
"kjq": "Western Keres",
"kjr": "Kurudu",
"kjs": "East Kewa",
"kjt": "Phrae Pwo Karen",
"kju": "Kashaya",
"kjv": "Kaikavian Literary Language",
"kjx": "Ramopa",
"kjy": "Erave",
"kjz": "Bumthangkha",
"kk": "Kazakh",
"kka": "Kakanda",
"kkb": "Kwerisa",
"kkc": "Odoodee",
"kkd": "Kinuku",
"kke": "Kakabe",
"kkf": "Kalaktang Monpa",
"kkg": "Mabaka Valley Kalinga",
"kkh": "Khün",
"kki": "Kagulu",
"kkj": "Kako",
"kkk": "Kokota",
"kkl": "Kosarek Yale",
"kkm": "Kiong",
"kkn": "Kon Keu",
"kko": "Karko",
"kkp": "Gugubera; Koko-Bera",
"kkq": "Kaeku",
"kkr": "Kir-Balar",
"kks": "Giiwo",
"kkt": "Koi",
"kku": "Tumi",
"kkv": "Kangean",
"kkw": "Teke-Kukuya",
"kkx": "Kohin",
"kky": "Guugu Yimidhirr; Guguyimidjir",
"kkz": "Kaska",
"kl": "Kalaallisut; Greenlandic",
"kla": "Klamath-Modoc",
"klb": "Kiliwa",
"klc": "Kolbila",
"kld": "Gamilaraay",
"kle": "Kulung (Nepal)",
"klf": "Kendeje",
"klg": "Tagakaulo",
"klh": "Weliki",
"kli": "Kalumpang",
"klj": "Khalaj",
"klk": "Kono (Nigeria)",
"kll": "Kagan Kalagan",
"klm": "Migum",
"kln": "Kalenjin",
"klo": "Kapya",
"klp": "Kamasa",
"klq": "Rumu",
"klr": "Khaling",
"kls": "Kalasha",
"klt": "Nukna",
"klu": "Klao",
"klv": "Maskelynes",
"klw": "Tado; Lindu",
"klx": "Koluwawa",
"kly": "Kalao",
"klz": "Kabola",
"km": "Khmer; Central Khmer",
"kma": "Konni",
"kmb": "Kimbundu",
"kmc": "Southern Dong",
"kmd": "Majukayang Kalinga",
"kme": "Bakole",
"kmf": "Kare (Papua New Guinea)",
"kmg": "Kâte",
"kmh": "Kalam",
"kmi": "Kami (Nigeria)",
"kmj": "Kumarbhag Paharia",
"kmk": "Limos Kalinga",
"kml": "Tanudan Kalinga",
"kmm": "Kom (India)",
"kmn": "Awtuw",
"kmo": "Kwoma",
"kmp": "Gimme",
"kmq": "Kwama",
"kmr": "Northern Kurdish",
"kms": "Kamasau",
"kmt": "Kemtuik",
"kmu": "Kanite",
"kmv": "Karipúna Creole French",
"kmw": "Komo (Democratic Republic of Congo)",
"kmx": "Waboda",
"kmy": "Koma",
"kmz": "Khorasani Turkish",
"kn": "Kannada",
"kna": "Dera (Nigeria)",
"knb": "Lubuagan Kalinga",
"knc": "Central Kanuri",
"knd": "Konda",
"kne": "Kankanaey",
"knf": "Mankanya",
"kng": "Koongo",
"kni": "Kanufi",
"knj": "Western Kanjobal",
"knk": "Kuranko",
"knl": "Keninjal",
"knm": "Kanamarí",
"knn": "Konkani (individual language)",
"kno": "Kono (Sierra Leone)",
"knp": "Kwanja",
"knq": "Kintaq",
"knr": "Kaningra",
"kns": "Kensiu",
"knt": "Panoan Katukína",
"knu": "Kono (Guinea)",
"knv": "Tabo",
"knw": "Kung-Ekoka",
"knx": "Kendayan; Salako",
"kny": "Kanyok",
"knz": "Kalamsé",
"ko": "Korean",
"koa": "Konomala",
"koc": "Kpati",
"kod": "Kodi",
"koe": "Kacipo-Bale Suri",
"kof": "Kubi",
"kog": "Cogui; Kogi",
"koh": "Koyo",
"koi": "Komi-Permyak",
"kok": "Konkani (macrolanguage)",
"kol": "Kol (Papua New Guinea)",
"koo": "Konzo",
"kop": "Waube",
"koq": "Kota (Gabon)",
"kos": "Kosraean",
"kot": "Lagwan",
"kou": "Koke",
"kov": "Kudu-Camo",
"kow": "Kugama",
"koy": "Koyukon",
"koz": "Korak",
"kpa": "Kutto",
"kpb": "Mullu Kurumba",
"kpc": "Curripaco",
"kpd": "Koba",
"kpe": "Kpelle",
"kpf": "Komba",
"kpg": "Kapingamarangi",
"kph": "Kplang",
"kpi": "Kofei",
"kpj": "Karajá",
"kpk": "Kpan",
"kpl": "Kpala",
"kpm": "Koho",
"kpn": "Kepkiriwát",
"kpo": "Ikposo",
"kpq": "Korupun-Sela",
"kpr": "Korafe-Yegha",
"kps": "Tehit",
"kpt": "Karata",
"kpu": "Kafoa",
"kpv": "Komi-Zyrian",
"kpw": "Kobon",
"kpx": "Mountain Koiali",
"kpy": "Koryak",
"kpz": "Kupsabiny",
"kqa": "Mum",
"kqb": "Kovai",
"kqc": "Doromu-Koki",
"kqd": "Koy Sanjaq Surat",
"kqe": "Kalagan",
"kqf": "Kakabai",
"kqg": "Khe",
"kqh": "Kisankasa",
"kqi": "Koitabu",
"kqj": "Koromira",
"kqk": "Kotafon Gbe",
"kql": "Kyenele",
"kqm": "Khisa",
"kqn": "Kaonde",
"kqo": "Eastern Krahn",
"kqp": "Kimré",
"kqq": "Krenak",
"kqr": "Kimaragang",
"kqs": "Northern Kissi",
"kqt": "Klias River Kadazan",
"kqu": "Seroa",
"kqv": "Okolod",
"kqw": "Kandas",
"kqx": "Mser",
"kqy": "Koorete",
"kqz": "Korana",
"kr": "Kanuri",
"kra": "Kumhali",
"krb": "Karkin",
"krc": "Karachay-Balkar",
"krd": "Kairui-Midiki",
"kre": "Panará",
"krf": "Koro (Vanuatu)",
"krh": "Kurama",
"kri": "Krio",
"krj": "Kinaray-A",
"krk": "Kerek",
"krl": "Karelian",
"krn": "Sapo",
"kro": "Kru languages",
"krp": "Korop",
"krr": "Krung",
"krs": "Gbaya (Sudan)",
"krt": "Tumari Kanuri",
"kru": "Kurukh",
"krv": "Kavet",
"krw": "Western Krahn",
"krx": "Karon",
"kry": "Kryts",
"krz": "Sota Kanum",
"ks": "Kashmiri",
"ksa": "Shuwa-Zamani",
"ksb": "Shambala",
"ksc": "Southern Kalinga",
"ksd": "Kuanua",
"kse": "Kuni",
"ksf": "Bafia",
"ksg": "Kusaghe",
"ksh": "Kölsch",
"ksi": "Krisa; I'saka",
"ksj": "Uare",
"ksk": "Kansa",
"ksl": "Kumalu",
"ksm": "Kumba",
"ksn": "Kasiguranin",
"kso": "Kofa",
"ksp": "Kaba",
"ksq": "Kwaami",
"ksr": "Borong",
"kss": "Southern Kisi",
"kst": "Winyé",
"ksu": "Khamyang",
"ksv": "Kusu",
"ksw": "S'gaw Karen",
"ksx": "Kedang",
"ksy": "Kharia Thar",
"ksz": "Kodaku",
"kta": "Katua",
"ktb": "Kambaata",
"ktc": "Kholok",
"ktd": "Kokata; Kukatha",
"kte": "Nubri",
"ktf": "Kwami",
"ktg": "Kalkutung",
"kth": "Karanga",
"kti": "North Muyu",
"ktj": "Plapo Krumen",
"ktk": "Kaniet",
"ktl": "Koroshi",
"ktm": "Kurti",
"ktn": "Karitiâna",
"kto": "Kuot",
"ktp": "Kaduo",
"ktq": "Katabaga",
"kts": "South Muyu",
"ktt": "Ketum",
"ktu": "Kituba (Democratic Republic of Congo)",
"ktv": "Eastern Katu",
"ktw": "Kato",
"ktx": "Kaxararí",
"kty": "Kango (Bas-Uélé District)",
"ktz": "Juǀʼhoan; Juǀʼhoansi",
"ku": "Kurdish",
"kub": "Kutep",
"kuc": "Kwinsu",
"kud": "'Auhelawa",
"kue": "Kuman (Papua New Guinea)",
"kuf": "Western Katu",
"kug": "Kupa",
"kuh": "Kushi",
"kui": "Kuikúro-Kalapálo; Kalapalo",
"kuj": "Kuria",
"kuk": "Kepo'",
"kul": "Kulere",
"kum": "Kumyk",
"kun": "Kunama",
"kuo": "Kumukio",
"kup": "Kunimaipa",
"kuq": "Karipuna",
"kus": "Kusaal",
"kut": "Kutenai",
"kuu": "Upper Kuskokwim",
"kuv": "Kur",
"kuw": "Kpagua",
"kux": "Kukatja",
"kuy": "Kuuku-Ya'u",
"kuz": "Kunza",
"kv": "Komi",
"kva": "Bagvalal",
"kvb": "Kubu",
"kvc": "Kove",
"kvd": "Kui (Indonesia)",
"kve": "Kalabakan",
"kvf": "Kabalai",
"kvg": "Kuni-Boazi",
"kvh": "Komodo",
"kvi": "Kwang",
"kvj": "Psikye",
"kvk": "Korean Sign Language",
"kvl": "Kayaw",
"kvm": "Kendem",
"kvn": "Border Kuna",
"kvo": "Dobel",
"kvp": "Kompane",
"kvq": "Geba Karen",
"kvr": "Kerinci",
"kvt": "Lahta Karen; Lahta",
"kvu": "Yinbaw Karen",
"kvv": "Kola",
"kvw": "Wersing",
"kvx": "Parkari Koli",
"kvy": "Yintale Karen; Yintale",
"kvz": "Tsakwambo; Tsaukambo",
"kw": "Cornish",
"kwa": "Dâw",
"kwb": "Kwa",
"kwc": "Likwala",
"kwd": "Kwaio",
"kwe": "Kwerba",
"kwf": "Kwara'ae",
"kwg": "Sara Kaba Deme",
"kwh": "Kowiai",
"kwi": "Awa-Cuaiquer",
"kwj": "Kwanga",
"kwk": "Kwakiutl",
"kwl": "Kofyar",
"kwm": "Kwambi",
"kwn": "Kwangali",
"kwo": "Kwomtari",
"kwp": "Kodia",
"kwr": "Kwer",
"kws": "Kwese",
"kwt": "Kwesten",
"kwu": "Kwakum",
"kwv": "Sara Kaba Náà",
"kww": "Kwinti",
"kwx": "Khirwar",
"kwy": "San Salvador Kongo",
"kwz": "Kwadi",
"kxa": "Kairiru",
"kxb": "Krobu",
"kxc": "Konso; Khonso",
"kxd": "Brunei",
"kxf": "Manumanaw Karen; Manumanaw",
"kxh": "Karo (Ethiopia)",
"kxi": "Keningau Murut",
"kxj": "Kulfa",
"kxk": "Zayein Karen",
"kxm": "Northern Khmer",
"kxn": "Kanowit-Tanjong Melanau",
"kxo": "Kanoé",
"kxp": "Wadiyara Koli",
"kxq": "Smärky Kanum",
"kxr": "Koro (Papua New Guinea)",
"kxs": "Kangjia",
"kxt": "Koiwat",
"kxv": "Kuvi",
"kxw": "Konai",
"kxx": "Likuba",
"kxy": "Kayong",
"kxz": "Kerewo",
"ky": "Kirghiz; Kyrgyz",
"kya": "Kwaya",
"kyb": "Butbut Kalinga",
"kyc": "Kyaka",
"kyd": "Karey",
"kye": "Krache",
"kyf": "Kouya",
"kyg": "Keyagana",
"kyh": "Karok",
"kyi": "Kiput",
"kyj": "Karao",
"kyk": "Kamayo",
"kyl": "Kalapuya",
"kym": "Kpatili",
"kyn": "Northern Binukidnon",
"kyo": "Kelon",
"kyp": "Kang",
"kyq": "Kenga",
"kyr": "Kuruáya",
"kys": "Baram Kayan",
"kyt": "Kayagar",
"kyu": "Western Kayah",
"kyv": "Kayort",
"kyw": "Kudmali",
"kyx": "Rapoisi",
"kyy": "Kambaira",
"kyz": "Kayabí",
"kza": "Western Karaboro",
"kzb": "Kaibobo",
"kzc": "Bondoukou Kulango",
"kzd": "Kadai",
"kze": "Kosena",
"kzf": "Da'a Kaili",
"kzg": "Kikai",
"kzi": "Kelabit",
"kzk": "Kazukuru",
"kzl": "Kayeli",
"kzm": "Kais",
"kzn": "Kokola",
"kzo": "Kaningi",
"kzp": "Kaidipang",
"kzq": "Kaike",
"kzr": "Karang",
"kzs": "Sugut Dusun",
"kzu": "Kayupulau",
"kzv": "Komyandaret",
"kzw": "Karirí-Xocó",
"kzx": "Kamarian",
"kzy": "Kango (Tshopo District)",
"kzz": "Kalabra",
"la": "Latin",
"laa": "Southern Subanen",
"lab": "Linear A",
"lac": "Lacandon",
"lad": "Ladino",
"lae": "Pattani",
"laf": "Lafofa",
"lag": "Langi",
"lah": "Lahnda",
"lai": "Lambya",
"laj": "Lango (Uganda)",
"lal": "Lalia",
"lam": "Lamba",
"lan": "Laru",
"lap": "Laka (Chad)",
"laq": "Qabiao",
"lar": "Larteh",
"las": "Lama (Togo)",
"lau": "Laba",
"law": "Lauje",
"lax": "Tiwa",
"lay": "Lama Bai",
"laz": "Aribwatsa",
"lb": "Luxembourgish; Letzeburgesch",
"lbb": "Label",
"lbc": "Lakkia",
"lbe": "Lak",
"lbf": "Tinani",
"lbg": "Laopang",
"lbi": "La'bi",
"lbj": "Ladakhi",
"lbk": "Central Bontok",
"lbl": "Libon Bikol",
"lbm": "Lodhi",
"lbn": "Rmeet",
"lbo": "Laven",
"lbq": "Wampar",
"lbr": "Lohorung",
"lbs": "Libyan Sign Language",
"lbt": "Lachi",
"lbu": "Labu",
"lbv": "Lavatbura-Lamusong",
"lbw": "Tolaki",
"lbx": "Lawangan",
"lby": "Lamalama; Lamu-Lamu",
"lbz": "Lardil",
"lcc": "Legenyem",
"lcd": "Lola",
"lce": "Loncong; Sekak",
"lcf": "Lubu",
"lch": "Luchazi",
"lcl": "Lisela",
"lcm": "Tungag",
"lcp": "Western Lawa",
"lcq": "Luhu",
"lcs": "Lisabata-Nuniali",
"lda": "Kla-Dan",
"ldb": "Dũya",
"ldd": "Luri",
"ldg": "Lenyima",
"ldh": "Lamja-Dengsa-Tola",
"ldi": "Laari",
"ldj": "Lemoro",
"ldk": "Leelau",
"ldl": "Kaan",
"ldm": "Landoma",
"ldn": "Láadan",
"ldo": "Loo",
"ldp": "Tso",
"ldq": "Lufu",
"lea": "Lega-Shabunda",
"leb": "Lala-Bisa",
"lec": "Leco",
"led": "Lendu",
"lee": "Lyélé",
"lef": "Lelemi",
"leh": "Lenje",
"lei": "Lemio",
"lej": "Lengola",
"lek": "Leipon",
"lel": "Lele (Democratic Republic of Congo)",
"lem": "Nomaande",
"len": "Lenca",
"leo": "Leti (Cameroon)",
"lep": "Lepcha",
"leq": "Lembena",
"ler": "Lenkau",
"les": "Lese",
"let": "Lesing-Gelimi; Amio-Gelimi",
"leu": "Kara (Papua New Guinea)",
"lev": "Lamma",
"lew": "Ledo Kaili",
"lex": "Luang",
"ley": "Lemolang",
"lez": "Lezghian",
"lfa": "Lefa",
"lfn": "Lingua Franca Nova",
"lg": "Ganda; Luganda",
"lga": "Lungga",
"lgb": "Laghu",
"lgg": "Lugbara",
"lgh": "Laghuu",
"lgi": "Lengilu",
"lgk": "Lingarak; Neverver",
"lgl": "Wala",
"lgm": "Lega-Mwenga",
"lgn": "T'apo; Opuuo",
"lgo": "Lango (South Sudan)",
"lgq": "Logba",
"lgr": "Lengo",
"lgt": "Pahi",
"lgu": "Longgu",
"lgz": "Ligenza",
"lha": "Laha (Viet Nam)",
"lhh": "Laha (Indonesia)",
"lhi": "Lahu Shi",
"lhl": "Lahul Lohar",
"lhm": "Lhomi",
"lhn": "Lahanan",
"lhp": "Lhokpu",
"lhs": "Mlahsö",
"lht": "Lo-Toga",
"lhu": "Lahu",
"li": "Limburgan; Limburger; Limburgish",
"lia": "West-Central Limba",
"lib": "Likum",
"lic": "Hlai",
"lid": "Nyindrou",
"lie": "Likila",
"lif": "Limbu",
"lig": "Ligbi",
"lih": "Lihir",
"lij": "Ligurian",
"lik": "Lika",
"lil": "Lillooet",
"lio": "Liki",
"lip": "Sekpele",
"liq": "Libido",
"lir": "Liberian English",
"lis": "Lisu",
"liu": "Logorik",
"liv": "Liv",
"liw": "Col",
"lix": "Liabuku",
"liy": "Banda-Bambari",
"liz": "Libinza",
"lja": "Golpa",
"lje": "Rampi",
"lji": "Laiyolo",
"ljl": "Li'o",
"ljp": "Lampung Api",
"ljw": "Yirandali",
"ljx": "Yuru",
"lka": "Lakalei",
"lkb": "Kabras; Lukabaras",
"lkc": "Kucong",
"lkd": "Lakondê",
"lke": "Kenyi",
"lkh": "Lakha",
"lki": "Laki",
"lkj": "Remun",
"lkl": "Laeko-Libuat",
"lkm": "Kalaamaya",
"lkn": "Lakon; Vure",
"lko": "Khayo; Olukhayo",
"lkr": "Päri",
"lks": "Kisa; Olushisa",
"lkt": "Lakota",
"lku": "Kungkari",
"lky": "Lokoya",
"lla": "Lala-Roba",
"llb": "Lolo",
"llc": "Lele (Guinea)",
"lld": "Ladin",
"lle": "Lele (Papua New Guinea)",
"llf": "Hermit",
"llg": "Lole",
"llh": "Lamu",
"lli": "Teke-Laali",
"llj": "Ladji Ladji",
"llk": "Lelak",
"lll": "Lilau",
"llm": "Lasalimu",
"lln": "Lele (Chad)",
"llp": "North Efate",
"llq": "Lolak",
"lls": "Lithuanian Sign Language",
"llu": "Lau",
"llx": "Lauan",
"lma": "East Limba",
"lmb": "Merei",
"lmc": "Limilngan",
"lmd": "Lumun",
"lme": "Pévé",
"lmf": "South Lembata",
"lmg": "Lamogai",
"lmh": "Lambichhong",
"lmi": "Lombi",
"lmj": "West Lembata",
"lmk": "Lamkang",
"lml": "Hano",
"lmn": "Lambadi",
"lmo": "Lombard",
"lmp": "Limbum",
"lmq": "Lamatuka",
"lmr": "Lamalera",
"lmu": "Lamenu",
"lmv": "Lomaiviti",
"lmw": "Lake Miwok",
"lmx": "Laimbue",
"lmy": "Lamboya",
"ln": "Lingala",
"lna": "Langbashe",
"lnb": "Mbalanhu",
"lnd": "Lundayeh; Lun Bawang",
"lng": "Langobardic",
"lnh": "Lanoh",
"lni": "Daantanai'",
"lnj": "Leningitij",
"lnl": "South Central Banda",
"lnm": "Langam",
"lnn": "Lorediakarkar",
"lns": "Lamnso'",
"lnu": "Longuda",
"lnw": "Lanima",
"lnz": "Lonzo",
"lo": "Lao",
"loa": "Loloda",
"lob": "Lobi",
"loc": "Inonhan",
"loe": "Saluan",
"lof": "Logol",
"log": "Logo",
"loh": "Narim",
"loi": "Loma (Côte d'Ivoire)",
"loj": "Lou",
"lok": "Loko",
"lol": "Mongo",
"lom": "Loma (Liberia)",
"lon": "Malawi Lomwe",
"loo": "Lombo",
"lop": "Lopa",
"loq": "Lobala",
"lor": "Téén",
"los": "Loniu",
"lot": "Otuho",
"lou": "Louisiana Creole",
"lov": "Lopi",
"low": "Tampias Lobu",
"lox": "Loun",
"loy": "Loke",
"loz": "Lozi",
"lpa": "Lelepa",
"lpe": "Lepki",
"lpn": "Long Phuri Naga",
"lpo": "Lipo",
"lpx": "Lopit",
"lqr": "Logir",
"lra": "Rara Bakati'",
"lrc": "Northern Luri",
"lre": "Laurentian",
"lrg": "Laragia",
"lri": "Marachi; Olumarachi",
"lrk": "Loarki",
"lrl": "Lari",
"lrm": "Marama; Olumarama",
"lrn": "Lorang",
"lro": "Laro",
"lrr": "Southern Yamphu",
"lrt": "Larantuka Malay",
"lrv": "Larevat",
"lrz": "Lemerig",
"lsa": "Lasgerdi",
"lsb": "Burundian Sign Language; Langue des Signes Burundaise",
"lsc": "Albarradas Sign Language; Lengua de señas Albarradas",
"lsd": "Lishana Deni",
"lse": "Lusengo",
"lsh": "Lish",
"lsi": "Lashi",
"lsl": "Latvian Sign Language",
"lsm": "Saamia; Olusamia",
"lsn": "Tibetan Sign Language",
"lso": "Laos Sign Language",
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas",
"lsr": "Aruop",
"lss": "Lasi",
"lst": "Trinidad and Tobago Sign Language",
"lsv": "Sivia Sign Language",
"lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise",
"lsy": "Mauritian Sign Language",
"lt": "Lithuanian",
"ltc": "Late Middle Chinese",
"ltg": "Latgalian",
"lth": "Thur",
"lti": "Leti (Indonesia)",
"ltn": "Latundê",
"lto": "Tsotso; Olutsotso",
"lts": "Tachoni; Lutachoni",
"ltu": "Latu",
"lu": "Luba-Katanga",
"lua": "Luba-Lulua",
"luc": "Aringa",
"lud": "Ludian",
"lue": "Luvale",
"luf": "Laua",
"lui": "Luiseno",
"luj": "Luna",
"luk": "Lunanakha",
"lul": "Olu'bo",
"lum": "Luimbi",
"lun": "Lunda",
"luo": "Luo (Kenya and Tanzania); Dholuo",
"lup": "Lumbu",
"luq": "Lucumi",
"lur": "Laura",
"lus": "Lushai",
"lut": "Lushootseed",
"luu": "Lumba-Yakkha",
"luv": "Luwati",
"luw": "Luo (Cameroon)",
"luy": "Luyia; Oluluyia",
"luz": "Southern Luri",
"lv": "Latvian",
"lva": "Maku'a",
"lvi": "Lavi",
"lvk": "Lavukaleve",
"lvs": "Standard Latvian",
"lvu": "Levuka",
"lwa": "Lwalu",
"lwe": "Lewo Eleng",
"lwg": "Wanga; Oluwanga",
"lwh": "White Lachi",
"lwl": "Eastern Lawa",
"lwm": "Laomian",
"lwo": "Luwo",
"lws": "Malawian Sign Language",
"lwt": "Lewotobi",
"lwu": "Lawu",
"lww": "Lewo",
"lxm": "Lakurumau",
"lya": "Layakha",
"lyg": "Lyngngam",
"lyn": "Luyana",
"lzh": "Literary Chinese",
"lzl": "Litzlitz",
"lzn": "Leinong Naga",
"lzz": "Laz",
"maa": "San Jerónimo Tecóatl Mazatec",
"mab": "Yutanduchi Mixtec",
"mad": "Madurese",
"mae": "Bo-Rukul",
"maf": "Mafa",
"mag": "Magahi",
"mai": "Maithili",
"maj": "Jalapa De Díaz Mazatec",
"mak": "Makasar",
"mam": "Mam",
"man": "Mandingo; Manding",
"map": "Austronesian languages",
"maq": "Chiquihuitlán Mazatec",
"mas": "Masai",
"mat": "San Francisco Matlatzinca",
"mau": "Huautla Mazatec",
"mav": "Sateré-Mawé",
"maw": "Mampruli",
"max": "North Moluccan Malay",
"maz": "Central Mazahua",
"mba": "Higaonon",
"mbb": "Western Bukidnon Manobo",
"mbc": "Macushi",
"mbd": "Dibabawon Manobo",
"mbe": "Molale",
"mbf": "Baba Malay",
"mbh": "Mangseng",
"mbi": "Ilianen Manobo",
"mbj": "Nadëb",
"mbk": "Malol",
"mbl": "Maxakalí",
"mbm": "Ombamba",
"mbn": "Macaguán",
"mbo": "Mbo (Cameroon)",
"mbp": "Malayo",
"mbq": "Maisin",
"mbr": "Nukak Makú",
"mbs": "Sarangani Manobo",
"mbt": "Matigsalug Manobo",
"mbu": "Mbula-Bwazza",
"mbv": "Mbulungish",
"mbw": "Maring",
"mbx": "Mari (East Sepik Province)",
"mby": "Memoni",
"mbz": "Amoltepec Mixtec",
"mca": "Maca",
"mcb": "Machiguenga",
"mcc": "Bitur",
"mcd": "Sharanahua",
"mce": "Itundujia Mixtec",
"mcf": "Matsés",
"mcg": "Mapoyo",
"mch": "Maquiritari",
"mci": "Mese",
"mcj": "Mvanip",
"mck": "Mbunda",
"mcl": "Macaguaje",
"mcm": "Malaccan Creole Portuguese",
"mcn": "Masana",
"mco": "Coatlán Mixe",
"mcp": "Makaa",
"mcq": "Ese",
"mcr": "Menya",
"mcs": "Mambai",
"mct": "Mengisa",
"mcu": "Cameroon Mambila",
"mcv": "Minanibai",
"mcw": "Mawa (Chad)",
"mcx": "Mpiemo",
"mcy": "South Watut",
"mcz": "Mawan",
"mda": "Mada (Nigeria)",
"mdb": "Morigi",
"mdc": "Male (Papua New Guinea)",
"mdd": "Mbum",
"mde": "Maba (Chad)",
"mdf": "Moksha",
"mdg": "Massalat",
"mdh": "Maguindanaon",
"mdi": "Mamvu",
"mdj": "Mangbetu",
"mdk": "Mangbutu",
"mdl": "Maltese Sign Language",
"mdm": "Mayogo",
"mdn": "Mbati",
"mdp": "Mbala",
"mdq": "Mbole",
"mdr": "Mandar",
"mds": "Maria (Papua New Guinea)",
"mdt": "Mbere",
"mdu": "Mboko",
"mdv": "Santa Lucía Monteverde Mixtec",
"mdw": "Mbosi",
"mdx": "Dizin",
"mdy": "Male (Ethiopia)",
"mdz": "Suruí Do Pará",
"mea": "Menka",
"meb": "Ikobi",
"mec": "Marra",
"med": "Melpa",
"mee": "Mengen",
"mef": "Megam",
"meh": "Southwestern Tlaxiaco Mixtec",
"mei": "Midob",
"mej": "Meyah",
"mek": "Mekeo",
"mel": "Central Melanau",
"mem": "Mangala",
"men": "Mende (Sierra Leone)",
"meo": "Kedah Malay",
"mep": "Miriwoong",
"meq": "Merey",
"mer": "Meru",
"mes": "Masmaje",
"met": "Mato",
"meu": "Motu",
"mev": "Mano",
"mew": "Maaka",
"mey": "Hassaniyya",
"mez": "Menominee",
"mfa": "Pattani Malay",
"mfb": "Bangka",
"mfc": "Mba",
"mfd": "Mendankwe-Nkwen",
"mfe": "Morisyen",
"mff": "Naki",
"mfg": "Mogofin",
"mfh": "Matal",
"mfi": "Wandala",
"mfj": "Mefele",
"mfk": "North Mofu",
"mfl": "Putai",
"mfm": "Marghi South",
"mfn": "Cross River Mbembe",
"mfo": "Mbe",
"mfp": "Makassar Malay",
"mfq": "Moba",
"mfr": "Marrithiyel",
"mfs": "Mexican Sign Language",
"mft": "Mokerang",
"mfu": "Mbwela",
"mfv": "Mandjak",
"mfw": "Mulaha",
"mfx": "Melo",
"mfy": "Mayo",
"mfz": "Mabaan",
"mg": "Malagasy",
"mga": "Middle Irish (900-1200)",
"mgb": "Mararit",
"mgc": "Morokodo",
"mgd": "Moru",
"mge": "Mango",
"mgf": "Maklew",
"mgg": "Mpumpong",
"mgh": "Makhuwa-Meetto",
"mgi": "Lijili",
"mgj": "Abureni",
"mgk": "Mawes",
"mgl": "Maleu-Kilenge",
"mgm": "Mambae",
"mgn": "Mbangi",
"mgo": "Meta'",
"mgp": "Eastern Magar",
"mgq": "Malila",
"mgr": "Mambwe-Lungu",
"mgs": "Manda (Tanzania)",
"mgt": "Mongol",
"mgu": "Mailu",
"mgv": "Matengo",
"mgw": "Matumbi",
"mgy": "Mbunga",
"mgz": "Mbugwe",
"mh": "Marshallese",
"mha": "Manda (India)",
"mhb": "Mahongwe",
"mhc": "Mocho",
"mhd": "Mbugu",
"mhe": "Besisi; Mah Meri",
"mhf": "Mamaa",
"mhg": "Margu",
"mhi": "Ma'di",
"mhj": "Mogholi",
"mhk": "Mungaka",
"mhl": "Mauwake",
"mhm": "Makhuwa-Moniga",
"mhn": "Mócheno",
"mho": "Mashi (Zambia)",
"mhp": "Balinese Malay",
"mhq": "Mandan",
"mhr": "Eastern Mari",
"mhs": "Buru (Indonesia)",
"mht": "Mandahuaca",
"mhu": "Digaro-Mishmi; Darang Deng",
"mhw": "Mbukushu",
"mhx": "Maru; Lhaovo",
"mhy": "Ma'anyan",
"mhz": "Mor (Mor Islands)",
"mi": "Maori",
"mia": "Miami",
"mib": "Atatláhuca Mixtec",
"mic": "Mi'kmaq; Micmac",
"mid": "Mandaic",
"mie": "Ocotepec Mixtec",
"mif": "Mofu-Gudur",
"mig": "San Miguel El Grande Mixtec",
"mih": "Chayuco Mixtec",
"mii": "Chigmecatitlán Mixtec",
"mij": "Abar; Mungbam",
"mik": "Mikasuki",
"mil": "Peñoles Mixtec",
"mim": "Alacatlatzala Mixtec",
"min": "Minangkabau",
"mio": "Pinotepa Nacional Mixtec",
"mip": "Apasco-Apoala Mixtec",
"miq": "Mískito",
"mir": "Isthmus Mixe",
"mit": "Southern Puebla Mixtec",
"miu": "Cacaloxtepec Mixtec",
"miw": "Akoye",
"mix": "Mixtepec Mixtec",
"miy": "Ayutla Mixtec",
"miz": "Coatzospan Mixtec",
"mjb": "Makalero",
"mjc": "San Juan Colorado Mixtec",
"mjd": "Northwest Maidu",
"mje": "Muskum",
"mjg": "Tu",
"mjh": "Mwera (Nyasa)",
"mji": "Kim Mun",
"mjj": "Mawak",
"mjk": "Matukar",
"mjl": "Mandeali",
"mjm": "Medebur",
"mjn": "Ma (Papua New Guinea)",
"mjo": "Malankuravan",
"mjp": "Malapandaram",
"mjq": "Malaryan",
"mjr": "Malavedan",
"mjs": "Miship",
"mjt": "Sauria Paharia",
"mju": "Manna-Dora",
"mjv": "Mannan",
"mjw": "Karbi",
"mjx": "Mahali",
"mjy": "Mahican",
"mjz": "Majhi",
"mk": "Macedonian",
"mka": "Mbre",
"mkb": "Mal Paharia",
"mkc": "Siliput",
"mke": "Mawchi",
"mkf": "Miya",
"mkg": "Mak (China)",
"mkh": "Mon-Khmer languages",
"mki": "Dhatki",
"mkj": "Mokilese",
"mkk": "Byep",
"mkl": "Mokole",
"mkm": "Moklen",
"mkn": "Kupang Malay",
"mko": "Mingang Doso",
"mkp": "Moikodi",
"mkq": "Bay Miwok",
"mkr": "Malas",
"mks": "Silacayoapan Mixtec",
"mkt": "Vamale",
"mku": "Konyanka Maninka",
"mkv": "Mafea",
"mkw": "Kituba (Congo)",
"mkx": "Kinamiging Manobo",
"mky": "East Makian",
"mkz": "Makasae",
"ml": "Malayalam",
"mla": "Malo",
"mlb": "Mbule",
"mlc": "Cao Lan",
"mle": "Manambu",
"mlf": "Mal",
"mlh": "Mape",
"mli": "Malimpung",
"mlj": "Miltu",
"mlk": "Ilwana; Kiwilwana",
"mll": "Malua Bay",
"mlm": "Mulam",
"mln": "Malango",
"mlo": "Mlomp",
"mlp": "Bargam",
"mlq": "Western Maninkakan",
"mlr": "Vame",
"mls": "Masalit",
"mlu": "To'abaita",
"mlv": "Motlav; Mwotlap",
"mlw": "Moloko",
"mlx": "Malfaxal; Naha'ai",
"mlz": "Malaynon",
"mma": "Mama",
"mmb": "Momina",
"mmc": "Michoacán Mazahua",
"mmd": "Maonan",
"mme": "Mae",
"mmf": "Mundat",
"mmg": "North Ambrym",
"mmh": "Mehináku",
"mmi": "Musar",
"mmj": "Majhwar",
"mmk": "Mukha-Dora",
"mml": "Man Met",
"mmm": "Maii",
"mmn": "Mamanwa",
"mmo": "Mangga Buang",
"mmp": "Siawi",
"mmq": "Musak",
"mmr": "Western Xiangxi Miao",
"mmt": "Malalamai",
"mmu": "Mmaala",
"mmv": "Miriti",
"mmw": "Emae",
"mmx": "Madak",
"mmy": "Migaama",
"mmz": "Mabaale",
"mn": "Mongolian",
"mna": "Mbula",
"mnb": "Muna",
"mnc": "Manchu",
"mnd": "Mondé",
"mne": "Naba",
"mnf": "Mundani",
"mng": "Eastern Mnong",
"mnh": "Mono (Democratic Republic of Congo)",
"mni": "Manipuri",
"mnj": "Munji",
"mnk": "Mandinka",
"mnl": "Tiale",
"mnm": "Mapena",
"mnn": "Southern Mnong",
"mno": "Manobo languages",
"mnp": "Min Bei Chinese",
"mnq": "Minriq",
"mnr": "Mono (USA)",
"mns": "Mansi",
"mnu": "Mer",
"mnv": "Rennell-Bellona",
"mnw": "Mon",
"mnx": "Manikion",
"mny": "Manyawa",
"mnz": "Moni",
"moa": "Mwan",
"moc": "Mocoví",
"mod": "Mobilian",
"moe": "Innu; Montagnais",
"mog": "Mongondow",
"moh": "Mohawk",
"moi": "Mboi",
"moj": "Monzombo",
"mok": "Morori",
"mom": "Mangue",
"moo": "Monom",
"mop": "Mopán Maya",
"moq": "Mor (Bomberai Peninsula)",
"mor": "Moro",
"mos": "Mossi",
"mot": "Barí",
"mou": "Mogum",
"mov": "Mohave",
"mow": "Moi (Congo)",
"mox": "Molima",
"moy": "Shekkacho",
"moz": "Mukulu; Gergiko",
"mpa": "Mpoto",
"mpb": "Malak Malak; Mullukmulluk",
"mpc": "Mangarrayi",
"mpd": "Machinere",
"mpe": "Majang",
"mpg": "Marba",
"mph": "Maung",
"mpi": "Mpade",
"mpj": "Martu Wangka; Wangkajunga",
"mpk": "Mbara (Chad)",
"mpl": "Middle Watut",
"mpm": "Yosondúa Mixtec",
"mpn": "Mindiri",
"mpo": "Miu",
"mpp": "Migabac",
"mpq": "Matís",
"mpr": "Vangunu",
"mps": "Dadibi",
"mpt": "Mian",
"mpu": "Makuráp",
"mpv": "Mungkip",
"mpw": "Mapidian",
"mpx": "Misima-Panaeati",
"mpy": "Mapia",
"mpz": "Mpi",
"mqa": "Maba (Indonesia)",
"mqb": "Mbuko",
"mqc": "Mangole",
"mqe": "Matepi",
"mqf": "Momuna",
"mqg": "Kota Bangun Kutai Malay",
"mqh": "Tlazoyaltepec Mixtec",
"mqi": "Mariri",
"mqj": "Mamasa",
"mqk": "Rajah Kabunsuwan Manobo",
"mql": "Mbelime",
"mqm": "South Marquesan",
"mqn": "Moronene",
"mqo": "Modole",
"mqp": "Manipa",
"mqq": "Minokok",
"mqr": "Mander",
"mqs": "West Makian",
"mqt": "Mok",
"mqu": "Mandari",
"mqv": "Mosimo",
"mqw": "Murupi",
"mqx": "Mamuju",
"mqy": "Manggarai",
"mqz": "Pano",
"mr": "Marathi",
"mra": "Mlabri",
"mrb": "Marino",
"mrc": "Maricopa",
"mrd": "Western Magar",
"mre": "Martha's Vineyard Sign Language",
"mrf": "Elseng",
"mrg": "Mising",
"mrh": "Mara Chin",
"mrj": "Western Mari",
"mrk": "Hmwaveke",
"mrl": "Mortlockese",
"mrm": "Merlav; Mwerlap",
"mrn": "Cheke Holo",
"mro": "Mru",
"mrp": "Morouas",
"mrq": "North Marquesan",
"mrr": "Maria (India)",
"mrs": "Maragus",
"mrt": "Marghi Central",
"mru": "Mono (Cameroon)",
"mrv": "Mangareva",
"mrw": "Maranao",
"mrx": "Maremgi; Dineor",
"mry": "Mandaya",
"mrz": "Marind",
"ms": "Malay (macrolanguage)",
"msb": "Masbatenyo",
"msc": "Sankaran Maninka",
"msd": "Yucatec Maya Sign Language",
"mse": "Musey",
"msf": "Mekwei",
"msg": "Moraid",
"msh": "Masikoro Malagasy",
"msi": "Sabah Malay",
"msj": "Ma (Democratic Republic of Congo)",
"msk": "Mansaka",
"msl": "Molof; Poule",
"msm": "Agusan Manobo",
"msn": "Vurës",
"mso": "Mombum",
"msp": "Maritsauá",
"msq": "Caac",
"msr": "Mongolian Sign Language",
"mss": "West Masela",
"msu": "Musom",
"msv": "Maslam",
"msw": "Mansoanka",
"msx": "Moresada",
"msy": "Aruamu",
"msz": "Momare",
"mt": "Maltese",
"mta": "Cotabato Manobo",
"mtb": "Anyin Morofo",
"mtc": "Munit",
"mtd": "Mualang",
"mte": "Mono (Solomon Islands)",
"mtf": "Murik (Papua New Guinea)",
"mtg": "Una",
"mth": "Munggui",
"mti": "Maiwa (Papua New Guinea)",
"mtj": "Moskona",
"mtk": "Mbe'",
"mtl": "Montol",
"mtm": "Mator",
"mtn": "Matagalpa",
"mto": "Totontepec Mixe",
"mtp": "Wichí Lhamtés Nocten",
"mtq": "Muong",
"mtr": "Mewari",
"mts": "Yora",
"mtt": "Mota",
"mtu": "Tututepec Mixtec",
"mtv": "Asaro'o",
"mtw": "Southern Binukidnon",
"mtx": "Tidaá Mixtec",
"mty": "Nabi",
"mua": "Mundang",
"mub": "Mubi",
"muc": "Ajumbu",
"mud": "Mednyj Aleut",
"mue": "Media Lengua",
"mug": "Musgu",
"muh": "Mündü",
"mui": "Musi",
"muj": "Mabire",
"muk": "Mugom",
"mum": "Maiwala",
"mun": "Munda languages",
"muo": "Nyong",
"mup": "Malvi",
"muq": "Eastern Xiangxi Miao",
"mur": "Murle",
"mus": "Creek",
"mut": "Western Muria",
"muu": "Yaaku",
"muv": "Muthuvan",
"mux": "Bo-Ung",
"muy": "Muyang",
"muz": "Mursi",
"mva": "Manam",
"mvb": "Mattole",
"mvd": "Mamboru",
"mve": "Marwari (Pakistan)",
"mvf": "Peripheral Mongolian",
"mvg": "Yucuañe Mixtec",
"mvh": "Mulgi",
"mvi": "Miyako",
"mvk": "Mekmek",
"mvl": "Mbara (Australia)",
"mvn": "Minaveha",
"mvo": "Marovo",
"mvp": "Duri",
"mvq": "Moere",
"mvr": "Marau",
"mvs": "Massep",
"mvt": "Mpotovoro",
"mvu": "Marfa",
"mvv": "Tagal Murut",
"mvw": "Machinga",
"mvx": "Meoswar",
"mvy": "Indus Kohistani",
"mvz": "Mesqan",
"mwa": "Mwatebu",
"mwb": "Juwal",
"mwc": "Are",
"mwe": "Mwera (Chimwera)",
"mwf": "Murrinh-Patha",
"mwg": "Aiklep",
"mwh": "Mouk-Aria",
"mwi": "Labo; Ninde",
"mwk": "Kita Maninkakan",
"mwl": "Mirandese",
"mwm": "Sar",
"mwn": "Nyamwanga",
"mwo": "Central Maewo",
"mwp": "Kala Lagaw Ya",
"mwq": "Mün Chin",
"mwr": "Marwari",
"mws": "Mwimbi-Muthambi",
"mwt": "Moken",
"mwu": "Mittu",
"mwv": "Mentawai",
"mww": "Hmong Daw",
"mwz": "Moingi",
"mxa": "Northwest Oaxaca Mixtec",
"mxb": "Tezoatlán Mixtec",
"mxc": "Manyika",
"mxd": "Modang",
"mxe": "Mele-Fila",
"mxf": "Malgbe",
"mxg": "Mbangala",
"mxh": "Mvuba",
"mxi": "Mozarabic",
"mxj": "Miju-Mishmi; Geman Deng",
"mxk": "Monumbo",
"mxl": "Maxi Gbe",
"mxm": "Meramera",
"mxn": "Moi (Indonesia)",
"mxo": "Mbowe",
"mxp": "Tlahuitoltepec Mixe",
"mxq": "Juquila Mixe",
"mxr": "Murik (Malaysia)",
"mxs": "Huitepec Mixtec",
"mxt": "Jamiltepec Mixtec",
"mxu": "Mada (Cameroon)",
"mxv": "Metlatónoc Mixtec",
"mxw": "Namo",
"mxx": "Mahou; Mawukakan",
"mxy": "Southeastern Nochixtlán Mixtec",
"mxz": "Central Masela",
"my": "Burmese",
"myb": "Mbay",
"myc": "Mayeka",
"mye": "Myene",
"myf": "Bambassi",
"myg": "Manta",
"myh": "Makah",
"myj": "Mangayat",
"myk": "Mamara Senoufo",
"myl": "Moma",
"mym": "Me'en",
"myn": "Mayan languages",
"myo": "Anfillo",
"myp": "Pirahã",
"myr": "Muniche",
"mys": "Mesmes",
"myu": "Mundurukú",
"myv": "Erzya",
"myw": "Muyuw",
"myx": "Masaaba",
"myy": "Macuna",
"myz": "Classical Mandaic",
"mza": "Santa María Zacatepec Mixtec",
"mzb": "Tumzabt",
"mzc": "Madagascar Sign Language",
"mzd": "Malimba",
"mze": "Morawa",
"mzg": "Monastic Sign Language",
"mzh": "Wichí Lhamtés Güisnay",
"mzi": "Ixcatlán Mazatec",
"mzj": "Manya",
"mzk": "Nigeria Mambila",
"mzl": "Mazatlán Mixe",
"mzm": "Mumuye",
"mzn": "Mazanderani",
"mzo": "Matipuhy",
"mzp": "Movima",
"mzq": "Mori Atas",
"mzr": "Marúbo",
"mzs": "Macanese",
"mzt": "Mintil",
"mzu": "Inapang",
"mzv": "Manza",
"mzw": "Deg",
"mzx": "Mawayana",
"mzy": "Mozambican Sign Language",
"mzz": "Maiadomu",
"na": "Nauru",
"naa": "Namla",
"nab": "Southern Nambikuára",
"nac": "Narak",
"nae": "Naka'ela",
"naf": "Nabak",
"nag": "Naga Pidgin",
"nah": "Nahuatl languages",
"nai": "North American Indian languages",
"naj": "Nalu",
"nak": "Nakanai",
"nal": "Nalik",
"nam": "Ngan'gityemerri",
"nan": "Min Nan Chinese",
"nao": "Naaba",
"nap": "Neapolitan",
"naq": "Khoekhoe; Nama (Namibia)",
"nar": "Iguta",
"nas": "Naasioi",
"nat": "Ca̱hungwa̱rya̱; Hungworo",
"naw": "Nawuri",
"nax": "Nakwi",
"nay": "Ngarrindjeri",
"naz": "Coatepec Nahuatl",
"nb": "Norwegian Bokmål",
"nba": "Nyemba",
"nbb": "Ndoe",
"nbc": "Chang Naga",
"nbd": "Ngbinda",
"nbe": "Konyak Naga",
"nbg": "Nagarchal",
"nbh": "Ngamo",
"nbi": "Mao Naga",
"nbj": "Ngarinyman",
"nbk": "Nake",
"nbm": "Ngbaka Ma'bo",
"nbn": "Kuri",
"nbo": "Nkukoli",
"nbp": "Nnam",
"nbq": "Nggem",
"nbr": "Numana",
"nbs": "Namibian Sign Language",
"nbt": "Na",
"nbu": "Rongmei Naga",
"nbv": "Ngamambo",
"nbw": "Southern Ngbandi",
"nby": "Ningera",
"nca": "Iyo",
"ncb": "Central Nicobarese",
"ncc": "Ponam",
"ncd": "Nachering",
"nce": "Yale",
"ncf": "Notsi",
"ncg": "Nisga'a",
"nch": "Central Huasteca Nahuatl",
"nci": "Classical Nahuatl",
"ncj": "Northern Puebla Nahuatl",
"nck": "Na-kara",
"ncl": "Michoacán Nahuatl",
"ncm": "Nambo",
"ncn": "Nauna",
"nco": "Sibe",
"ncq": "Northern Katang",
"ncr": "Ncane",
"ncs": "Nicaraguan Sign Language",
"nct": "Chothe Naga",
"ncu": "Chumburung",
"ncx": "Central Puebla Nahuatl",
"ncz": "Natchez",
"nd": "North Ndebele",
"nda": "Ndasa",
"ndb": "Kenswei Nsei",
"ndc": "Ndau",
"ndd": "Nde-Nsele-Nta",
"ndf": "Nadruvian",
"ndg": "Ndengereko",
"ndh": "Ndali",
"ndi": "Samba Leko",
"ndj": "Ndamba",
"ndk": "Ndaka",
"ndl": "Ndolo",
"ndm": "Ndam",
"ndn": "Ngundi",
"ndp": "Ndo",
"ndq": "Ndombe",
"ndr": "Ndoola",
"nds": "Low German; Low Saxon",
"ndt": "Ndunga",
"ndu": "Dugun",
"ndv": "Ndut",
"ndw": "Ndobo",
"ndx": "Nduga",
"ndy": "Lutos",
"ndz": "Ndogo",
"ne": "Nepali (macrolanguage)",
"nea": "Eastern Ngad'a",
"neb": "Toura (Côte d'Ivoire)",
"nec": "Nedebang",
"ned": "Nde-Gbite",
"nee": "Nêlêmwa-Nixumwak",
"nef": "Nefamese",
"neg": "Negidal",
"neh": "Nyenkha",
"nei": "Neo-Hittite",
"nej": "Neko",
"nek": "Neku",
"nem": "Nemi",
"nen": "Nengone",
"neo": "Ná-Meo",
"neq": "North Central Mixe",
"ner": "Yahadian",
"nes": "Bhoti Kinnauri",
"net": "Nete",
"neu": "Neo",
"nev": "Nyaheun",
"new": "Newari; Nepal Bhasa",
"nex": "Neme",
"ney": "Neyo",
"nez": "Nez Perce",
"nfa": "Dhao",
"nfd": "Ahwai",
"nfl": "Ayiwo; Äiwoo",
"nfr": "Nafaanra",
"nfu": "Mfumte",
"ng": "Ndonga",
"nga": "Ngbaka",
"ngb": "Northern Ngbandi",
"ngc": "Ngombe (Democratic Republic of Congo)",
"ngd": "Ngando (Central African Republic)",
"nge": "Ngemba",
"ngf": "Trans-New Guinea languages",
"ngg": "Ngbaka Manza",
"ngh": "Nǁng",
"ngi": "Ngizim",
"ngj": "Ngie",
"ngk": "Dalabon",
"ngl": "Lomwe",
"ngm": "Ngatik Men's Creole",
"ngn": "Ngwo",
"ngp": "Ngulu",
"ngq": "Ngurimi; Ngoreme",
"ngr": "Engdewu",
"ngs": "Gvoko",
"ngt": "Kriang; Ngeq",
"ngu": "Guerrero Nahuatl",
"ngv": "Nagumi",
"ngw": "Ngwaba",
"ngx": "Nggwahyi",
"ngy": "Tibea",
"ngz": "Ngungwel",
"nha": "Nhanda",
"nhb": "Beng",
"nhc": "Tabasco Nahuatl",
"nhd": "Chiripá; Ava Guaraní",
"nhe": "Eastern Huasteca Nahuatl",
"nhf": "Nhuwala",
"nhg": "Tetelcingo Nahuatl",
"nhh": "Nahari",
"nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl",
"nhk": "Isthmus-Cosoleacaque Nahuatl",
"nhm": "Morelos Nahuatl",
"nhn": "Central Nahuatl",
"nho": "Takuu",
"nhp": "Isthmus-Pajapan Nahuatl",
"nhq": "Huaxcaleca Nahuatl",
"nhr": "Naro",
"nht": "Ometepec Nahuatl",
"nhu": "Noone",
"nhv": "Temascaltepec Nahuatl",
"nhw": "Western Huasteca Nahuatl",
"nhx": "Isthmus-Mecayapan Nahuatl",
"nhy": "Northern Oaxaca Nahuatl",
"nhz": "Santa María La Alta Nahuatl",
"nia": "Nias",
"nib": "Nakame",
"nic": "Niger-Kordofanian languages",
"nid": "Ngandi",
"nie": "Niellim",
"nif": "Nek",
"nig": "Ngalakgan",
"nih": "Nyiha (Tanzania)",
"nii": "Nii",
"nij": "Ngaju",
"nik": "Southern Nicobarese",
"nil": "Nila",
"nim": "Nilamba",
"nin": "Ninzo",
"nio": "Nganasan",
"niq": "Nandi",
"nir": "Nimboran",
"nis": "Nimi",
"nit": "Southeastern Kolami",
"niu": "Niuean",
"niv": "Gilyak",
"niw": "Nimo",
"nix": "Hema",
"niy": "Ngiti",
"niz": "Ningil",
"nja": "Nzanyi",
"njb": "Nocte Naga",
"njd": "Ndonde Hamba",
"njh": "Lotha Naga",
"nji": "Gudanji",
"njj": "Njen",
"njl": "Njalgulgule",
"njm": "Angami Naga",
"njn": "Liangmai Naga",
"njo": "Ao Naga",
"njr": "Njerep",
"njs": "Nisa",
"njt": "Ndyuka-Trio Pidgin",
"nju": "Ngadjunmaya",
"njx": "Kunyi",
"njy": "Njyem",
"njz": "Nyishi",
"nka": "Nkoya",
"nkb": "Khoibu Naga",
"nkc": "Nkongho",
"nkd": "Koireng",
"nke": "Duke",
"nkf": "Inpui Naga",
"nkg": "Nekgini",
"nkh": "Khezha Naga",
"nki": "Thangal Naga",
"nkj": "Nakai",
"nkk": "Nokuku",
"nkm": "Namat",
"nkn": "Nkangala",
"nko": "Nkonya",
"nkp": "Niuatoputapu",
"nkq": "Nkami",
"nkr": "Nukuoro",
"nks": "North Asmat",
"nkt": "Nyika (Tanzania)",
"nku": "Bouna Kulango",
"nkv": "Nyika (Malawi and Zambia)",
"nkw": "Nkutu",
"nkx": "Nkoroo",
"nkz": "Nkari",
"nl": "Dutch; Flemish",
"nla": "Ngombale",
"nlc": "Nalca",
"nle": "East Nyala",
"nlg": "Gela",
"nli": "Grangali",
"nlj": "Nyali",
"nlk": "Ninia Yali",
"nll": "Nihali",
"nlm": "Mankiyali",
"nlo": "Ngul",
"nlq": "Lao Naga",
"nlu": "Nchumbulu",
"nlv": "Orizaba Nahuatl",
"nlw": "Walangama",
"nlx": "Nahali",
"nly": "Nyamal",
"nlz": "Nalögo",
"nma": "Maram Naga",
"nmb": "Big Nambas; V'ënen Taut",
"nmc": "Ngam",
"nmd": "Ndumu",
"nme": "Mzieme Naga",
"nmf": "Tangkhul Naga (India)",
"nmg": "Kwasio",
"nmh": "Monsang Naga",
"nmi": "Nyam",
"nmj": "Ngombe (Central African Republic)",
"nmk": "Namakura",
"nml": "Ndemli",
"nmm": "Manangba",
"nmn": "ǃXóõ",
"nmo": "Moyon Naga",
"nmp": "Nimanbur",
"nmq": "Nambya",
"nmr": "Nimbari",
"nms": "Letemboi",
"nmt": "Namonuito",
"nmu": "Northeast Maidu",
"nmv": "Ngamini",
"nmw": "Nimoa; Rifao",
"nmx": "Nama (Papua New Guinea)",
"nmy": "Namuyi",
"nmz": "Nawdm",
"nn": "Norwegian Nynorsk",
"nna": "Nyangumarta",
"nnb": "Nande",
"nnc": "Nancere",
"nnd": "West Ambae",
"nne": "Ngandyera",
"nnf": "Ngaing",
"nng": "Maring Naga",
"nnh": "Ngiemboon",
"nni": "North Nuaulu",
"nnj": "Nyangatom",
"nnk": "Nankina",
"nnl": "Northern Rengma Naga",
"nnm": "Namia",
"nnn": "Ngete",
"nnp": "Wancho Naga",
"nnq": "Ngindo",
"nnr": "Narungga",
"nnt": "Nanticoke",
"nnu": "Dwang",
"nnv": "Nugunu (Australia)",
"nnw": "Southern Nuni",
"nny": "Nyangga",
"nnz": "Nda'nda'",
"no": "Norwegian",
"noa": "Woun Meu",
"noc": "Nuk",
"nod": "Northern Thai",
"noe": "Nimadi",
"nof": "Nomane",
"nog": "Nogai",
"noh": "Nomu",
"noi": "Noiri",
"noj": "Nonuya",
"nok": "Nooksack",
"nol": "Nomlaki",
"nom": "Nocamán",
"non": "Old Norse",
"nop": "Numanggang",
"noq": "Ngongo",
"nos": "Eastern Nisu",
"not": "Nomatsiguenga",
"nou": "Ewage-Notu",
"nov": "Novial",
"now": "Nyambo",
"noy": "Noy",
"noz": "Nayi",
"npa": "Nar Phu",
"npb": "Nupbikha",
"npg": "Ponyo-Gongwang Naga",
"nph": "Phom Naga",
"npi": "Nepali (individual language)",
"npl": "Southeastern Puebla Nahuatl",
"npn": "Mondropolon",
"npo": "Pochuri Naga",
"nps": "Nipsan",
"npu": "Puimei Naga",
"npx": "Noipx",
"npy": "Napu",
"nqg": "Southern Nago",
"nqk": "Kura Ede Nago",
"nql": "Ngendelengo",
"nqm": "Ndom",
"nqn": "Nen",
"nqo": "N'Ko; N’Ko",
"nqq": "Kyan-Karyaw Naga",
"nqt": "Nteng",
"nqy": "Akyaung Ari Naga",
"nr": "South Ndebele",
"nra": "Ngom",
"nrb": "Nara",
"nrc": "Noric",
"nre": "Southern Rengma Naga",
"nrf": "Jèrriais; Guernésiais",
"nrg": "Narango",
"nri": "Chokri Naga",
"nrk": "Ngarla",
"nrl": "Ngarluma",
"nrm": "Narom",
"nrn": "Norn",
"nrp": "North Picene",
"nrr": "Norra; Nora",
"nrt": "Northern Kalapuya",
"nru": "Narua",
"nrx": "Ngurmbur",
"nrz": "Lala",
"nsa": "Sangtam Naga",
"nsb": "Lower Nossob",
"nsc": "Nshi",
"nsd": "Southern Nisu",
"nse": "Nsenga",
"nsf": "Northwestern Nisu",
"nsg": "Ngasa",
"nsh": "Ngoshie",
"nsi": "Nigerian Sign Language",
"nsk": "Naskapi",
"nsl": "Norwegian Sign Language",
"nsm": "Sumi Naga",
"nsn": "Nehan",
"nso": "Pedi; Northern Sotho; Sepedi",
"nsp": "Nepalese Sign Language",
"nsq": "Northern Sierra Miwok",
"nsr": "Maritime Sign Language",
"nss": "Nali",
"nst": "Tase Naga",
"nsu": "Sierra Negra Nahuatl",
"nsv": "Southwestern Nisu",
"nsw": "Navut",
"nsx": "Nsongo",
"nsy": "Nasal",
"nsz": "Nisenan",
"ntd": "Northern Tidung",
"nte": "Nathembo",
"ntg": "Ngantangarra",
"nti": "Natioro",
"ntj": "Ngaanyatjarra",
"ntk": "Ikoma-Nata-Isenye",
"ntm": "Nateni",
"nto": "Ntomba",
"ntp": "Northern Tepehuan",
"ntr": "Delo",
"ntu": "Natügu",
"ntw": "Nottoway",
"ntx": "Tangkhul Naga (Myanmar)",
"nty": "Mantsi",
"ntz": "Natanzi",
"nua": "Yuanga",
"nub": "Nubian languages",
"nuc": "Nukuini",
"nud": "Ngala",
"nue": "Ngundu",
"nuf": "Nusu",
"nug": "Nungali",
"nuh": "Ndunda",
"nui": "Ngumbi",
"nuj": "Nyole",
"nuk": "Nuu-chah-nulth; Nuuchahnulth",
"nul": "Nusa Laut",
"num": "Niuafo'ou",
"nun": "Anong",
"nuo": "Nguôn",
"nup": "Nupe-Nupe-Tako",
"nuq": "Nukumanu",
"nur": "Nukuria",
"nus": "Nuer",
"nut": "Nung (Viet Nam)",
"nuu": "Ngbundu",
"nuv": "Northern Nuni",
"nuw": "Nguluwan",
"nux": "Mehek",
"nuy": "Nunggubuyu",
"nuz": "Tlamacazapa Nahuatl",
"nv": "Navajo; Navaho",
"nvh": "Nasarian",
"nvm": "Namiae",
"nvo": "Nyokon",
"nwa": "Nawathinehena",
"nwb": "Nyabwa",
"nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari",
"nwe": "Ngwe",
"nwg": "Ngayawung",
"nwi": "Southwest Tanna",
"nwm": "Nyamusa-Molo",
"nwo": "Nauo",
"nwr": "Nawaru",
"nww": "Ndwewe",
"nwx": "Middle Newar",
"nwy": "Nottoway-Meherrin",
"nxa": "Nauete",
"nxd": "Ngando (Democratic Republic of Congo)",
"nxe": "Nage",
"nxg": "Ngad'a",
"nxi": "Nindi",
"nxk": "Koki Naga",
"nxl": "South Nuaulu",
"nxm": "Numidian",
"nxn": "Ngawun",
"nxo": "Ndambomo",
"nxq": "Naxi",
"nxr": "Ninggerum",
"nxx": "Nafri",
"ny": "Nyanja; Chewa; Chichewa",
"nyb": "Nyangbo",
"nyc": "Nyanga-li",
"nyd": "Nyore; Olunyole",
"nye": "Nyengo",
"nyf": "Giryama; Kigiryama",
"nyg": "Nyindu",
"nyh": "Nyikina",
"nyi": "Ama (Sudan)",
"nyj": "Nyanga",
"nyk": "Nyaneka",
"nyl": "Nyeu",
"nym": "Nyamwezi",
"nyn": "Nyankole",
"nyo": "Nyoro",
"nyp": "Nyang'i",
"nyq": "Nayini",
"nyr": "Nyiha (Malawi)",
"nys": "Nyungar",
"nyt": "Nyawaygi",
"nyu": "Nyungwe",
"nyv": "Nyulnyul",
"nyw": "Nyaw",
"nyx": "Nganyaywana",
"nyy": "Nyakyusa-Ngonde",
"nza": "Tigon Mbembe",
"nzb": "Njebi",
"nzd": "Nzadi",
"nzi": "Nzima",
"nzk": "Nzakara",
"nzm": "Zeme Naga",
"nzs": "New Zealand Sign Language",
"nzu": "Teke-Nzikou",
"nzy": "Nzakambay",
"nzz": "Nanga Dama Dogon",
"oaa": "Orok",
"oac": "Oroch",
"oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)",
"oav": "Old Avar",
"obi": "Obispeño",
"obk": "Southern Bontok",
"obl": "Oblo",
"obm": "Moabite",
"obo": "Obo Manobo",
"obr": "Old Burmese",
"obt": "Old Breton",
"obu": "Obulom",
"oc": "Occitan (post 1500)",
"oca": "Ocaina",
"och": "Old Chinese",
"ocm": "Old Cham",
"oco": "Old Cornish",
"ocu": "Atzingo Matlatzinca",
"oda": "Odut",
"odk": "Od",
"odt": "Old Dutch",
"odu": "Odual",
"ofo": "Ofo",
"ofs": "Old Frisian",
"ofu": "Efutop",
"ogb": "Ogbia",
"ogc": "Ogbah",
"oge": "Old Georgian",
"ogg": "Ogbogolo",
"ogo": "Khana",
"ogu": "Ogbronuagum",
"oht": "Old Hittite",
"ohu": "Old Hungarian",
"oia": "Oirata",
"oie": "Okolie",
"oin": "Inebu One",
"oj": "Ojibwa",
"ojb": "Northwestern Ojibwa",
"ojc": "Central Ojibwa",
"ojg": "Eastern Ojibwa",
"ojp": "Old Japanese",
"ojs": "Severn Ojibwa",
"ojv": "Ontong Java",
"ojw": "Western Ojibwa",
"oka": "Okanagan",
"okb": "Okobo",
"okc": "Kobo",
"okd": "Okodia",
"oke": "Okpe (Southwestern Edo)",
"okg": "Koko Babangk",
"okh": "Koresh-e Rostam",
"oki": "Okiek",
"okj": "Oko-Juwoi",
"okk": "Kwamtim One",
"okl": "Old Kentish Sign Language",
"okm": "Middle Korean (10th-16th cent.)",
"okn": "Oki-No-Erabu",
"oko": "Old Korean (3rd-9th cent.)",
"okr": "Kirike",
"oks": "Oko-Eni-Osayen",
"oku": "Oku",
"okv": "Orokaiva",
"okx": "Okpe (Northwestern Edo)",
"okz": "Old Khmer",
"ola": "Walungge",
"old": "Mochi",
"ole": "Olekha",
"olk": "Olkol",
"olm": "Oloma",
"olo": "Livvi",
"olr": "Olrat",
"olt": "Old Lithuanian",
"olu": "Kuvale",
"om": "Oromo",
"oma": "Omaha-Ponca",
"omb": "East Ambae",
"omc": "Mochica",
"omg": "Omagua",
"omi": "Omi",
"omk": "Omok",
"oml": "Ombo",
"omn": "Minoan",
"omo": "Utarmbung",
"omp": "Old Manipuri",
"omq": "Oto-Manguean languages",
"omr": "Old Marathi",
"omt": "Omotik",
"omu": "Omurano",
"omv": "Omotic languages",
"omw": "South Tairora",
"omx": "Old Mon",
"omy": "Old Malay",
"ona": "Ona",
"onb": "Lingao",
"one": "Oneida",
"ong": "Olo",
"oni": "Onin",
"onj": "Onjob",
"onk": "Kabore One",
"onn": "Onobasulu",
"ono": "Onondaga",
"onp": "Sartang",
"onr": "Northern One",
"ons": "Ono",
"ont": "Ontenu",
"onu": "Unua",
"onw": "Old Nubian",
"onx": "Onin Based Pidgin",
"ood": "Tohono O'odham",
"oog": "Ong",
"oon": "Önge",
"oor": "Oorlams",
"oos": "Old Ossetic",
"opa": "Okpamheri",
"opk": "Kopkaka",
"opm": "Oksapmin",
"opo": "Opao",
"opt": "Opata",
"opy": "Ofayé",
"or": "Oriya (macrolanguage); Odia (macrolanguage)",
"ora": "Oroha",
"orc": "Orma",
"ore": "Orejón",
"org": "Oring",
"orh": "Oroqen",
"orn": "Orang Kanaq",
"oro": "Orokolo",
"orr": "Oruma",
"ors": "Orang Seletar",
"ort": "Adivasi Oriya",
"oru": "Ormuri",
"orv": "Old Russian",
"orw": "Oro Win",
"orx": "Oro",
"ory": "Odia (individual language); Oriya (individual language)",
"orz": "Ormu",
"os": "Ossetian; Ossetic",
"osa": "Osage",
"osc": "Oscan",
"osi": "Osing",
"osn": "Old Sundanese",
"oso": "Ososo",
"osp": "Old Spanish",
"ost": "Osatu",
"osu": "Southern One",
"osx": "Old Saxon",
"ota": "Ottoman Turkish (1500-1928)",
"otb": "Old Tibetan",
"otd": "Ot Danum",
"ote": "Mezquital Otomi",
"oti": "Oti",
"otk": "Old Turkish",
"otl": "Tilapa Otomi",
"otm": "Eastern Highland Otomi",
"otn": "Tenango Otomi",
"oto": "Otomian languages",
"otq": "Querétaro Otomi",
"otr": "Otoro",
"ots": "Estado de México Otomi",
"ott": "Temoaya Otomi",
"otu": "Otuke",
"otw": "Ottawa",
"otx": "Texcatepec Otomi",
"oty": "Old Tamil",
"otz": "Ixtenco Otomi",
"oua": "Tagargrent",
"oub": "Glio-Oubi",
"oue": "Oune",
"oui": "Old Uighur",
"oum": "Ouma",
"ovd": "Elfdalian; Övdalian",
"owi": "Owiniga",
"owl": "Old Welsh",
"oyb": "Oy",
"oyd": "Oyda",
"oym": "Wayampi",
"oyy": "Oya'oya",
"ozm": "Koonzime",
"pa": "Panjabi; Punjabi",
"paa": "Papuan languages",
"pab": "Parecís",
"pac": "Pacoh",
"pad": "Paumarí",
"pae": "Pagibete",
"paf": "Paranawát",
"pag": "Pangasinan",
"pah": "Tenharim",
"pai": "Pe",
"pak": "Parakanã",
"pal": "Pahlavi",
"pam": "Pampanga; Kapampangan",
"pao": "Northern Paiute",
"pap": "Papiamento",
"paq": "Parya",
"par": "Panamint; Timbisha",
"pas": "Papasena",
"pau": "Palauan",
"pav": "Pakaásnovos",
"paw": "Pawnee",
"pax": "Pankararé",
"pay": "Pech",
"paz": "Pankararú",
"pbb": "Páez",
"pbc": "Patamona",
"pbe": "Mezontla Popoloca",
"pbf": "Coyotepec Popoloca",
"pbg": "Paraujano",
"pbh": "E'ñapa Woromaipu",
"pbi": "Parkwa",
"pbl": "Mak (Nigeria)",
"pbm": "Puebla Mazatec",
"pbn": "Kpasam",
"pbo": "Papel",
"pbp": "Badyara",
"pbr": "Pangwa",
"pbs": "Central Pame",
"pbt": "Southern Pashto",
"pbu": "Northern Pashto",
"pbv": "Pnar",
"pby": "Pyu (Papua New Guinea)",
"pca": "Santa Inés Ahuatempan Popoloca",
"pcb": "Pear",
"pcc": "Bouyei",
"pcd": "Picard",
"pce": "Ruching Palaung",
"pcf": "Paliyan",
"pcg": "Paniya",
"pch": "Pardhan",
"pci": "Duruwa",
"pcj": "Parenga",
"pck": "Paite Chin",
"pcl": "Pardhi",
"pcm": "Nigerian Pidgin",
"pcn": "Piti",
"pcp": "Pacahuara",
"pcw": "Pyapun",
"pda": "Anam",
"pdc": "Pennsylvania German",
"pdi": "Pa Di",
"pdn": "Podena; Fedan",
"pdo": "Padoe",
"pdt": "Plautdietsch",
"pdu": "Kayan",
"pea": "Peranakan Indonesian",
"peb": "Eastern Pomo",
"ped": "Mala (Papua New Guinea)",
"pee": "Taje",
"pef": "Northeastern Pomo",
"peg": "Pengo",
"peh": "Bonan",
"pei": "Chichimeca-Jonaz",
"pej": "Northern Pomo",
"pek": "Penchal",
"pel": "Pekal",
"pem": "Phende",
"peo": "Old Persian (ca. 600-400 B.C.)",
"pep": "Kunja",
"peq": "Southern Pomo",
"pes": "Iranian Persian",
"pev": "Pémono",
"pex": "Petats",
"pey": "Petjo",
"pez": "Eastern Penan",
"pfa": "Pááfang",
"pfe": "Pere",
"pfl": "Pfaelzisch",
"pga": "Sudanese Creole Arabic",
"pgd": "Gāndhārī",
"pgg": "Pangwali",
"pgi": "Pagi",
"pgk": "Rerep",
"pgl": "Primitive Irish",
"pgn": "Paelignian",
"pgs": "Pangseng",
"pgu": "Pagu",
"pgz": "Papua New Guinean Sign Language",
"pha": "Pa-Hng",
"phd": "Phudagi",
"phg": "Phuong",
"phh": "Phukha",
"phi": "Philippine languages",
"phj": "Pahari",
"phk": "Phake",
"phl": "Phalura; Palula",
"phm": "Phimbi",
"phn": "Phoenician",
"pho": "Phunoi",
"phq": "Phana'",
"phr": "Pahari-Potwari",
"pht": "Phu Thai",
"phu": "Phuan",
"phv": "Pahlavani",
"phw": "Phangduwali",
"pi": "Pali",
"pia": "Pima Bajo",
"pib": "Yine",
"pic": "Pinji",
"pid": "Piaroa",
"pie": "Piro",
"pif": "Pingelapese",
"pig": "Pisabo",
"pih": "Pitcairn-Norfolk",
"pij": "Pijao",
"pil": "Yom",
"pim": "Powhatan",
"pin": "Piame",
"pio": "Piapoco",
"pip": "Pero",
"pir": "Piratapuyo",
"pis": "Pijin",
"pit": "Pitta Pitta",
"piu": "Pintupi-Luritja",
"piv": "Pileni; Vaeakau-Taumako",
"piw": "Pimbwe",
"pix": "Piu",
"piy": "Piya-Kwonci",
"piz": "Pije",
"pjt": "Pitjantjatjara",
"pka": "Ardhamāgadhī Prākrit",
"pkb": "Pokomo; Kipfokomo",
"pkc": "Paekche",
"pkg": "Pak-Tong",
"pkh": "Pankhu",
"pkn": "Pakanha",
"pko": "Pökoot",
"pkp": "Pukapuka",
"pkr": "Attapady Kurumba",
"pks": "Pakistan Sign Language",
"pkt": "Maleng",
"pku": "Paku",
"pl": "Polish",
"pla": "Miani",
"plb": "Polonombauk",
"plc": "Central Palawano",
"pld": "Polari",
"ple": "Palu'e",
"plf": "Central Malayo-Polynesian languages",
"plg": "Pilagá",
"plh": "Paulohi",
"plj": "Polci",
"plk": "Kohistani Shina",
"pll": "Shwe Palaung",
"pln": "Palenquero",
"plo": "Oluta Popoluca",
"plq": "Palaic",
"plr": "Palaka Senoufo",
"pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca",
"plt": "Plateau Malagasy",
"plu": "Palikúr",
"plv": "Southwest Palawano",
"plw": "Brooke's Point Palawano",
"ply": "Bolyu",
"plz": "Paluan",
"pma": "Paama",
"pmb": "Pambia",
"pmd": "Pallanganmiddang",
"pme": "Pwaamei",
"pmf": "Pamona",
"pmh": "Māhārāṣṭri Prākrit",
"pmi": "Northern Pumi",
"pmj": "Southern Pumi",
"pmk": "Pamlico",
"pml": "Lingua Franca",
"pmm": "Pomo",
"pmn": "Pam",
"pmo": "Pom",
"pmq": "Northern Pame",
"pmr": "Paynamar",
"pms": "Piemontese",
"pmt": "Tuamotuan",
"pmw": "Plains Miwok",
"pmx": "Poumei Naga",
"pmy": "Papuan Malay",
"pmz": "Southern Pame",
"pna": "Punan Bah-Biau",
"pnb": "Western Panjabi",
"pnc": "Pannei",
"pnd": "Mpinda",
"pne": "Western Penan",
"png": "Pangu; Pongu",
"pnh": "Penrhyn",
"pni": "Aoheng",
"pnj": "Pinjarup",
"pnk": "Paunaka",
"pnl": "Paleni",
"pnm": "Punan Batu 1",
"pnn": "Pinai-Hagahai",
"pno": "Panobo",
"pnp": "Pancana",
"pnq": "Pana (Burkina Faso)",
"pnr": "Panim",
"pns": "Ponosakan",
"pnt": "Pontic",
"pnu": "Jiongnai Bunu",
"pnv": "Pinigura",
"pnw": "Banyjima; Panytyima",
"pnx": "Phong-Kniang",
"pny": "Pinyin",
"pnz": "Pana (Central African Republic)",
"poc": "Poqomam",
"poe": "San Juan Atzingo Popoloca",
"pof": "Poke",
"pog": "Potiguára",
"poh": "Poqomchi'",
"poi": "Highland Popoluca",
"pok": "Pokangá",
"pom": "Southeastern Pomo",
"pon": "Pohnpeian",
"poo": "Central Pomo",
"pop": "Pwapwâ",
"poq": "Texistepec Popoluca",
"pos": "Sayula Popoluca",
"pot": "Potawatomi",
"pov": "Upper Guinea Crioulo",
"pow": "San Felipe Otlaltepec Popoloca",
"pox": "Polabian",
"poy": "Pogolo",
"poz": "Malayo-Polynesian languages",
"ppe": "Papi",
"ppi": "Paipai",
"ppk": "Uma",
"ppl": "Pipil; Nicarao",
"ppm": "Papuma",
"ppn": "Papapana",
"ppo": "Folopa",
"ppp": "Pelende",
"ppq": "Pei",
"pps": "San Luís Temalacayuca Popoloca",
"ppt": "Pare",
"ppu": "Papora",
"pqa": "Pa'a",
"pqe": "Eastern Malayo-Polynesian languages",
"pqm": "Malecite-Passamaquoddy",
"pqw": "Western Malayo-Polynesian languages",
"pra": "Prakrit languages",
"prc": "Parachi",
"prd": "Parsi-Dari",
"pre": "Principense",
"prf": "Paranan",
"prg": "Prussian",
"prh": "Porohanon",
"pri": "Paicî",
"prk": "Parauk",
"prl": "Peruvian Sign Language",
"prm": "Kibiri",
"prn": "Prasuni",
"pro": "Old Provençal (to 1500); Old Occitan (to 1500)",
"prp": "Parsi",
"prq": "Ashéninka Perené",
"prr": "Puri",
"prs": "Dari; Afghan Persian",
"prt": "Phai",
"pru": "Puragi",
"prw": "Parawen",
"prx": "Purik",
"prz": "Providencia Sign Language",
"ps": "Pushto; Pashto",
"psa": "Asue Awyu",
"psc": "Iranian Sign Language; Persian Sign Language",
"psd": "Plains Indian Sign Language",
"pse": "Central Malay",
"psg": "Penang Sign Language",
"psh": "Southwest Pashai; Southwest Pashayi",
"psi": "Southeast Pashai; Southeast Pashayi",
"psl": "Puerto Rican Sign Language",
"psm": "Pauserna",
"psn": "Panasuan",
"pso": "Polish Sign Language",
"psp": "Philippine Sign Language",
"psq": "Pasi",
"psr": "Portuguese Sign Language",
"pss": "Kaulong",
"pst": "Central Pashto",
"psu": "Sauraseni Prākrit",
"psw": "Port Sandwich",
"psy": "Piscataway",
"pt": "Portuguese",
"pta": "Pai Tavytera",
"pth": "Pataxó Hã-Ha-Hãe",
"pti": "Pindiini; Wangkatha",
"ptn": "Patani",
"pto": "Zo'é",
"ptp": "Patep",
"ptq": "Pattapu",
"ptr": "Piamatsina",
"ptt": "Enrekang",
"ptu": "Bambam",
"ptv": "Port Vato",
"ptw": "Pentlatch",
"pty": "Pathiya",
"pua": "Western Highland Purepecha",
"pub": "Purum",
"puc": "Punan Merap",
"pud": "Punan Aput",
"pue": "Puelche",
"puf": "Punan Merah",
"pug": "Phuie",
"pui": "Puinave",
"puj": "Punan Tubu",
"pum": "Puma",
"puo": "Puoc",
"pup": "Pulabu",
"puq": "Puquina",
"pur": "Puruborá",
"put": "Putoh",
"puu": "Punu",
"puw": "Puluwatese",
"pux": "Puare",
"puy": "Purisimeño",
"pwa": "Pawaia",
"pwb": "Panawa",
"pwg": "Gapapaiwa",
"pwi": "Patwin",
"pwm": "Molbog",
"pwn": "Paiwan",
"pwo": "Pwo Western Karen",
"pwr": "Powari",
"pww": "Pwo Northern Karen",
"pxm": "Quetzaltepec Mixe",
"pye": "Pye Krumen",
"pym": "Fyam",
"pyn": "Poyanáwa",
"pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay",
"pyu": "Puyuma",
"pyx": "Pyu (Myanmar)",
"pyy": "Pyen",
"pzh": "Pazeh",
"pzn": "Jejara Naga; Para Naga",
"qu": "Quechua",
"qua": "Quapaw",
"qub": "Huallaga Huánuco Quechua",
"quc": "K'iche'; Quiché",
"qud": "Calderón Highland Quichua",
"quf": "Lambayeque Quechua",
"qug": "Chimborazo Highland Quichua",
"quh": "South Bolivian Quechua",
"qui": "Quileute",
"quk": "Chachapoyas Quechua",
"qul": "North Bolivian Quechua",
"qum": "Sipacapense",
"qun": "Quinault",
"qup": "Southern Pastaza Quechua",
"quq": "Quinqui",
"qur": "Yanahuanca Pasco Quechua",
"qus": "Santiago del Estero Quichua",
"quv": "Sacapulteco",
"quw": "Tena Lowland Quichua",
"qux": "Yauyos Quechua",
"quy": "Ayacucho Quechua",
"quz": "Cusco Quechua",
"qva": "Ambo-Pasco Quechua",
"qvc": "Cajamarca Quechua",
"qve": "Eastern Apurímac Quechua",
"qvh": "Huamalíes-Dos de Mayo Huánuco Quechua",
"qvi": "Imbabura Highland Quichua",
"qvj": "Loja Highland Quichua",
"qvl": "Cajatambo North Lima Quechua",
"qvm": "Margos-Yarowilca-Lauricocha Quechua",
"qvn": "North Junín Quechua",
"qvo": "Napo Lowland Quechua",
"qvp": "Pacaraos Quechua",
"qvs": "San Martín Quechua",
"qvw": "Huaylla Wanca Quechua",
"qvy": "Queyu",
"qvz": "Northern Pastaza Quichua",
"qwa": "Corongo Ancash Quechua",
"qwc": "Classical Quechua",
"qwe": "Quechuan (family)",
"qwh": "Huaylas Ancash Quechua",
"qwm": "Kuman (Russia)",
"qws": "Sihuas Ancash Quechua",
"qwt": "Kwalhioqua-Tlatskanai",
"qxa": "Chiquián Ancash Quechua",
"qxc": "Chincha Quechua",
"qxh": "Panao Huánuco Quechua",
"qxl": "Salasaca Highland Quichua",
"qxn": "Northern Conchucos Ancash Quechua",
"qxo": "Southern Conchucos Ancash Quechua",
"qxp": "Puno Quechua",
"qxq": "Qashqa'i",
"qxr": "Cañar Highland Quichua",
"qxs": "Southern Qiang",
"qxt": "Santa Ana de Tusi Pasco Quechua",
"qxu": "Arequipa-La Unión Quechua",
"qxw": "Jauja Wanca Quechua",
"qya": "Quenya",
"qyp": "Quiripi",
"raa": "Dungmali",
"rab": "Camling",
"rac": "Rasawa",
"rad": "Rade",
"raf": "Western Meohang",
"rag": "Logooli; Lulogooli",
"rah": "Rabha",
"rai": "Ramoaaina",
"raj": "Rajasthani",
"rak": "Tulu-Bohuai",
"ral": "Ralte",
"ram": "Canela",
"ran": "Riantana",
"rao": "Rao",
"rap": "Rapanui",
"raq": "Saam",
"rar": "Rarotongan; Cook Islands Maori",
"ras": "Tegali",
"rat": "Razajerdi",
"rau": "Raute",
"rav": "Sampang",
"raw": "Rawang",
"rax": "Rang",
"ray": "Rapa",
"raz": "Rahambuu",
"rbb": "Rumai Palaung",
"rbk": "Northern Bontok",
"rbl": "Miraya Bikol",
"rbp": "Barababaraba",
"rcf": "Réunion Creole French",
"rdb": "Rudbari",
"rea": "Rerau",
"reb": "Rembong",
"ree": "Rejang Kayan",
"reg": "Kara (Tanzania)",
"rei": "Reli",
"rej": "Rejang",
"rel": "Rendille",
"rem": "Remo",
"ren": "Rengao",
"rer": "Rer Bare",
"res": "Reshe",
"ret": "Retta",
"rey": "Reyesano",
"rga": "Roria",
"rge": "Romano-Greek",
"rgk": "Rangkas",
"rgn": "Romagnol",
"rgr": "Resígaro",
"rgs": "Southern Roglai",
"rgu": "Ringgou",
"rhg": "Rohingya",
"rhp": "Yahang",
"ria": "Riang (India)",
"rib": "Bribri Sign Language",
"rif": "Tarifit",
"ril": "Riang Lang; Riang (Myanmar)",
"rim": "Nyaturu",
"rin": "Nungu",
"rir": "Ribun",
"rit": "Ritharrngu",
"riu": "Riung",
"rjg": "Rajong",
"rji": "Raji",
"rjs": "Rajbanshi",
"rka": "Kraol",
"rkb": "Rikbaktsa",
"rkh": "Rakahanga-Manihiki",
"rki": "Rakhine",
"rkm": "Marka",
"rkt": "Rangpuri; Kamta",
"rkw": "Arakwal",
"rm": "Romansh",
"rma": "Rama",
"rmb": "Rembarrnga",
"rmc": "Carpathian Romani",
"rmd": "Traveller Danish",
"rme": "Angloromani",
"rmf": "Kalo Finnish Romani",
"rmg": "Traveller Norwegian",
"rmh": "Murkim",
"rmi": "Lomavren",
"rmk": "Romkun",
"rml": "Baltic Romani",
"rmm": "Roma",
"rmn": "Balkan Romani",
"rmo": "Sinte Romani",
"rmp": "Rempi",
"rmq": "Caló",
"rms": "Romanian Sign Language",
"rmt": "Domari",
"rmu": "Tavringer Romani",
"rmv": "Romanova",
"rmw": "Welsh Romani",
"rmx": "Romam",
"rmy": "Vlax Romani",
"rmz": "Marma",
"rn": "Rundi",
"rnb": "Brunca Sign Language",
"rnd": "Ruund",
"rng": "Ronga",
"rnl": "Ranglong",
"rnn": "Roon",
"rnp": "Rongpo",
"rnr": "Nari Nari",
"rnw": "Rungwa",
"ro": "Romanian; Moldavian; Moldovan",
"roa": "Romance languages",
"rob": "Tae'",
"roc": "Cacgia Roglai",
"rod": "Rogo",
"roe": "Ronji",
"rof": "Rombo",
"rog": "Northern Roglai",
"rol": "Romblomanon",
"rom": "Romany",
"roo": "Rotokas",
"rop": "Kriol",
"ror": "Rongga",
"rou": "Runga",
"row": "Dela-Oenale",
"rpn": "Repanbitip",
"rpt": "Rapting",
"rri": "Ririo",
"rro": "Waima",
"rrt": "Arritinngithigh",
"rsb": "Romano-Serbian",
"rsk": "Ruthenian; Rusyn",
"rsl": "Russian Sign Language",
"rsm": "Miriwoong Sign Language",
"rsn": "Rwandan Sign Language",
"rtc": "Rungtu Chin",
"rth": "Ratahan",
"rtm": "Rotuman",
"rts": "Yurats",
"rtw": "Rathawi",
"ru": "Russian",
"rub": "Gungu",
"ruc": "Ruuli",
"rue": "Rusyn",
"ruf": "Luguru",
"rug": "Roviana",
"ruh": "Ruga",
"rui": "Rufiji",
"ruk": "Che",
"ruo": "Istro Romanian",
"rup": "Macedo-Romanian; Aromanian; Arumanian",
"ruq": "Megleno Romanian",
"rut": "Rutul",
"ruu": "Lanas Lobu",
"ruy": "Mala (Nigeria)",
"ruz": "Ruma",
"rw": "Kinyarwanda",
"rwa": "Rawo",
"rwk": "Rwa",
"rwl": "Ruwila",
"rwm": "Amba (Uganda)",
"rwo": "Rawa",
"rwr": "Marwari (India)",
"rxd": "Ngardi",
"rxw": "Karuwali; Garuwali",
"ryn": "Northern Amami-Oshima",
"rys": "Yaeyama",
"ryu": "Central Okinawan",
"rzh": "Rāziḥī",
"sa": "Sanskrit",
"saa": "Saba",
"sab": "Buglere",
"sac": "Meskwaki",
"sad": "Sandawe",
"sae": "Sabanê",
"saf": "Safaliba",
"sah": "Yakut",
"sai": "South American Indian languages",
"saj": "Sahu",
"sak": "Sake",
"sal": "Salishan languages",
"sam": "Samaritan Aramaic",
"sao": "Sause",
"saq": "Samburu",
"sar": "Saraveca",
"sas": "Sasak",
"sat": "Santali",
"sau": "Saleman",
"sav": "Saafi-Saafi",
"saw": "Sawi",
"sax": "Sa",
"say": "Saya",
"saz": "Saurashtra",
"sba": "Ngambay",
"sbb": "Simbo",
"sbc": "Kele (Papua New Guinea)",
"sbd": "Southern Samo",
"sbe": "Saliba",
"sbf": "Chabu; Shabo",
"sbg": "Seget",
"sbh": "Sori-Harengan",
"sbi": "Seti",
"sbj": "Surbakhal",
"sbk": "Safwa",
"sbl": "Botolan Sambal",
"sbm": "Sagala",
"sbn": "Sindhi Bhil",
"sbo": "Sabüm",
"sbp": "Sangu (Tanzania)",
"sbq": "Sileibi",
"sbr": "Sembakung Murut",
"sbs": "Subiya",
"sbt": "Kimki",
"sbu": "Stod Bhoti",
"sbv": "Sabine",
"sbw": "Simba",
"sbx": "Seberuang",
"sby": "Soli",
"sbz": "Sara Kaba",
"sc": "Sardinian",
"scb": "Chut",
"sce": "Dongxiang",
"scf": "San Miguel Creole French",
"scg": "Sanggau",
"sch": "Sakachep",
"sci": "Sri Lankan Creole Malay",
"sck": "Sadri",
"scl": "Shina",
"scn": "Sicilian",
"sco": "Scots",
"scp": "Hyolmo; Helambu Sherpa",
"scq": "Sa'och",
"scs": "North Slavey",
"sct": "Southern Katang",
"scu": "Shumcho",
"scv": "Sheni",
"scw": "Sha",
"scx": "Sicel",
"sd": "Sindhi",
"sda": "Toraja-Sa'dan",
"sdb": "Shabak",
"sdc": "Sassarese Sardinian",
"sde": "Surubu",
"sdf": "Sarli",
"sdg": "Savi",
"sdh": "Southern Kurdish",
"sdj": "Suundi",
"sdk": "Sos Kundi",
"sdl": "Saudi Arabian Sign Language",
"sdn": "Gallurese Sardinian",
"sdo": "Bukar-Sadung Bidayuh",
"sdp": "Sherdukpen",
"sdq": "Semandang",
"sdr": "Oraon Sadri",
"sds": "Sened",
"sdt": "Shuadit",
"sdu": "Sarudu",
"sdv": "Eastern Sudanic languages",
"sdx": "Sibu Melanau",
"sdz": "Sallands",
"se": "Northern Sami",
"sea": "Semai",
"seb": "Shempire Senoufo",
"sec": "Sechelt",
"sed": "Sedang",
"see": "Seneca",
"sef": "Cebaara Senoufo",
"seg": "Segeju",
"seh": "Sena",
"sei": "Seri",
"sej": "Sene",
"sek": "Sekani",
"sel": "Selkup",
"sem": "Semitic languages",
"sen": "Nanerigé Sénoufo",
"seo": "Suarmin",
"sep": "Sìcìté Sénoufo",
"seq": "Senara Sénoufo",
"ser": "Serrano",
"ses": "Koyraboro Senni Songhai",
"set": "Sentani",
"seu": "Serui-Laut",
"sev": "Nyarafolo Senoufo",
"sew": "Sewa Bay",
"sey": "Secoya",
"sez": "Senthang Chin",
"sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language",
"sfe": "Eastern Subanen",
"sfm": "Small Flowery Miao",
"sfs": "South African Sign Language",
"sfw": "Sehwi",
"sg": "Sango",
"sga": "Old Irish (to 900)",
"sgb": "Mag-antsi Ayta",
"sgc": "Kipsigis",
"sgd": "Surigaonon",
"sge": "Segai",
"sgg": "Swiss-German Sign Language",
"sgh": "Shughni",
"sgi": "Suga",
"sgj": "Surgujia",
"sgk": "Sangkong",
"sgm": "Singa",
"sgn": "Sign languages",
"sgp": "Singpho",
"sgr": "Sangisari",
"sgs": "Samogitian",
"sgt": "Brokpake",
"sgu": "Salas",
"sgw": "Sebat Bet Gurage",
"sgx": "Sierra Leone Sign Language",
"sgy": "Sanglechi",
"sgz": "Sursurunga",
"sh": "Serbo-Croatian",
"sha": "Shall-Zwall",
"shb": "Ninam",
"shc": "Sonde",
"shd": "Kundal Shahi",
"she": "Sheko",
"shg": "Shua",
"shh": "Shoshoni",
"shi": "Tachelhit",
"shj": "Shatt",
"shk": "Shilluk",
"shl": "Shendu",
"shm": "Shahrudi",
"shn": "Shan",
"sho": "Shanga",
"shp": "Shipibo-Conibo",
"shq": "Sala",
"shr": "Shi",
"shs": "Shuswap",
"sht": "Shasta",
"shu": "Chadian Arabic",
"shv": "Shehri",
"shw": "Shwai",
"shx": "She",
"shy": "Tachawit",
"shz": "Syenara Senoufo",
"si": "Sinhala; Sinhalese",
"sia": "Akkala Sami",
"sib": "Sebop",
"sid": "Sidamo",
"sie": "Simaa",
"sif": "Siamou",
"sig": "Paasaal",
"sih": "Zire; Sîshëë",
"sii": "Shom Peng",
"sij": "Numbami",
"sik": "Sikiana",
"sil": "Tumulung Sisaala",
"sim": "Mende (Papua New Guinea)",
"sio": "Siouan languages",
"sip": "Sikkimese",
"siq": "Sonia",
"sir": "Siri",
"sis": "Siuslaw",
"sit": "Sino-Tibetan languages",
"siu": "Sinagen",
"siv": "Sumariup",
"siw": "Siwai",
"six": "Sumau",
"siy": "Sivandi",
"siz": "Siwi",
"sja": "Epena",
"sjb": "Sajau Basap",
"sjd": "Kildin Sami",
"sje": "Pite Sami",
"sjg": "Assangori",
"sjk": "Kemi Sami",
"sjl": "Sajalong; Miji",
"sjm": "Mapun",
"sjn": "Sindarin",
"sjo": "Xibe",
"sjp": "Surjapuri",
"sjr": "Siar-Lak",
"sjs": "Senhaja De Srair",
"sjt": "Ter Sami",
"sju": "Ume Sami",
"sjw": "Shawnee",
"sk": "Slovak",
"ska": "Skagit",
"skb": "Saek",
"skc": "Ma Manda",
"skd": "Southern Sierra Miwok",
"ske": "Seke (Vanuatu)",
"skf": "Sakirabiá",
"skg": "Sakalava Malagasy",
"skh": "Sikule",
"ski": "Sika",
"skj": "Seke (Nepal)",
"skm": "Kutong",
"skn": "Kolibugan Subanon",
"sko": "Seko Tengah",
"skp": "Sekapan",
"skq": "Sininkere",
"skr": "Saraiki; Seraiki",
"sks": "Maia",
"skt": "Sakata",
"sku": "Sakao",
"skv": "Skou",
"skw": "Skepi Creole Dutch",
"skx": "Seko Padang",
"sky": "Sikaiana",
"skz": "Sekar",
"sl": "Slovenian",
"sla": "Slavic languages",
"slc": "Sáliba",
"sld": "Sissala",
"sle": "Sholaga",
"slf": "Swiss-Italian Sign Language",
"slg": "Selungai Murut",
"slh": "Southern Puget Sound Salish",
"sli": "Lower Silesian",
"slj": "Salumá",
"sll": "Salt-Yui",
"slm": "Pangutaran Sama",
"sln": "Salinan",
"slp": "Lamaholot",
"slq": "Salchuq",
"slr": "Salar",
"sls": "Singapore Sign Language",
"slt": "Sila",
"slu": "Selaru",
"slw": "Sialum",
"slx": "Salampasu",
"sly": "Selayar",
"slz": "Ma'ya",
"sm": "Samoan",
"sma": "Southern Sami",
"smb": "Simbari",
"smc": "Som",
"smf": "Auwe",
"smg": "Simbali",
"smh": "Samei",
"smi": "Sami languages",
"smj": "Lule Sami",
"smk": "Bolinao",
"sml": "Central Sama",
"smm": "Musasa",
"smn": "Inari Sami",
"smp": "Samaritan",
"smq": "Samo",
"smr": "Simeulue",
"sms": "Skolt Sami",
"smt": "Simte",
"smu": "Somray",
"smv": "Samvedi",
"smw": "Sumbawa",
"smx": "Samba",
"smy": "Semnani",
"smz": "Simeku",
"sn": "Shona",
"snc": "Sinaugoro",
"sne": "Bau Bidayuh",
"snf": "Noon",
"sng": "Sanga (Democratic Republic of Congo)",
"sni": "Sensi",
"snj": "Riverain Sango",
"snk": "Soninke",
"snl": "Sangil",
"snm": "Southern Ma'di",
"snn": "Siona",
"sno": "Snohomish",
"snp": "Siane",
"snq": "Sangu (Gabon)",
"snr": "Sihan",
"sns": "South West Bay; Nahavaq",
"snu": "Senggi; Viid",
"snv": "Sa'ban",
"snw": "Selee",
"snx": "Sam",
"sny": "Saniyo-Hiyewe",
"snz": "Kou",
"so": "Somali",
"soa": "Thai Song",
"sob": "Sobei",
"soc": "So (Democratic Republic of Congo)",
"sod": "Songoora",
"soe": "Songomeno",
"sog": "Sogdian",
"soh": "Aka",
"soi": "Sonha",
"soj": "Soi",
"sok": "Sokoro",
"sol": "Solos",
"son": "Songhai languages",
"soo": "Songo",
"sop": "Songe",
"soq": "Kanasi",
"sor": "Somrai",
"sos": "Seeku",
"sou": "Southern Thai",
"sov": "Sonsorol",
"sow": "Sowanda",
"sox": "Swo",
"soy": "Miyobe",
"soz": "Temi",
"spb": "Sepa (Indonesia)",
"spc": "Sapé",
"spd": "Saep",
"spe": "Sepa (Papua New Guinea)",
"spg": "Sian",
"spi": "Saponi",
"spk": "Sengo",
"spl": "Selepet",
"spm": "Akukem",
"spn": "Sanapaná",
"spo": "Spokane",
"spp": "Supyire Senoufo",
"spq": "Loreto-Ucayali Spanish",
"spr": "Saparua",
"sps": "Saposa",
"spt": "Spiti Bhoti",
"spu": "Sapuan",
"spv": "Sambalpuri; Kosli",
"spx": "South Picene",
"spy": "Sabaot",
"sq": "Albanian",
"sqa": "Shama-Sambuga",
"sqh": "Shau",
"sqj": "Albanian languages",
"sqk": "Albanian Sign Language",
"sqm": "Suma",
"sqn": "Susquehannock",
"sqo": "Sorkhei",
"sqq": "Sou",
"sqr": "Siculo Arabic",
"sqs": "Sri Lankan Sign Language",
"sqt": "Soqotri",
"squ": "Squamish",
"sqx": "Kufr Qassem Sign Language (KQSL)",
"sr": "Serbian",
"sra": "Saruga",
"srb": "Sora",
"src": "Logudorese Sardinian",
"sre": "Sara",
"srf": "Nafi",
"srg": "Sulod",
"srh": "Sarikoli",
"sri": "Siriano",
"srk": "Serudung Murut",
"srl": "Isirawa",
"srm": "Saramaccan",
"srn": "Sranan Tongo",
"sro": "Campidanese Sardinian",
"srq": "Sirionó",
"srr": "Serer",
"srs": "Sarsi",
"srt": "Sauri",
"sru": "Suruí",
"srv": "Southern Sorsoganon",
"srw": "Serua",
"srx": "Sirmauri",
"sry": "Sera",
"srz": "Shahmirzadi",
"ss": "Swati",
"ssa": "Nilo-Saharan languages",
"ssb": "Southern Sama",
"ssc": "Suba-Simbiti",
"ssd": "Siroi",
"sse": "Balangingi; Bangingih Sama",
"ssf": "Thao",
"ssg": "Seimat",
"ssh": "Shihhi Arabic",
"ssi": "Sansi",
"ssj": "Sausi",
"ssk": "Sunam",
"ssl": "Western Sisaala",
"ssm": "Semnam",
"ssn": "Waata",
"sso": "Sissano",
"ssp": "Spanish Sign Language",
"ssq": "So'a",
"ssr": "Swiss-French Sign Language",
"sss": "Sô",
"sst": "Sinasina",
"ssu": "Susuami",
"ssv": "Shark Bay",
"ssx": "Samberigi",
"ssy": "Saho",
"ssz": "Sengseng",
"st": "Southern Sotho",
"sta": "Settla",
"stb": "Northern Subanen",
"std": "Sentinel",
"ste": "Liana-Seti",
"stf": "Seta",
"stg": "Trieng",
"sth": "Shelta",
"sti": "Bulo Stieng",
"stj": "Matya Samo",
"stk": "Arammba",
"stl": "Stellingwerfs",
"stm": "Setaman",
"stn": "Owa",
"sto": "Stoney",
"stp": "Southeastern Tepehuan",
"stq": "Saterfriesisch",
"str": "Straits Salish",
"sts": "Shumashti",
"stt": "Budeh Stieng",
"stu": "Samtao",
"stv": "Silt'e",
"stw": "Satawalese",
"sty": "Siberian Tatar",
"su": "Sundanese",
"sua": "Sulka",
"sub": "Suku",
"suc": "Western Subanon",
"sue": "Suena",
"sug": "Suganga",
"sui": "Suki",
"suj": "Shubi",
"suk": "Sukuma",
"suo": "Bouni",
"suq": "Tirmaga-Chai Suri; Suri",
"sur": "Mwaghavul",
"sus": "Susu",
"sut": "Subtiaba",
"suv": "Puroik",
"suw": "Sumbwa",
"sux": "Sumerian",
"suy": "Suyá",
"suz": "Sunwar",
"sv": "Swedish",
"sva": "Svan",
"svb": "Ulau-Suain",
"svc": "Vincentian Creole English",
"sve": "Serili",
"svk": "Slovakian Sign Language",
"svm": "Slavomolisano",
"svs": "Savosavo",
"svx": "Skalvian",
"sw": "Swahili (macrolanguage)",
"swb": "Maore Comorian",
"swc": "Congo Swahili",
"swf": "Sere",
"swg": "Swabian",
"swh": "Swahili (individual language); Kiswahili",
"swi": "Sui",
"swj": "Sira",
"swk": "Malawi Sena",
"swl": "Swedish Sign Language",
"swm": "Samosa",
"swn": "Sawknah",
"swo": "Shanenawa",
"swp": "Suau",
"swq": "Sharwa",
"swr": "Saweru",
"sws": "Seluwasan",
"swt": "Sawila",
"swu": "Suwawa",
"swv": "Shekhawati",
"sww": "Sowa",
"swx": "Suruahá",
"swy": "Sarua",
"sxb": "Suba",
"sxc": "Sicanian",
"sxe": "Sighu",
"sxg": "Shuhi; Shixing",
"sxk": "Southern Kalapuya",
"sxl": "Selian",
"sxm": "Samre",
"sxn": "Sangir",
"sxo": "Sorothaptic",
"sxr": "Saaroa",
"sxs": "Sasaru",
"sxu": "Upper Saxon",
"sxw": "Saxwe Gbe",
"sya": "Siang",
"syb": "Central Subanen",
"syc": "Classical Syriac",
"syd": "Samoyedic languages",
"syi": "Seki",
"syk": "Sukur",
"syl": "Sylheti",
"sym": "Maya Samo",
"syn": "Senaya",
"syo": "Suoy",
"syr": "Syriac",
"sys": "Sinyar",
"syw": "Kagate",
"syx": "Samay",
"syy": "Al-Sayyid Bedouin Sign Language",
"sza": "Semelai",
"szb": "Ngalum",
"szc": "Semaq Beri",
"szd": "Seru",
"sze": "Seze",
"szg": "Sengele",
"szl": "Silesian",
"szn": "Sula",
"szp": "Suabo",
"szs": "Solomon Islands Sign Language",
"szv": "Isu (Fako Division)",
"szw": "Sawai",
"szy": "Sakizaya",
"ta": "Tamil",
"taa": "Lower Tanana",
"tab": "Tabassaran",
"tac": "Lowland Tarahumara",
"tad": "Tause",
"tae": "Tariana",
"taf": "Tapirapé",
"tag": "Tagoi",
"tai": "Tai languages",
"taj": "Eastern Tamang",
"tak": "Tala",
"tal": "Tal",
"tan": "Tangale",
"tao": "Yami",
"tap": "Taabwa",
"taq": "Tamasheq",
"tar": "Central Tarahumara",
"tas": "Tay Boi",
"tau": "Upper Tanana",
"tav": "Tatuyo",
"taw": "Tai",
"tax": "Tamki",
"tay": "Atayal",
"taz": "Tocho",
"tba": "Aikanã",
"tbc": "Takia",
"tbd": "Kaki Ae",
"tbe": "Tanimbili",
"tbf": "Mandara",
"tbg": "North Tairora",
"tbh": "Dharawal; Thurawal",
"tbi": "Gaam",
"tbj": "Tiang",
"tbk": "Calamian Tagbanwa",
"tbl": "Tboli",
"tbm": "Tagbu",
"tbn": "Barro Negro Tunebo",
"tbo": "Tawala",
"tbp": "Taworta; Diebroud",
"tbq": "Tibeto-Burman languages",
"tbr": "Tumtum",
"tbs": "Tanguat",
"tbt": "Tembo (Kitembo)",
"tbu": "Tubar",
"tbv": "Tobo",
"tbw": "Tagbanwa",
"tbx": "Kapin",
"tby": "Tabaru",
"tbz": "Ditammari",
"tca": "Ticuna",
"tcb": "Tanacross",
"tcc": "Datooga",
"tcd": "Tafi",
"tce": "Southern Tutchone",
"tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec",
"tcg": "Tamagario",
"tch": "Turks And Caicos Creole English",
"tci": "Wára",
"tck": "Tchitchege",
"tcl": "Taman (Myanmar)",
"tcm": "Tanahmerah",
"tcn": "Tichurong",
"tco": "Taungyo",
"tcp": "Tawr Chin",
"tcq": "Kaiy",
"tcs": "Torres Strait Creole; Yumplatok",
"tct": "T'en",
"tcu": "Southeastern Tarahumara",
"tcw": "Tecpatlán Totonac",
"tcx": "Toda",
"tcy": "Tulu",
"tcz": "Thado Chin",
"tda": "Tagdal",
"tdb": "Panchpargania",
"tdc": "Emberá-Tadó",
"tdd": "Tai Nüa",
"tde": "Tiranige Diga Dogon",
"tdf": "Talieng",
"tdg": "Western Tamang",
"tdh": "Thulung",
"tdi": "Tomadino",
"tdj": "Tajio",
"tdk": "Tambas",
"tdl": "Sur",
"tdm": "Taruma",
"tdn": "Tondano",
"tdo": "Teme",
"tdq": "Tita",
"tdr": "Todrah",
"tds": "Doutai",
"tdt": "Tetun Dili",
"tdv": "Toro",
"tdx": "Tandroy-Mahafaly Malagasy",
"tdy": "Tadyawan",
"te": "Telugu",
"tea": "Temiar",
"teb": "Tetete",
"tec": "Terik",
"ted": "Tepo Krumen",
"tee": "Huehuetla Tepehua",
"tef": "Teressa",
"teg": "Teke-Tege",
"teh": "Tehuelche",
"tei": "Torricelli",
"tek": "Ibali Teke",
"tem": "Timne",
"ten": "Tama (Colombia)",
"teo": "Teso",
"tep": "Tepecano",
"teq": "Temein",
"ter": "Tereno",
"tes": "Tengger",
"tet": "Tetum",
"teu": "Soo",
"tev": "Teor",
"tew": "Tewa (USA)",
"tex": "Tennet",
"tey": "Tulishi",
"tez": "Tetserret",
"tfi": "Tofin Gbe",
"tfn": "Tanaina",
"tfo": "Tefaro",
"tfr": "Teribe",
"tft": "Ternate",
"tg": "Tajik",
"tga": "Sagalla",
"tgb": "Tobilung",
"tgc": "Tigak",
"tgd": "Ciwogai",
"tge": "Eastern Gorkha Tamang",
"tgf": "Chalikha",
"tgh": "Tobagonian Creole English",
"tgi": "Lawunuia",
"tgj": "Tagin",
"tgn": "Tandaganon",
"tgo": "Sudest",
"tgp": "Tangoa",
"tgq": "Tring",
"tgr": "Tareng",
"tgs": "Nume",
"tgt": "Central Tagbanwa",
"tgu": "Tanggu",
"tgv": "Tingui-Boto",
"tgw": "Tagwana Senoufo",
"tgx": "Tagish",
"tgy": "Togoyo",
"tgz": "Tagalaka",
"th": "Thai",
"thd": "Kuuk Thaayorre; Thayore",
"the": "Chitwania Tharu",
"thf": "Thangmi",
"thh": "Northern Tarahumara",
"thi": "Tai Long",
"thk": "Tharaka; Kitharaka",
"thl": "Dangaura Tharu",
"thm": "Aheu",
"thn": "Thachanadan",
"thp": "Thompson",
"thq": "Kochila Tharu",
"thr": "Rana Tharu",
"ths": "Thakali",
"tht": "Tahltan",
"thu": "Thuri",
"thv": "Tahaggart Tamahaq",
"thy": "Tha",
"thz": "Tayart Tamajeq",
"ti": "Tigrinya",
"tia": "Tidikelt Tamazight",
"tic": "Tira",
"tif": "Tifal",
"tig": "Tigre",
"tih": "Timugon Murut",
"tii": "Tiene",
"tij": "Tilung",
"tik": "Tikar",
"til": "Tillamook",
"tim": "Timbe",
"tin": "Tindi",
"tio": "Teop",
"tip": "Trimuris",
"tiq": "Tiéfo",
"tis": "Masadiit Itneg",
"tit": "Tinigua",
"tiu": "Adasen",
"tiv": "Tiv",
"tiw": "Tiwi",
"tix": "Southern Tiwa",
"tiy": "Tiruray",
"tiz": "Tai Hongjin",
"tja": "Tajuasohn",
"tjg": "Tunjung",
"tji": "Northern Tujia",
"tjj": "Tjungundji",
"tjl": "Tai Laing",
"tjm": "Timucua",
"tjn": "Tonjon",
"tjo": "Temacine Tamazight",
"tjp": "Tjupany",
"tjs": "Southern Tujia",
"tju": "Tjurruru",
"tjw": "Djabwurrung",
"tk": "Turkmen",
"tka": "Truká",
"tkb": "Buksa",
"tkd": "Tukudede",
"tke": "Takwane",
"tkf": "Tukumanféd",
"tkg": "Tesaka Malagasy",
"tkl": "Tokelau",
"tkm": "Takelma",
"tkn": "Toku-No-Shima",
"tkp": "Tikopia",
"tkq": "Tee",
"tkr": "Tsakhur",
"tks": "Takestani",
"tkt": "Kathoriya Tharu",
"tku": "Upper Necaxa Totonac",
"tkv": "Mur Pano",
"tkw": "Teanu",
"tkx": "Tangko",
"tkz": "Takua",
"tl": "Tagalog",
"tla": "Southwestern Tepehuan",
"tlb": "Tobelo",
"tlc": "Yecuatla Totonac",
"tld": "Talaud",
"tlf": "Telefol",
"tlg": "Tofanma",
"tlh": "Klingon; tlhIngan Hol",
"tli": "Tlingit",
"tlj": "Talinga-Bwisi",
"tlk": "Taloki",
"tll": "Tetela",
"tlm": "Tolomako",
"tln": "Talondo'",
"tlo": "Talodi",
"tlp": "Filomena Mata-Coahuitlán Totonac",
"tlq": "Tai Loi",
"tlr": "Talise",
"tls": "Tambotalo",
"tlt": "Sou Nama; Teluti",
"tlu": "Tulehu",
"tlv": "Taliabu",
"tlx": "Khehek",
"tly": "Talysh",
"tma": "Tama (Chad)",
"tmb": "Katbol; Avava",
"tmc": "Tumak",
"tmd": "Haruai",
"tme": "Tremembé",
"tmf": "Toba-Maskoy",
"tmg": "Ternateño",
"tmh": "Tamashek",
"tmi": "Tutuba",
"tmj": "Samarokena",
"tmk": "Northwestern Tamang",
"tml": "Tamnim Citak",
"tmm": "Tai Thanh",
"tmn": "Taman (Indonesia)",
"tmo": "Temoq",
"tmq": "Tumleo",
"tmr": "Jewish Babylonian Aramaic (ca. 200-1200 CE)",
"tms": "Tima",
"tmt": "Tasmate",
"tmu": "Iau",
"tmv": "Tembo (Motembo)",
"tmw": "Temuan",
"tmy": "Tami",
"tmz": "Tamanaku",
"tn": "Tswana",
"tna": "Tacana",
"tnb": "Western Tunebo",
"tnc": "Tanimuca-Retuarã",
"tnd": "Angosturas Tunebo",
"tng": "Tobanga",
"tnh": "Maiani",
"tni": "Tandia",
"tnk": "Kwamera",
"tnl": "Lenakel",
"tnm": "Tabla",
"tnn": "North Tanna",
"tno": "Toromono",
"tnp": "Whitesands",
"tnq": "Taino",
"tnr": "Ménik",
"tns": "Tenis",
"tnt": "Tontemboan",
"tnu": "Tay Khang",
"tnv": "Tangchangya",
"tnw": "Tonsawang",
"tnx": "Tanema",
"tny": "Tongwe",
"tnz": "Ten'edn",
"to": "Tonga (Tonga Islands)",
"tob": "Toba",
"toc": "Coyutla Totonac",
"tod": "Toma",
"tof": "Gizrra",
"tog": "Tonga (Nyasa)",
"toh": "Gitonga",
"toi": "Tonga (Zambia)",
"toj": "Tojolabal",
"tok": "Toki Pona",
"tol": "Tolowa",
"tom": "Tombulu",
"too": "Xicotepec De Juárez Totonac",
"top": "Papantla Totonac",
"toq": "Toposa",
"tor": "Togbo-Vara Banda",
"tos": "Highland Totonac",
"tou": "Tho",
"tov": "Upper Taromi",
"tow": "Jemez",
"tox": "Tobian",
"toy": "Topoiyo",
"toz": "To",
"tpa": "Taupota",
"tpc": "Azoyú Me'phaa; Azoyú Tlapanec",
"tpe": "Tippera",
"tpf": "Tarpia",
"tpg": "Kula",
"tpi": "Tok Pisin",
"tpj": "Tapieté",
"tpk": "Tupinikin",
"tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec",
"tpm": "Tampulma",
"tpn": "Tupinambá",
"tpo": "Tai Pao",
"tpp": "Pisaflores Tepehua",
"tpq": "Tukpa",
"tpr": "Tuparí",
"tpt": "Tlachichilco Tepehua",
"tpu": "Tampuan",
"tpv": "Tanapag",
"tpw": "Tupí",
"tpx": "Acatepec Me'phaa; Acatepec Tlapanec",
"tpy": "Trumai",
"tpz": "Tinputz",
"tqb": "Tembé",
"tql": "Lehali",
"tqm": "Turumsa",
"tqn": "Tenino",
"tqo": "Toaripi",
"tqp": "Tomoip",
"tqq": "Tunni",
"tqr": "Torona",
"tqt": "Western Totonac",
"tqu": "Touo",
"tqw": "Tonkawa",
"tr": "Turkish",
"tra": "Tirahi",
"trb": "Terebu",
"trc": "Copala Triqui",
"trd": "Turi",
"tre": "East Tarangan",
"trf": "Trinidadian Creole English",
"trg": "Lishán Didán",
"trh": "Turaka",
"tri": "Trió",
"trj": "Toram",
"trk": "Turkic languages",
"trl": "Traveller Scottish",
"trm": "Tregami",
"trn": "Trinitario",
"tro": "Tarao Naga",
"trp": "Kok Borok",
"trq": "San Martín Itunyoso Triqui",
"trr": "Taushiro",
"trs": "Chicahuaxtla Triqui",
"trt": "Tunggare",
"tru": "Turoyo; Surayt",
"trv": "Sediq; Seediq; Taroko",
"trw": "Torwali",
"trx": "Tringgus-Sembaan Bidayuh",
"try": "Turung",
"trz": "Torá",
"ts": "Tsonga",
"tsa": "Tsaangi",
"tsb": "Tsamai",
"tsc": "Tswa",
"tsd": "Tsakonian",
"tse": "Tunisian Sign Language",
"tsg": "Tausug",
"tsh": "Tsuvan",
"tsi": "Tsimshian",
"tsj": "Tshangla",
"tsk": "Tseku",
"tsl": "Ts'ün-Lao",
"tsm": "Turkish Sign Language; Türk İşaret Dili",
"tsp": "Northern Toussian",
"tsq": "Thai Sign Language",
"tsr": "Akei",
"tss": "Taiwan Sign Language",
"tst": "Tondi Songway Kiini",
"tsu": "Tsou",
"tsv": "Tsogo",
"tsw": "Tsishingini",
"tsx": "Mubami",
"tsy": "Tebul Sign Language",
"tsz": "Purepecha",
"tt": "Tatar",
"tta": "Tutelo",
"ttb": "Gaa",
"ttc": "Tektiteko",
"ttd": "Tauade",
"tte": "Bwanabwana",
"ttf": "Tuotomb",
"ttg": "Tutong",
"tth": "Upper Ta'oih",
"tti": "Tobati",
"ttj": "Tooro",
"ttk": "Totoro",
"ttl": "Totela",
"ttm": "Northern Tutchone",
"ttn": "Towei",
"tto": "Lower Ta'oih",
"ttp": "Tombelala",
"ttq": "Tawallammat Tamajaq",
"ttr": "Tera",
"tts": "Northeastern Thai",
"ttt": "Muslim Tat",
"ttu": "Torau",
"ttv": "Titan",
"ttw": "Long Wat",
"tty": "Sikaritai",
"ttz": "Tsum",
"tua": "Wiarumus",
"tub": "Tübatulabal",
"tuc": "Mutu",
"tud": "Tuxá",
"tue": "Tuyuca",
"tuf": "Central Tunebo",
"tug": "Tunia",
"tuh": "Taulil",
"tui": "Tupuri",
"tuj": "Tugutil",
"tul": "Tula",
"tum": "Tumbuka",
"tun": "Tunica",
"tuo": "Tucano",
"tup": "Tupi languages",
"tuq": "Tedaga",
"tus": "Tuscarora",
"tut": "Altaic languages",
"tuu": "Tututni",
"tuv": "Turkana",
"tuw": "Tungus languages",
"tux": "Tuxináwa",
"tuy": "Tugen",
"tuz": "Turka",
"tva": "Vaghua",
"tvd": "Tsuvadi",
"tve": "Te'un",
"tvk": "Southeast Ambrym",
"tvl": "Tuvalu",
"tvm": "Tela-Masbuar",
"tvn": "Tavoyan",
"tvo": "Tidore",
"tvs": "Taveta",
"tvt": "Tutsa Naga",
"tvu": "Tunen",
"tvw": "Sedoa",
"tvx": "Taivoan",
"tvy": "Timor Pidgin",
"tw": "Twi",
"twa": "Twana",
"twb": "Western Tawbuid",
"twc": "Teshenawa",
"twd": "Twents",
"twe": "Tewa (Indonesia)",
"twf": "Northern Tiwa",
"twg": "Tereweng",
"twh": "Tai Dón",
"twl": "Tawara",
"twm": "Tawang Monpa",
"twn": "Twendi",
"two": "Tswapong",
"twp": "Ere",
"twq": "Tasawaq",
"twr": "Southwestern Tarahumara",
"twt": "Turiwára",
"twu": "Termanu",
"tww": "Tuwari",
"twx": "Tewe",
"twy": "Tawoyan",
"txa": "Tombonuo",
"txb": "Tokharian B",
"txc": "Tsetsaut",
"txe": "Totoli",
"txg": "Tangut",
"txh": "Thracian",
"txi": "Ikpeng",
"txj": "Tarjumo",
"txm": "Tomini",
"txn": "West Tarangan",
"txo": "Toto",
"txq": "Tii",
"txr": "Tartessian",
"txs": "Tonsea",
"txt": "Citak",
"txu": "Kayapó",
"txx": "Tatana",
"txy": "Tanosy Malagasy",
"ty": "Tahitian",
"tya": "Tauya",
"tye": "Kyanga",
"tyh": "O'du",
"tyi": "Teke-Tsaayi",
"tyj": "Tai Do; Tai Yo",
"tyl": "Thu Lao",
"tyn": "Kombai",
"typ": "Thaypan",
"tyr": "Tai Daeng",
"tys": "Tày Sa Pa",
"tyt": "Tày Tac",
"tyu": "Kua",
"tyv": "Tuvinian",
"tyx": "Teke-Tyee",
"tyy": "Tiyaa",
"tyz": "Tày",
"tza": "Tanzanian Sign Language",
"tzh": "Tzeltal",
"tzj": "Tz'utujil",
"tzl": "Talossan",
"tzm": "Central Atlas Tamazight",
"tzn": "Tugun",
"tzo": "Tzotzil",
"tzx": "Tabriak",
"uam": "Uamué",
"uan": "Kuan",
"uar": "Tairuma",
"uba": "Ubang",
"ubi": "Ubi",
"ubl": "Buhi'non Bikol",
"ubr": "Ubir",
"ubu": "Umbu-Ungu",
"uby": "Ubykh",
"uda": "Uda",
"ude": "Udihe",
"udg": "Muduga",
"udi": "Udi",
"udj": "Ujir",
"udl": "Wuzlam",
"udm": "Udmurt",
"udu": "Uduk",
"ues": "Kioko",
"ufi": "Ufim",
"ug": "Uighur; Uyghur",
"uga": "Ugaritic",
"ugb": "Kuku-Ugbanh",
"uge": "Ughele",
"ugh": "Kubachi",
"ugn": "Ugandan Sign Language",
"ugo": "Ugong",
"ugy": "Uruguayan Sign Language",
"uha": "Uhami",
"uhn": "Damal",
"uis": "Uisai",
"uiv": "Iyive",
"uji": "Tanjijili",
"uk": "Ukrainian",
"uka": "Kaburi",
"ukg": "Ukuriguma",
"ukh": "Ukhwejo",
"uki": "Kui (India)",
"ukk": "Muak Sa-aak",
"ukl": "Ukrainian Sign Language",
"ukp": "Ukpe-Bayobiri",
"ukq": "Ukwa",
"uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language",
"uku": "Ukue",
"ukv": "Kuku",
"ukw": "Ukwuani-Aboh-Ndoni",
"uky": "Kuuk-Yak",
"ula": "Fungwa",
"ulb": "Ulukwumi",
"ulc": "Ulch",
"ule": "Lule",
"ulf": "Usku; Afra",
"uli": "Ulithian",
"ulk": "Meriam Mir",
"ull": "Ullatan",
"ulm": "Ulumanda'",
"uln": "Unserdeutsch",
"ulu": "Uma' Lung",
"ulw": "Ulwa",
"uma": "Umatilla",
"umb": "Umbundu",
"umc": "Marrucinian",
"umd": "Umbindhamu",
"umg": "Morrobalama; Umbuygamu",
"umi": "Ukit",
"umm": "Umon",
"umn": "Makyan Naga",
"umo": "Umotína",
"ump": "Umpila",
"umr": "Umbugarla",
"ums": "Pendau",
"umu": "Munsee",
"una": "North Watut",
"und": "Undetermined",
"une": "Uneme",
"ung": "Ngarinyin",
"uni": "Uni",
"unk": "Enawené-Nawé",
"unm": "Unami",
"unn": "Kurnai",
"unr": "Mundari",
"unu": "Unubahe",
"unx": "Munda",
"unz": "Unde Kaili",
"uon": "Kulon",
"upi": "Umeda",
"upv": "Uripiv-Wala-Rano-Atchin",
"ur": "Urdu",
"ura": "Urarina",
"urb": "Urubú-Kaapor; Kaapor",
"urc": "Urningangg",
"ure": "Uru",
"urf": "Uradhi",
"urg": "Urigina",
"urh": "Urhobo",
"uri": "Urim",
"urj": "Uralic languages",
"urk": "Urak Lawoi'",
"url": "Urali",
"urm": "Urapmin",
"urn": "Uruangnirin",
"uro": "Ura (Papua New Guinea)",
"urp": "Uru-Pa-In",
"urr": "Lehalurup; Löyöp",
"urt": "Urat",
"uru": "Urumi",
"urv": "Uruava",
"urw": "Sop",
"urx": "Urimo",
"ury": "Orya",
"urz": "Uru-Eu-Wau-Wau",
"usa": "Usarufa",
"ush": "Ushojo",
"usi": "Usui",
"usk": "Usaghade",
"usp": "Uspanteco",
"uss": "us-Saare",
"usu": "Uya",
"uta": "Otank",
"ute": "Ute-Southern Paiute",
"uth": "ut-Hun",
"utp": "Amba (Solomon Islands)",
"utr": "Etulo",
"utu": "Utu",
"uum": "Urum",
"uur": "Ura (Vanuatu)",
"uuu": "U",
"uve": "West Uvean; Fagauvea",
"uvh": "Uri",
"uvl": "Lote",
"uwa": "Kuku-Uwanh",
"uya": "Doko-Uyanga",
"uz": "Uzbek",
"uzn": "Northern Uzbek",
"uzs": "Southern Uzbek",
"vaa": "Vaagri Booli",
"vae": "Vale",
"vaf": "Vafsi",
"vag": "Vagla",
"vah": "Varhadi-Nagpuri",
"vai": "Vai",
"vaj": "Sekele; Northwestern ǃKung; Vasekele",
"val": "Vehes",
"vam": "Vanimo",
"van": "Valman",
"vao": "Vao",
"vap": "Vaiphei",
"var": "Huarijio",
"vas": "Vasavi",
"vau": "Vanuma",
"vav": "Varli",
"vay": "Wayu",
"vbb": "Southeast Babar",
"vbk": "Southwestern Bontok",
"ve": "Venda",
"vec": "Venetian",
"ved": "Veddah",
"vel": "Veluws",
"vem": "Vemgo-Mabas",
"veo": "Ventureño",
"vep": "Veps",
"ver": "Mom Jango",
"vgr": "Vaghri",
"vgt": "Vlaamse Gebarentaal; Flemish Sign Language",
"vi": "Vietnamese",
"vic": "Virgin Islands Creole English",
"vid": "Vidunda",
"vif": "Vili",
"vig": "Viemo",
"vil": "Vilela",
"vin": "Vinza",
"vis": "Vishavan",
"vit": "Viti",
"viv": "Iduna",
"vka": "Kariyarra",
"vkj": "Kujarge",
"vkk": "Kaur",
"vkl": "Kulisusu",
"vkm": "Kamakan",
"vkn": "Koro Nulu",
"vko": "Kodeoha",
"vkp": "Korlai Creole Portuguese",
"vkt": "Tenggarong Kutai Malay",
"vku": "Kurrama",
"vkz": "Koro Zuba",
"vlp": "Valpei",
"vls": "Vlaams",
"vma": "Martuyhunira",
"vmb": "Barbaram",
"vmc": "Juxtlahuaca Mixtec",
"vmd": "Mudu Koraga",
"vme": "East Masela",
"vmf": "Mainfränkisch",
"vmg": "Lungalunga",
"vmh": "Maraghei",
"vmi": "Miwa",
"vmj": "Ixtayutla Mixtec",
"vmk": "Makhuwa-Shirima",
"vml": "Malgana",
"vmm": "Mitlatongo Mixtec",
"vmp": "Soyaltepec Mazatec",
"vmq": "Soyaltepec Mixtec",
"vmr": "Marenje",
"vms": "Moksela",
"vmu": "Muluridyi",
"vmv": "Valley Maidu",
"vmw": "Makhuwa",
"vmx": "Tamazola Mixtec",
"vmy": "Ayautla Mazatec",
"vmz": "Mazatlán Mazatec",
"vnk": "Vano; Lovono",
"vnm": "Vinmavis; Neve'ei",
"vnp": "Vunapu",
"vo": "Volapük",
"vor": "Voro",
"vot": "Votic",
"vra": "Vera'a",
"vro": "Võro",
"vrs": "Varisi",
"vrt": "Burmbar; Banam Bay",
"vsi": "Moldova Sign Language",
"vsl": "Venezuelan Sign Language",
"vsv": "Valencian Sign Language; Llengua de signes valenciana",
"vto": "Vitou",
"vum": "Vumbu",
"vun": "Vunjo",
"vut": "Vute",
"vwa": "Awa (China)",
"wa": "Walloon",
"waa": "Walla Walla",
"wab": "Wab",
"wac": "Wasco-Wishram",
"wad": "Wamesa; Wondama",
"wae": "Walser",
"waf": "Wakoná",
"wag": "Wa'ema",
"wah": "Watubela",
"wai": "Wares",
"waj": "Waffa",
"wak": "Wakashan languages",
"wal": "Wolaytta; Wolaitta",
"wam": "Wampanoag",
"wan": "Wan",
"wao": "Wappo",
"wap": "Wapishana",
"waq": "Wagiman",
"war": "Waray (Philippines)",
"was": "Washo",
"wat": "Kaninuwa",
"wau": "Waurá",
"wav": "Waka",
"waw": "Waiwai",
"wax": "Watam; Marangis",
"way": "Wayana",
"waz": "Wampur",
"wba": "Warao",
"wbb": "Wabo",
"wbe": "Waritai",
"wbf": "Wara",
"wbh": "Wanda",
"wbi": "Vwanji",
"wbj": "Alagwa",
"wbk": "Waigali",
"wbl": "Wakhi",
"wbm": "Wa",
"wbp": "Warlpiri",
"wbq": "Waddar",
"wbr": "Wagdi",
"wbs": "West Bengal Sign Language",
"wbt": "Warnman",
"wbv": "Wajarri",
"wbw": "Woi",
"wca": "Yanomámi",
"wci": "Waci Gbe",
"wdd": "Wandji",
"wdg": "Wadaginam",
"wdj": "Wadjiginy",
"wdk": "Wadikali",
"wdt": "Wendat",
"wdu": "Wadjigu",
"wdy": "Wadjabangayi",
"wea": "Wewaw",
"wec": "Wè Western",
"wed": "Wedau",
"weg": "Wergaia",
"weh": "Weh",
"wei": "Kiunum",
"wem": "Weme Gbe",
"wen": "Sorbian languages",
"weo": "Wemale",
"wep": "Westphalien",
"wer": "Weri",
"wes": "Cameroon Pidgin",
"wet": "Perai",
"weu": "Rawngtu Chin",
"wew": "Wejewa",
"wfg": "Yafi; Zorop",
"wga": "Wagaya",
"wgb": "Wagawaga",
"wgg": "Wangkangurru; Wangganguru",
"wgi": "Wahgi",
"wgo": "Waigeo",
"wgu": "Wirangu",
"wgy": "Warrgamay",
"wha": "Sou Upaa; Manusela",
"whg": "North Wahgi",
"whk": "Wahau Kenyah",
"whu": "Wahau Kayan",
"wib": "Southern Toussian",
"wic": "Wichita",
"wie": "Wik-Epa",
"wif": "Wik-Keyangan",
"wig": "Wik Ngathan",
"wih": "Wik-Me'anha",
"wii": "Minidien",
"wij": "Wik-Iiyanh",
"wik": "Wikalkan",
"wil": "Wilawila",
"wim": "Wik-Mungkan",
"win": "Ho-Chunk",
"wir": "Wiraféd",
"wiu": "Wiru",
"wiv": "Vitu",
"wiy": "Wiyot",
"wja": "Waja",
"wji": "Warji",
"wka": "Kw'adza",
"wkb": "Kumbaran",
"wkd": "Wakde; Mo",
"wkl": "Kalanadi",
"wkr": "Keerray-Woorroong",
"wku": "Kunduvadi",
"wkw": "Wakawaka",
"wky": "Wangkayutyuru",
"wla": "Walio",
"wlc": "Mwali Comorian",
"wle": "Wolane",
"wlg": "Kunbarlang",
"wlh": "Welaun",
"wli": "Waioli",
"wlk": "Wailaki",
"wll": "Wali (Sudan)",
"wlm": "Middle Welsh",
"wlo": "Wolio",
"wlr": "Wailapa",
"wls": "Wallisian",
"wlu": "Wuliwuli",
"wlv": "Wichí Lhamtés Vejoz",
"wlw": "Walak",
"wlx": "Wali (Ghana)",
"wly": "Waling",
"wma": "Mawa (Nigeria)",
"wmb": "Wambaya",
"wmc": "Wamas",
"wmd": "Mamaindé",
"wme": "Wambule",
"wmg": "Western Minyag",
"wmh": "Waima'a",
"wmi": "Wamin",
"wmm": "Maiwa (Indonesia)",
"wmn": "Waamwang",
"wmo": "Wom (Papua New Guinea)",
"wms": "Wambon",
"wmt": "Walmajarri",
"wmw": "Mwani",
"wmx": "Womo",
"wnb": "Wanambre",
"wnc": "Wantoat",
"wnd": "Wandarang",
"wne": "Waneci",
"wng": "Wanggom",
"wni": "Ndzwani Comorian",
"wnk": "Wanukaka",
"wnm": "Wanggamala",
"wnn": "Wunumara",
"wno": "Wano",
"wnp": "Wanap",
"wnu": "Usan",
"wnw": "Wintu",
"wny": "Wanyi; Waanyi",
"wo": "Wolof",
"woa": "Kuwema; Tyaraity",
"wob": "Wè Northern",
"woc": "Wogeo",
"wod": "Wolani",
"woe": "Woleaian",
"wof": "Gambian Wolof",
"wog": "Wogamusin",
"woi": "Kamang",
"wok": "Longto",
"wom": "Wom (Nigeria)",
"won": "Wongo",
"woo": "Manombai",
"wor": "Woria",
"wos": "Hanga Hundi",
"wow": "Wawonii",
"woy": "Weyto",
"wpc": "Maco",
"wrb": "Waluwarra; Warluwara",
"wrg": "Warungu; Gudjal",
"wrh": "Wiradjuri",
"wri": "Wariyangga",
"wrk": "Garrwa",
"wrl": "Warlmanpa",
"wrm": "Warumungu",
"wrn": "Warnang",
"wro": "Worrorra",
"wrp": "Waropen",
"wrr": "Wardaman",
"wrs": "Waris",
"wru": "Waru",
"wrv": "Waruna",
"wrw": "Gugu Warra",
"wrx": "Wae Rana",
"wry": "Merwari",
"wrz": "Waray (Australia)",
"wsa": "Warembori",
"wsg": "Adilabad Gondi",
"wsi": "Wusi",
"wsk": "Waskia",
"wsr": "Owenia",
"wss": "Wasa",
"wsu": "Wasu",
"wsv": "Wotapuri-Katarqalai",
"wtf": "Watiwa",
"wth": "Wathawurrung",
"wti": "Berta",
"wtk": "Watakataui",
"wtm": "Mewati",
"wtw": "Wotu",
"wua": "Wikngenchera",
"wub": "Wunambal",
"wud": "Wudu",
"wuh": "Wutunhua",
"wul": "Silimo",
"wum": "Wumbvu",
"wun": "Bungu",
"wur": "Wurrugu",
"wut": "Wutung",
"wuu": "Wu Chinese",
"wuv": "Wuvulu-Aua",
"wux": "Wulna",
"wuy": "Wauyai",
"wwa": "Waama",
"wwb": "Wakabunga",
"wwo": "Wetamut; Dorig",
"wwr": "Warrwa",
"www": "Wawa",
"wxa": "Waxianghua",
"wxw": "Wardandi",
"wyb": "Wangaaybuwan-Ngiyambaa",
"wyi": "Woiwurrung",
"wym": "Wymysorys",
"wyn": "Wyandot",
"wyr": "Wayoró",
"wyy": "Western Fijian",
"xaa": "Andalusian Arabic",
"xab": "Sambe",
"xac": "Kachari",
"xad": "Adai",
"xae": "Aequian",
"xag": "Aghwan",
"xai": "Kaimbé",
"xaj": "Ararandewára",
"xak": "Máku",
"xal": "Kalmyk; Oirat",
"xam": "ǀXam",
"xan": "Xamtanga",
"xao": "Khao",
"xap": "Apalachee",
"xaq": "Aquitanian",
"xar": "Karami",
"xas": "Kamas",
"xat": "Katawixi",
"xau": "Kauwera",
"xav": "Xavánte",
"xaw": "Kawaiisu",
"xay": "Kayan Mahakam",
"xbb": "Lower Burdekin",
"xbc": "Bactrian",
"xbd": "Bindal",
"xbe": "Bigambal",
"xbg": "Bunganditj",
"xbi": "Kombio",
"xbj": "Birrpayi",
"xbm": "Middle Breton",
"xbn": "Kenaboi",
"xbo": "Bolgarian",
"xbp": "Bibbulman",
"xbr": "Kambera",
"xbw": "Kambiwá",
"xby": "Batjala; Batyala",
"xcb": "Cumbric",
"xcc": "Camunic",
"xce": "Celtiberian",
"xcg": "Cisalpine Gaulish",
"xch": "Chemakum; Chimakum",
"xcl": "Classical Armenian",
"xcm": "Comecrudo",
"xcn": "Cotoname",
"xco": "Chorasmian",
"xcr": "Carian",
"xct": "Classical Tibetan",
"xcu": "Curonian",
"xcv": "Chuvantsy",
"xcw": "Coahuilteco",
"xcy": "Cayuse",
"xda": "Darkinyung",
"xdc": "Dacian",
"xdk": "Dharuk",
"xdm": "Edomite",
"xdo": "Kwandu",
"xdq": "Kaitag",
"xdy": "Malayic Dayak",
"xeb": "Eblan",
"xed": "Hdi",
"xeg": "ǁXegwi",
"xel": "Kelo",
"xem": "Kembayan",
"xep": "Epi-Olmec",
"xer": "Xerénte",
"xes": "Kesawai",
"xet": "Xetá",
"xeu": "Keoru-Ahia",
"xfa": "Faliscan",
"xga": "Galatian",
"xgb": "Gbin",
"xgd": "Gudang",
"xgf": "Gabrielino-Fernandeño",
"xgg": "Goreng",
"xgi": "Garingbal",
"xgl": "Galindan",
"xgm": "Dharumbal; Guwinmal",
"xgn": "Mongolian languages",
"xgr": "Garza",
"xgu": "Unggumi",
"xgw": "Guwa",
"xh": "Xhosa",
"xha": "Harami",
"xhc": "Hunnic",
"xhd": "Hadrami",
"xhe": "Khetrani",
"xhm": "Middle Khmer (1400 to 1850 CE)",
"xhr": "Hernican",
"xht": "Hattic",
"xhu": "Hurrian",
"xhv": "Khua",
"xib": "Iberian",
"xii": "Xiri",
"xil": "Illyrian",
"xin": "Xinca",
"xir": "Xiriâna",
"xis": "Kisan",
"xiv": "Indus Valley Language",
"xiy": "Xipaya",
"xjb": "Minjungbal",
"xjt": "Jaitmatang",
"xka": "Kalkoti",
"xkb": "Northern Nago",
"xkc": "Kho'ini",
"xkd": "Mendalam Kayan",
"xke": "Kereho",
"xkf": "Khengkha",
"xkg": "Kagoro",
"xki": "Kenyan Sign Language",
"xkj": "Kajali",
"xkk": "Kachok; Kaco'",
"xkl": "Mainstream Kenyah",
"xkn": "Kayan River Kayan",
"xko": "Kiorr",
"xkp": "Kabatei",
"xkq": "Koroni",
"xkr": "Xakriabá",
"xks": "Kumbewaha",
"xkt": "Kantosi",
"xku": "Kaamba",
"xkv": "Kgalagadi",
"xkw": "Kembra",
"xkx": "Karore",
"xky": "Uma' Lasan",
"xkz": "Kurtokha",
"xla": "Kamula",
"xlb": "Loup B",
"xlc": "Lycian",
"xld": "Lydian",
"xle": "Lemnian",
"xlg": "Ligurian (Ancient)",
"xli": "Liburnian",
"xln": "Alanic",
"xlo": "Loup A",
"xlp": "Lepontic",
"xls": "Lusitanian",
"xlu": "Cuneiform Luwian",
"xly": "Elymian",
"xma": "Mushungulu",
"xmb": "Mbonga",
"xmc": "Makhuwa-Marrevone",
"xmd": "Mbudum",
"xme": "Median",
"xmf": "Mingrelian",
"xmg": "Mengaka",
"xmh": "Kugu-Muminh",
"xmj": "Majera",
"xmk": "Ancient Macedonian",
"xml": "Malaysian Sign Language",
"xmm": "Manado Malay",
"xmn": "Manichaean Middle Persian",
"xmo": "Morerebi",
"xmp": "Kuku-Mu'inh",
"xmq": "Kuku-Mangk",
"xmr": "Meroitic",
"xms": "Moroccan Sign Language",
"xmt": "Matbat",
"xmu": "Kamu",
"xmv": "Antankarana Malagasy; Tankarana Malagasy",
"xmw": "Tsimihety Malagasy",
"xmx": "Salawati; Maden",
"xmy": "Mayaguduna",
"xmz": "Mori Bawah",
"xna": "Ancient North Arabian",
"xnb": "Kanakanabu",
"xnd": "Na-Dene languages",
"xng": "Middle Mongolian",
"xnh": "Kuanhua",
"xni": "Ngarigu",
"xnj": "Ngoni (Tanzania)",
"xnk": "Nganakarti",
"xnm": "Ngumbarl",
"xnn": "Northern Kankanay",
"xno": "Anglo-Norman",
"xnq": "Ngoni (Mozambique)",
"xnr": "Kangri",
"xns": "Kanashi",
"xnt": "Narragansett",
"xnu": "Nukunul",
"xny": "Nyiyaparli",
"xnz": "Kenzi; Mattoki",
"xoc": "O'chi'chi'",
"xod": "Kokoda",
"xog": "Soga",
"xoi": "Kominimung",
"xok": "Xokleng",
"xom": "Komo (Sudan)",
"xon": "Konkomba",
"xoo": "Xukurú",
"xop": "Kopar",
"xor": "Korubo",
"xow": "Kowaki",
"xpa": "Pirriya",
"xpb": "Northeastern Tasmanian; Pyemmairrener",
"xpc": "Pecheneg",
"xpd": "Oyster Bay Tasmanian",
"xpe": "Liberia Kpelle",
"xpf": "Southeast Tasmanian; Nuenonne",
"xpg": "Phrygian",
"xph": "North Midlands Tasmanian; Tyerrenoterpanner",
"xpi": "Pictish",
"xpj": "Mpalitjanh",
"xpk": "Kulina Pano",
"xpl": "Port Sorell Tasmanian",
"xpm": "Pumpokol",
"xpn": "Kapinawá",
"xpo": "Pochutec",
"xpp": "Puyo-Paekche",
"xpq": "Mohegan-Pequot",
"xpr": "Parthian",
"xps": "Pisidian",
"xpt": "Punthamara",
"xpu": "Punic",
"xpv": "Northern Tasmanian; Tommeginne",
"xpw": "Northwestern Tasmanian; Peerapper",
"xpx": "Southwestern Tasmanian; Toogee",
"xpy": "Puyo",
"xpz": "Bruny Island Tasmanian",
"xqa": "Karakhanid",
"xqt": "Qatabanian",
"xra": "Krahô",
"xrb": "Eastern Karaboro",
"xrd": "Gundungurra",
"xre": "Kreye",
"xrg": "Minang",
"xri": "Krikati-Timbira",
"xrm": "Armazic",
"xrn": "Arin",
"xrr": "Raetic",
"xrt": "Aranama-Tamique",
"xru": "Marriammu",
"xrw": "Karawa",
"xsa": "Sabaean",
"xsb": "Sambal",
"xsc": "Scythian",
"xsd": "Sidetic",
"xse": "Sempan",
"xsh": "Shamang",
"xsi": "Sio",
"xsj": "Subi",
"xsl": "South Slavey",
"xsm": "Kasem",
"xsn": "Sanga (Nigeria)",
"xso": "Solano",
"xsp": "Silopi",
"xsq": "Makhuwa-Saka",
"xsr": "Sherpa",
"xss": "Assan",
"xsu": "Sanumá",
"xsv": "Sudovian",
"xsy": "Saisiyat",
"xta": "Alcozauca Mixtec",
"xtb": "Chazumba Mixtec",
"xtc": "Katcha-Kadugli-Miri",
"xtd": "Diuxi-Tilantongo Mixtec",
"xte": "Ketengban",
"xtg": "Transalpine Gaulish",
"xth": "Yitha Yitha",
"xti": "Sinicahua Mixtec",
"xtj": "San Juan Teita Mixtec",
"xtl": "Tijaltepec Mixtec",
"xtm": "Magdalena Peñasco Mixtec",
"xtn": "Northern Tlaxiaco Mixtec",
"xto": "Tokharian A",
"xtp": "San Miguel Piedras Mixtec",
"xtq": "Tumshuqese",
"xtr": "Early Tripuri",
"xts": "Sindihui Mixtec",
"xtt": "Tacahua Mixtec",
"xtu": "Cuyamecalco Mixtec",
"xtv": "Thawa",
"xtw": "Tawandê",
"xty": "Yoloxochitl Mixtec",
"xua": "Alu Kurumba",
"xub": "Betta Kurumba",
"xud": "Umiida",
"xug": "Kunigami",
"xuj": "Jennu Kurumba",
"xul": "Ngunawal; Nunukul",
"xum": "Umbrian",
"xun": "Unggaranggu",
"xuo": "Kuo",
"xup": "Upper Umpqua",
"xur": "Urartian",
"xut": "Kuthant",
"xuu": "Kxoe; Khwedam",
"xve": "Venetic",
"xvi": "Kamviri",
"xvn": "Vandalic",
"xvo": "Volscian",
"xvs": "Vestinian",
"xwa": "Kwaza",
"xwc": "Woccon",
"xwd": "Wadi Wadi",
"xwe": "Xwela Gbe",
"xwg": "Kwegu",
"xwj": "Wajuk",
"xwk": "Wangkumara",
"xwl": "Western Xwla Gbe",
"xwo": "Written Oirat",
"xwr": "Kwerba Mamberamo",
"xwt": "Wotjobaluk",
"xww": "Wemba Wemba",
"xxb": "Boro (Ghana)",
"xxk": "Ke'o",
"xxm": "Minkin",
"xxr": "Koropó",
"xxt": "Tambora",
"xya": "Yaygir",
"xyb": "Yandjibara",
"xyj": "Mayi-Yapi",
"xyk": "Mayi-Kulan",
"xyl": "Yalakalore",
"xyt": "Mayi-Thakurti",
"xyy": "Yorta Yorta",
"xzh": "Zhang-Zhung",
"xzm": "Zemgalian",
"xzp": "Ancient Zapotec",
"yaa": "Yaminahua",
"yab": "Yuhup",
"yac": "Pass Valley Yali",
"yad": "Yagua",
"yae": "Pumé",
"yaf": "Yaka (Democratic Republic of Congo)",
"yag": "Yámana",
"yah": "Yazgulyam",
"yai": "Yagnobi",
"yaj": "Banda-Yangere",
"yak": "Yakama",
"yal": "Yalunka",
"yam": "Yamba",
"yan": "Mayangna",
"yao": "Yao",
"yap": "Yapese",
"yaq": "Yaqui",
"yar": "Yabarana",
"yas": "Nugunu (Cameroon)",
"yat": "Yambeta",
"yau": "Yuwana",
"yav": "Yangben",
"yaw": "Yawalapití",
"yax": "Yauma",
"yay": "Agwagwune",
"yaz": "Lokaa",
"yba": "Yala",
"ybb": "Yemba",
"ybe": "West Yugur",
"ybh": "Yakha",
"ybi": "Yamphu",
"ybj": "Hasha",
"ybk": "Bokha",
"ybl": "Yukuben",
"ybm": "Yaben",
"ybn": "Yabaâna",
"ybo": "Yabong",
"ybx": "Yawiyo",
"yby": "Yaweyuha",
"ych": "Chesu",
"ycl": "Lolopo",
"ycn": "Yucuna",
"ycp": "Chepya",
"yda": "Yanda",
"ydd": "Eastern Yiddish",
"yde": "Yangum Dey",
"ydg": "Yidgha",
"ydk": "Yoidik",
"yea": "Ravula",
"yec": "Yeniche",
"yee": "Yimas",
"yei": "Yeni",
"yej": "Yevanic",
"yel": "Yela",
"yer": "Tarok",
"yes": "Nyankpa",
"yet": "Yetfa",
"yeu": "Yerukula",
"yev": "Yapunda",
"yey": "Yeyi",
"yga": "Malyangapa",
"ygi": "Yiningayi",
"ygl": "Yangum Gel",
"ygm": "Yagomi",
"ygp": "Gepo",
"ygr": "Yagaria",
"ygs": "Yolŋu Sign Language",
"ygu": "Yugul",
"ygw": "Yagwoia",
"yha": "Baha Buyang",
"yhd": "Judeo-Iraqi Arabic",
"yhl": "Hlepho Phowa",
"yhs": "Yan-nhaŋu Sign Language",
"yi": "Yiddish",
"yia": "Yinggarda",
"yif": "Ache",
"yig": "Wusa Nasu",
"yih": "Western Yiddish",
"yii": "Yidiny",
"yij": "Yindjibarndi",
"yik": "Dongshanba Lalo",
"yil": "Yindjilandji",
"yim": "Yimchungru Naga",
"yin": "Riang Lai; Yinchia",
"yip": "Pholo",
"yiq": "Miqie",
"yir": "North Awyu",
"yis": "Yis",
"yit": "Eastern Lalu",
"yiu": "Awu",
"yiv": "Northern Nisu",
"yix": "Axi Yi",
"yiz": "Azhe",
"yka": "Yakan",
"ykg": "Northern Yukaghir",
"yki": "Yoke",
"ykk": "Yakaikeke",
"ykl": "Khlula",
"ykm": "Kap",
"ykn": "Kua-nsi",
"yko": "Yasa",
"ykr": "Yekora",
"ykt": "Kathu",
"yku": "Kuamasi",
"yky": "Yakoma",
"yla": "Yaul",
"ylb": "Yaleba",
"yle": "Yele",
"ylg": "Yelogu",
"yli": "Angguruk Yali",
"yll": "Yil",
"ylm": "Limi",
"yln": "Langnian Buyang",
"ylo": "Naluo Yi",
"ylr": "Yalarnnga",
"ylu": "Aribwaung",
"yly": "Nyâlayu; Nyelâyu",
"ymb": "Yambes",
"ymc": "Southern Muji",
"ymd": "Muda",
"yme": "Yameo",
"ymg": "Yamongeri",
"ymh": "Mili",
"ymi": "Moji",
"ymk": "Makwe",
"yml": "Iamalele",
"ymm": "Maay",
"ymn": "Yamna; Sunum",
"ymo": "Yangum Mon",
"ymp": "Yamap",
"ymq": "Qila Muji",
"ymr": "Malasar",
"yms": "Mysian",
"ymx": "Northern Muji",
"ymz": "Muzi",
"yna": "Aluo",
"ynd": "Yandruwandha",
"yne": "Lang'e",
"yng": "Yango",
"ynk": "Naukan Yupik",
"ynl": "Yangulam",
"ynn": "Yana",
"yno": "Yong",
"ynq": "Yendang",
"yns": "Yansi",
"ynu": "Yahuna",
"yo": "Yoruba",
"yob": "Yoba",
"yog": "Yogad",
"yoi": "Yonaguni",
"yok": "Yokuts",
"yol": "Yola",
"yom": "Yombe",
"yon": "Yongkom",
"yot": "Yotti",
"yox": "Yoron",
"yoy": "Yoy",
"ypa": "Phala",
"ypb": "Labo Phowa",
"ypg": "Phola",
"yph": "Phupha",
"ypk": "Yupik languages",
"ypm": "Phuma",
"ypn": "Ani Phowa",
"ypo": "Alo Phola",
"ypp": "Phupa",
"ypz": "Phuza",
"yra": "Yerakai",
"yrb": "Yareba",
"yre": "Yaouré",
"yrk": "Nenets",
"yrl": "Nhengatu",
"yrm": "Yirrk-Mel",
"yrn": "Yerong",
"yro": "Yaroamë",
"yrs": "Yarsun",
"yrw": "Yarawata",
"yry": "Yarluyandi",
"ysc": "Yassic",
"ysd": "Samatao",
"ysg": "Sonaga",
"ysl": "Yugoslavian Sign Language",
"ysm": "Myanmar Sign Language",
"ysn": "Sani",
"yso": "Nisi (China)",
"ysp": "Southern Lolopo",
"ysr": "Sirenik Yupik",
"yss": "Yessan-Mayo",
"ysy": "Sanie",
"yta": "Talu",
"ytl": "Tanglang",
"ytp": "Thopho",
"ytw": "Yout Wam",
"yty": "Yatay",
"yua": "Yucateco; Yucatec Maya",
"yub": "Yugambal",
"yuc": "Yuchi",
"yud": "Judeo-Tripolitanian Arabic",
"yue": "Yue Chinese; Cantonese",
"yuf": "Havasupai-Walapai-Yavapai",
"yug": "Yug",
"yui": "Yurutí",
"yuj": "Karkar-Yuri",
"yuk": "Yuki",
"yul": "Yulu",
"yum": "Quechan",
"yun": "Bena (Nigeria)",
"yup": "Yukpa",
"yuq": "Yuqui",
"yur": "Yurok",
"yut": "Yopno",
"yuw": "Yau (Morobe Province)",
"yux": "Southern Yukaghir",
"yuy": "East Yugur",
"yuz": "Yuracare",
"yva": "Yawa",
"yvt": "Yavitero",
"ywa": "Kalou",
"ywg": "Yinhawangka",
"ywl": "Western Lalu",
"ywn": "Yawanawa",
"ywq": "Wuding-Luquan Yi",
"ywr": "Yawuru",
"ywt": "Xishanba Lalo; Central Lalo",
"ywu": "Wumeng Nasu",
"yww": "Yawarawarga",
"yxa": "Mayawali",
"yxg": "Yagara",
"yxl": "Yardliyawarra",
"yxm": "Yinwum",
"yxu": "Yuyu",
"yxy": "Yabula Yabula",
"yyr": "Yir Yoront",
"yyu": "Yau (Sandaun Province)",
"yyz": "Ayizi",
"yzg": "E'ma Buyang",
"yzk": "Zokhuo",
"za": "Zhuang; Chuang",
"zaa": "Sierra de Juárez Zapotec",
"zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec",
"zac": "Ocotlán Zapotec",
"zad": "Cajonos Zapotec",
"zae": "Yareni Zapotec",
"zaf": "Ayoquesco Zapotec",
"zag": "Zaghawa",
"zah": "Zangwal",
"zai": "Isthmus Zapotec",
"zaj": "Zaramo",
"zak": "Zanaki",
"zal": "Zauzou",
"zam": "Miahuatlán Zapotec",
"zao": "Ozolotepec Zapotec",
"zap": "Zapotec",
"zaq": "Aloápam Zapotec",
"zar": "Rincón Zapotec",
"zas": "Santo Domingo Albarradas Zapotec",
"zat": "Tabaa Zapotec",
"zau": "Zangskari",
"zav": "Yatzachi Zapotec",
"zaw": "Mitla Zapotec",
"zax": "Xadani Zapotec",
"zay": "Zayse-Zergulla; Zaysete",
"zaz": "Zari",
"zba": "Balaibalan",
"zbc": "Central Berawan",
"zbe": "East Berawan",
"zbl": "Blissymbols; Bliss; Blissymbolics",
"zbt": "Batui",
"zbu": "Bu (Bauchi State)",
"zbw": "West Berawan",
"zca": "Coatecas Altas Zapotec",
"zcd": "Las Delicias Zapotec",
"zch": "Central Hongshuihe Zhuang",
"zdj": "Ngazidja Comorian",
"zea": "Zeeuws",
"zeg": "Zenag",
"zeh": "Eastern Hongshuihe Zhuang",
"zen": "Zenaga",
"zga": "Kinga",
"zgb": "Guibei Zhuang",
"zgh": "Standard Moroccan Tamazight",
"zgm": "Minz Zhuang",
"zgn": "Guibian Zhuang",
"zgr": "Magori",
"zh": "Chinese",
"zhb": "Zhaba",
"zhd": "Dai Zhuang",
"zhi": "Zhire",
"zhn": "Nong Zhuang",
"zhw": "Zhoa",
"zhx": "Chinese (family)",
"zia": "Zia",
"zib": "Zimbabwe Sign Language",
"zik": "Zimakani",
"zil": "Zialo",
"zim": "Mesme",
"zin": "Zinza",
"ziw": "Zigula",
"ziz": "Zizilivakan",
"zka": "Kaimbulawa",
"zkb": "Koibal",
"zkd": "Kadu",
"zkg": "Koguryo",
"zkh": "Khorezmian",
"zkk": "Karankawa",
"zkn": "Kanan",
"zko": "Kott",
"zkp": "São Paulo Kaingáng",
"zkr": "Zakhring",
"zkt": "Kitan",
"zku": "Kaurna",
"zkv": "Krevinian",
"zkz": "Khazar",
"zla": "Zula",
"zle": "East Slavic languages",
"zlj": "Liujiang Zhuang",
"zlm": "Malay (individual language)",
"zln": "Lianshan Zhuang",
"zlq": "Liuqian Zhuang",
"zls": "South Slavic languages",
"zlw": "West Slavic languages",
"zma": "Manda (Australia)",
"zmb": "Zimba",
"zmc": "Margany",
"zmd": "Maridan",
"zme": "Mangerr",
"zmf": "Mfinu",
"zmg": "Marti Ke",
"zmh": "Makolkol",
"zmi": "Negeri Sembilan Malay",
"zmj": "Maridjabin",
"zmk": "Mandandanyi",
"zml": "Matngala",
"zmm": "Marimanindji; Marramaninyshi",
"zmn": "Mbangwe",
"zmo": "Molo",
"zmp": "Mpuono",
"zmq": "Mituku",
"zmr": "Maranunggu",
"zms": "Mbesa",
"zmt": "Maringarr",
"zmu": "Muruwari",
"zmv": "Mbariman-Gudhinma",
"zmw": "Mbo (Democratic Republic of Congo)",
"zmx": "Bomitaba",
"zmy": "Mariyedi",
"zmz": "Mbandja",
"zna": "Zan Gula",
"znd": "Zande languages",
"zne": "Zande (individual language)",
"zng": "Mang",
"znk": "Manangkari",
"zns": "Mangas",
"zoc": "Copainalá Zoque",
"zoh": "Chimalapa Zoque",
"zom": "Zou",
"zoo": "Asunción Mixtepec Zapotec",
"zoq": "Tabasco Zoque",
"zor": "Rayón Zoque",
"zos": "Francisco León Zoque",
"zpa": "Lachiguiri Zapotec",
"zpb": "Yautepec Zapotec",
"zpc": "Choapan Zapotec",
"zpd": "Southeastern Ixtlán Zapotec",
"zpe": "Petapa Zapotec",
"zpf": "San Pedro Quiatoni Zapotec",
"zpg": "Guevea De Humboldt Zapotec",
"zph": "Totomachapan Zapotec",
"zpi": "Santa María Quiegolani Zapotec",
"zpj": "Quiavicuzas Zapotec",
"zpk": "Tlacolulita Zapotec",
"zpl": "Lachixío Zapotec",
"zpm": "Mixtepec Zapotec",
"zpn": "Santa Inés Yatzechi Zapotec",
"zpo": "Amatlán Zapotec",
"zpp": "El Alto Zapotec",
"zpq": "Zoogocho Zapotec",
"zpr": "Santiago Xanica Zapotec",
"zps": "Coatlán Zapotec",
"zpt": "San Vicente Coatlán Zapotec",
"zpu": "Yalálag Zapotec",
"zpv": "Chichicapan Zapotec",
"zpw": "Zaniza Zapotec",
"zpx": "San Baltazar Loxicha Zapotec",
"zpy": "Mazaltepec Zapotec",
"zpz": "Texmelucan Zapotec",
"zqe": "Qiubei Zhuang",
"zra": "Kara (Korea)",
"zrg": "Mirgan",
"zrn": "Zerenkel",
"zro": "Záparo",
"zrp": "Zarphatic",
"zrs": "Mairasi",
"zsa": "Sarasira",
"zsk": "Kaskean",
"zsl": "Zambian Sign Language",
"zsm": "Standard Malay",
"zsr": "Southern Rincon Zapotec",
"zsu": "Sukurum",
"zte": "Elotepec Zapotec",
"ztg": "Xanaguía Zapotec",
"ztl": "Lapaguía-Guivini Zapotec",
"ztm": "San Agustín Mixtepec Zapotec",
"ztn": "Santa Catarina Albarradas Zapotec",
"ztp": "Loxicha Zapotec",
"ztq": "Quioquitani-Quierí Zapotec",
"zts": "Tilquiapan Zapotec",
"ztt": "Tejalapan Zapotec",
"ztu": "Güilá Zapotec",
"ztx": "Zaachila Zapotec",
"zty": "Yatee Zapotec",
"zu": "Zulu",
"zua": "Zeem",
"zuh": "Tokano",
"zum": "Kumzari",
"zun": "Zuni",
"zuy": "Zumaya",
"zwa": "Zay",
"zyb": "Yongbei Zhuang",
"zyg": "Yang Zhuang",
"zyj": "Youjiang Zhuang",
"zyn": "Yongnan Zhuang",
"zyp": "Zyphe Chin",
"zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki",
"zzj": "Zuojiang Zhuang"
} | datasets/src/datasets/utils/resources/languages.json/0 | {
"file_path": "datasets/src/datasets/utils/resources/languages.json",
"repo_id": "datasets",
"token_count": 111198
} |
# ruff: noqa: F401
# This is the module that test_patching.py uses to test patch_submodule()
import os
import os as renamed_os
from os import path
from os import path as renamed_path
from os.path import join
from os.path import join as renamed_join
open = open # we just need to have a builtin inside this module to test it properly
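# --- Illustrative sketch, not part of the patching tests themselves -----------------
# Rough idea of how patch_submodule() could be pointed at the imports above, assuming
# it lives in datasets.utils.patching and acts as a context manager taking
# (module, dotted target, replacement). The import path and the assertion below are
# assumptions; test_patching.py remains the authoritative reference.
def _example_patch_submodule_usage():
    import sys

    from datasets.utils.patching import patch_submodule  # assumed location

    this_module = sys.modules[__name__]
    mock = "__mock_join__"
    with patch_submodule(this_module, "os.path.join", mock):
        # while patched, reaching os.path.join through this module should resolve to the mock
        assert this_module.os.path.join is mock
    # on exit the context manager is expected to restore the original os module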
| datasets/tests/_test_patching.py/0 | {
"file_path": "datasets/tests/_test_patching.py",
"repo_id": "datasets",
"token_count": 95
} |
import os
import tarfile
import pyarrow as pa
import pytest
from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.features import Audio, Features, Sequence, Value
from ..utils import (
require_librosa,
require_sndfile,
)
@pytest.fixture()
def tar_wav_path(shared_datadir, tmp_path_factory):
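    # Pack the reference 44.1 kHz wav file into a tar archive so tests can decode audio from archive members.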
audio_path = str(shared_datadir / "test_audio_44100.wav")
path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
@pytest.fixture()
def tar_mp3_path(shared_datadir, tmp_path_factory):
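    # Same as tar_wav_path, but packing the 44.1 kHz mp3 file.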
audio_path = str(shared_datadir / "test_audio_44100.mp3")
path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar"
with tarfile.TarFile(path, "w") as f:
f.add(audio_path, arcname=os.path.basename(audio_path))
return path
def iter_archive(archive_path):
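    """Yield (member name, file object) pairs for every entry of a tar archive."""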
with tarfile.open(archive_path) as tar:
for tarinfo in tar:
file_path = tarinfo.name
file_obj = tar.extractfile(tarinfo)
yield file_path, file_obj
def test_audio_instantiation():
audio = Audio()
assert audio.sampling_rate is None
assert audio.mono is True
assert audio.id is None
assert audio.dtype == "dict"
assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
assert audio._type == "Audio"
def test_audio_feature_type_to_arrow():
features = Features({"audio": Audio()})
assert features.arrow_schema == pa.schema({"audio": Audio().pa_type})
features = Features({"struct_containing_an_audio": {"audio": Audio()}})
assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})})
features = Features({"sequence_of_audios": Sequence(Audio())})
assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})
@require_librosa
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: audio_path,
lambda audio_path: open(audio_path, "rb").read(),
lambda audio_path: {"path": audio_path},
lambda audio_path: {"path": audio_path, "bytes": None},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"path": None, "bytes": open(audio_path, "rb").read()},
lambda audio_path: {"bytes": open(audio_path, "rb").read()},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@require_librosa
@pytest.mark.parametrize(
"build_example",
[
lambda audio_path: {"path": audio_path, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000},
lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000},
lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
],
)
def test_audio_feature_encode_example_pcm(shared_datadir, build_example):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio = Audio(sampling_rate=16_000)
encoded_example = audio.encode_example(build_example(audio_path))
assert isinstance(encoded_example, dict)
assert encoded_example.keys() == {"bytes", "path"}
assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
decoded_example = audio.decode_example(encoded_example)
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
@require_librosa
@require_sndfile
def test_audio_decode_example(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (202311,)
assert decoded_example["sampling_rate"] == 44100
with pytest.raises(RuntimeError):
Audio(decode=False).decode_example(audio_path)
@require_librosa
@require_sndfile
def test_audio_resampling(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
audio = Audio(sampling_rate=16000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (73401,)
assert decoded_example["sampling_rate"] == 16000
@require_librosa
@require_sndfile
def test_audio_decode_example_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (110592,)
assert decoded_example["sampling_rate"] == 44100
@require_librosa
@require_sndfile
def test_audio_decode_example_opus(shared_datadir):
audio_path = str(shared_datadir / "test_audio_48000.opus")
audio = Audio()
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (48000,)
assert decoded_example["sampling_rate"] == 48000
@require_librosa
@pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
audio_path = str(shared_datadir / "test_audio_16000.pcm")
audio_input = {"path": audio_path, "sampling_rate": 16_000}
audio = Audio(sampling_rate=sampling_rate)
decoded_example = audio.decode_example(audio.encode_example(audio_input))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] is None
assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,)
assert decoded_example["sampling_rate"] == sampling_rate
@require_librosa
@require_sndfile
def test_audio_resampling_mp3_different_sampling_rates(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
audio_path2 = str(shared_datadir / "test_audio_16000.mp3")
audio = Audio(sampling_rate=48000)
decoded_example = audio.decode_example(audio.encode_example(audio_path))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path
assert decoded_example["array"].shape == (120373,)
assert decoded_example["sampling_rate"] == 48000
decoded_example = audio.decode_example(audio.encode_example(audio_path2))
assert decoded_example.keys() == {"path", "array", "sampling_rate"}
assert decoded_example["path"] == audio_path2
assert decoded_example["array"].shape == (122688,)
assert decoded_example["sampling_rate"] == 48000
@require_librosa
@require_sndfile
def test_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_librosa
@require_sndfile
def test_dataset_with_audio_feature_tar_wav(tar_wav_path):
audio_filename = "test_audio_44100.wav"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_wav_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@require_librosa
@require_sndfile
def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path):
audio_filename = "test_audio_44100.mp3"
data = {"audio": []}
for file_path, file_obj in iter_archive(tar_mp3_path):
data["audio"].append({"path": file_path, "bytes": file_obj.read()})
break
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_filename
assert item["audio"]["array"].shape == (110592,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_filename
assert batch["audio"][0]["array"].shape == (110592,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_filename
assert column[0]["array"].shape == (110592,)
assert column[0]["sampling_rate"] == 44100
@require_sndfile
def test_dataset_with_audio_feature_with_none():
data = {"audio": [None]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] is None
batch = dset[:1]
assert len(batch) == 1
assert batch.keys() == {"audio"}
assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"])
column = dset["audio"]
assert len(column) == 1
assert isinstance(column, list) and all(item is None for item in column)
    # nested features: a None audio inside a Sequence, then inside a nested struct
data = {"audio": [[None]]}
features = Features({"audio": Sequence(Audio())})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert all(i is None for i in item["audio"])
data = {"nested": [{"audio": None}]}
features = Features({"nested": {"audio": Audio()}})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"nested"}
assert item["nested"].keys() == {"audio"}
assert item["nested"]["audio"] is None
@require_librosa
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_librosa
@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(sampling_rate=16000)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@require_librosa
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (73401,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (73401,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (73401,)
assert column[0]["sampling_rate"] == 16000
@require_librosa
@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.mp3")
data = {"audio": [audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item["audio"]["sampling_rate"] == 44100
dset = dset.cast_column("audio", Audio(sampling_rate=16000))
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (40125,)
assert item["audio"]["sampling_rate"] == 16000
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (40125,)
assert batch["audio"][0]["sampling_rate"] == 16000
column = dset["audio"]
assert len(column) == 1
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (40125,)
assert column[0]["sampling_rate"] == 16000
@require_librosa
@pytest.mark.parametrize(
"build_data",
[
lambda audio_path: {"audio": [audio_path]},
lambda audio_path: {"audio": [open(audio_path, "rb").read()]},
lambda audio_path: {"audio": [{"path": audio_path}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]},
lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]},
lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]},
],
)
def test_dataset_cast_to_audio_features(shared_datadir, build_data):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = build_data(audio_path)
dset = Dataset.from_dict(data)
item = dset.cast(Features({"audio": Audio()}))[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
item = dset.cast_column("audio", Audio())[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
@require_librosa
def test_dataset_concatenate_audio_features(shared_datadir):
    # dset1 and dset2 use different audio representations (a file path vs. raw bytes) to make sure they can be concatenated together
audio_path = str(shared_datadir / "test_audio_44100.wav")
data1 = {"audio": [audio_path]}
dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()}))
data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]}
dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()}))
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape
assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape
@require_librosa
def test_dataset_concatenate_nested_audio_features(shared_datadir):
    # dset1 and dset2 nest different audio representations (a file path vs. raw bytes) to make sure they can be concatenated together
audio_path = str(shared_datadir / "test_audio_44100.wav")
features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]})
data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]}
dset1 = Dataset.from_dict(data1, features=features)
data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]}
dset2 = Dataset.from_dict(data2, features=features)
concatenated_dataset = concatenate_datasets([dset1, dset2])
assert len(concatenated_dataset) == len(dset1) + len(dset2)
assert (
concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
assert (
concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape
== dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
)
@require_sndfile
def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
expected_audio = features.encode_batch(data)["audio"][0]
for item in dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello"}
def process_text(example):
example["text"] = example["text"] + " World!"
return example
processed_dset = dset.map(process_text)
for item in processed_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text"}
assert item == {"audio": expected_audio, "text": "Hello World!"}
@require_librosa
@require_sndfile
def test_dataset_with_audio_feature_map_is_decoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path], "text": ["Hello"]}
features = Features({"audio": Audio(), "text": Value("string")})
dset = Dataset.from_dict(data, features=features)
def process_audio_sampling_rate_by_example(example):
example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"]
return example
decoded_dset = dset.map(process_audio_sampling_rate_by_example)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
def process_audio_sampling_rate_by_batch(batch):
double_sampling_rates = []
for audio in batch["audio"]:
double_sampling_rates.append(2 * audio["sampling_rate"])
batch["double_sampling_rate"] = double_sampling_rates
return batch
decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True)
for item in decoded_dset.cast_column("audio", Audio(decode=False)):
assert item.keys() == {"audio", "text", "double_sampling_rate"}
assert item["double_sampling_rate"] == 88200
@require_librosa
@require_sndfile
def test_formatted_dataset_with_audio_feature(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path, audio_path]}
features = Features({"audio": Audio()})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert item["audio"][0]["path"] == audio_path
assert item["audio"][0]["array"].shape == (202311,)
assert item["audio"][0]["sampling_rate"] == 44100
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
assert batch["audio"][0]["path"] == audio_path
assert batch["audio"][0]["array"].shape == (202311,)
assert batch["audio"][0]["sampling_rate"] == 44100
column = dset["audio"]
assert len(column) == 2
assert column[0].keys() == {"path", "array", "sampling_rate"}
assert column[0]["path"] == audio_path
assert column[0]["array"].shape == (202311,)
assert column[0]["sampling_rate"] == 44100
@pytest.fixture
def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory):
import json
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = [{"audio": audio_path, "text": "Hello world!"}]
path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl")
with open(path, "w") as f:
for item in data:
f.write(json.dumps(item) + "\n")
return path
@require_librosa
@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data_files = jsonl_audio_dataset_path
features = Features({"audio": Audio(), "text": Value("string")})
dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
item = dset[0] if not streaming else next(iter(dset))
assert item.keys() == {"audio", "text"}
assert item["audio"].keys() == {"path", "array", "sampling_rate"}
assert item["audio"]["path"] == audio_path
assert item["audio"]["array"].shape == (202311,)
assert item["audio"]["sampling_rate"] == 44100
@require_sndfile
@pytest.mark.integration
def test_dataset_with_audio_feature_loaded_from_cache():
    # first load, to populate the cache
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", trust_remote_code=True)
# load from cache
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", trust_remote_code=True, split="validation")
assert isinstance(ds, Dataset)
def test_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
with dset.formatted_as("numpy"):
item = dset[0]
assert item.keys() == {"audio"}
assert item["audio"] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.keys() == {"audio"}
assert len(batch["audio"]) == 1
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
with dset.formatted_as("pandas"):
item = dset[0]
assert item.shape == (1, 1)
assert item.columns == ["audio"]
assert item["audio"][0] == {"path": audio_path, "bytes": None}
batch = dset[:1]
assert batch.shape == (1, 1)
assert batch.columns == ["audio"]
assert batch["audio"][0] == {"path": audio_path, "bytes": None}
column = dset["audio"]
assert len(column) == 1
assert column[0] == {"path": audio_path, "bytes": None}
def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
data = {"audio": [audio_path]}
features = Features({"audio": Audio(decode=False)})
dset = Dataset.from_dict(data, features=features)
def assert_audio_example_undecoded(example):
assert example["audio"] == {"path": audio_path, "bytes": None}
dset.map(assert_audio_example_undecoded)
def assert_audio_batch_undecoded(batch):
for audio in batch["audio"]:
assert audio == {"path": audio_path, "bytes": None}
dset.map(assert_audio_batch_undecoded, batched=True)
def test_audio_embed_storage(shared_datadir):
audio_path = str(shared_datadir / "test_audio_44100.wav")
example = {"bytes": None, "path": audio_path}
storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
embedded_storage = Audio().embed_storage(storage)
embedded_example = embedded_storage.to_pylist()[0]
assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"}
| datasets/tests/features/test_audio.py/0 | {
"file_path": "datasets/tests/features/test_audio.py",
"repo_id": "datasets",
"token_count": 11701
} |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
assert isinstance(dataset, Dataset)
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
_check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
_check_text_dataset(dataset, expected_features)
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
if issubclass(path_type, str):
path = text_path
elif issubclass(path_type, list):
path = [text_path]
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
_check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
assert isinstance(dataset_dict, DatasetDict)
for split in splits:
dataset = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features",
[
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
cache_dir = tmp_path / "cache"
    # the text builder always reads the "text" column as "string" by default
default_expected_features = {"text": "string"}
expected_features = features.copy() if features else default_expected_features
features = (
Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
)
dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
_check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
if split:
path = {split: text_path}
else:
split = "train"
path = {"train": text_path, "test": text_path}
cache_dir = tmp_path / "cache"
expected_features = {"text": "string"}
dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
_check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
| datasets/tests/io/test_text.py/0 | {
"file_path": "datasets/tests/io/test_text.py",
"repo_id": "datasets",
"token_count": 1833
} |
import os
import tempfile
from pathlib import Path
from unittest import TestCase
import pyarrow as pa
import pytest
from datasets.arrow_dataset import Dataset
from datasets.arrow_reader import ArrowReader, BaseReader, FileInstructions, ReadInstruction, make_file_instructions
from datasets.info import DatasetInfo
from datasets.splits import NamedSplit, Split, SplitDict, SplitInfo
from .utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
class ReaderTest(BaseReader):
"""
Build a Dataset object out of Instruction instance(s).
This reader is made for testing. It mocks file reads.
"""
def _get_table_from_filename(self, filename_skip_take, in_memory=False):
"""Returns a Dataset instance from given (filename, skip, take)."""
filename, skip, take = (
filename_skip_take["filename"],
filename_skip_take["skip"] if "skip" in filename_skip_take else None,
filename_skip_take["take"] if "take" in filename_skip_take else None,
)
open(os.path.join(filename), "wb").close()
pa_table = pa.Table.from_pydict({"filename": [Path(filename).name] * 100})
if take == -1:
take = len(pa_table) - skip
if skip is not None and take is not None:
pa_table = pa_table.slice(skip, take)
return pa_table
class BaseReaderTest(TestCase):
def test_read(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_infos = [train_info, test_info]
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
instructions = "test[:33%]"
dset = Dataset(**reader.read(name, instructions, split_infos))
self.assertEqual(dset["filename"][0], f"{name}-test")
self.assertEqual(dset.num_rows, 33)
self.assertEqual(dset.num_columns, 1)
instructions1 = ["train", "test[:33%]"]
instructions2 = [Split.TRAIN, ReadInstruction.from_spec("test[:33%]")]
for instructions in [instructions1, instructions2]:
datasets_kwargs = [reader.read(name, instr, split_infos) for instr in instructions]
train_dset, test_dset = (Dataset(**dataset_kwargs) for dataset_kwargs in datasets_kwargs)
self.assertEqual(train_dset["filename"][0], f"{name}-train")
self.assertEqual(train_dset.num_rows, 100)
self.assertEqual(train_dset.num_columns, 1)
self.assertIsInstance(train_dset.split, NamedSplit)
self.assertEqual(str(train_dset.split), "train")
self.assertEqual(test_dset["filename"][0], f"{name}-test")
self.assertEqual(test_dset.num_rows, 33)
self.assertEqual(test_dset.num_columns, 1)
self.assertIsInstance(test_dset.split, NamedSplit)
self.assertEqual(str(test_dset.split), "test[:33%]")
del train_dset, test_dset
def test_read_sharded(self):
name = "my_name"
train_info = SplitInfo(name="train", num_examples=1000, shard_lengths=[100] * 10)
split_infos = [train_info]
split_dict = SplitDict()
split_dict.add(train_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
instructions = "train[:33%]"
dset = Dataset(**reader.read(name, instructions, split_infos))
self.assertEqual(dset["filename"][0], f"{name}-train-00000-of-00010")
self.assertEqual(dset["filename"][-1], f"{name}-train-00003-of-00010")
self.assertEqual(dset.num_rows, 330)
self.assertEqual(dset.num_columns, 1)
def test_read_files(self):
train_info = SplitInfo(name="train", num_examples=100)
test_info = SplitInfo(name="test", num_examples=100)
split_dict = SplitDict()
split_dict.add(train_info)
split_dict.add(test_info)
info = DatasetInfo(splits=split_dict)
with tempfile.TemporaryDirectory() as tmp_dir:
reader = ReaderTest(tmp_dir, info)
files = [
{"filename": os.path.join(tmp_dir, "train")},
{"filename": os.path.join(tmp_dir, "test"), "skip": 10, "take": 10},
]
dset = Dataset(**reader.read_files(files, original_instructions="train+test[10:20]"))
self.assertEqual(dset.num_rows, 110)
self.assertEqual(dset.num_columns, 1)
del dset
@pytest.mark.parametrize("in_memory", [False, True])
def test_read_table(in_memory, dataset, arrow_file):
filename = arrow_file
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
table = ArrowReader.read_table(filename, in_memory=in_memory)
assert table.shape == dataset.data.shape
assert set(table.column_names) == set(dataset.data.column_names)
assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict
@pytest.mark.parametrize("in_memory", [False, True])
def test_read_files(in_memory, dataset, arrow_file):
filename = arrow_file
reader = ArrowReader("", None)
with assert_arrow_memory_increases() if in_memory else assert_arrow_memory_doesnt_increase():
dataset_kwargs = reader.read_files([{"filename": filename}], in_memory=in_memory)
assert dataset_kwargs.keys() == {"arrow_table", "info", "split"}
table = dataset_kwargs["arrow_table"]
assert table.shape == dataset.data.shape
assert set(table.column_names) == set(dataset.data.column_names)
assert dict(table.to_pydict()) == dict(dataset.data.to_pydict()) # to_pydict returns OrderedDict
def test_read_instruction_spec():
assert ReadInstruction("train", to=10, unit="abs").to_spec() == "train[:10]"
assert ReadInstruction("train", from_=-80, to=10, unit="%").to_spec() == "train[-80%:10%]"
spec_train_test = "train+test"
assert ReadInstruction.from_spec(spec_train_test).to_spec() == spec_train_test
spec_train_abs = "train[2:10]"
assert ReadInstruction.from_spec(spec_train_abs).to_spec() == spec_train_abs
spec_train_pct = "train[15%:-20%]"
assert ReadInstruction.from_spec(spec_train_pct).to_spec() == spec_train_pct
spec_train_pct_rounding = "train[:10%](closest)"
assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == "train[:10%]"
spec_train_pct_rounding = "train[:10%](pct1_dropremainder)"
assert ReadInstruction.from_spec(spec_train_pct_rounding).to_spec() == spec_train_pct_rounding
spec_train_test_pct_rounding = "train[:10%](pct1_dropremainder)+test[-10%:](pct1_dropremainder)"
assert ReadInstruction.from_spec(spec_train_test_pct_rounding).to_spec() == spec_train_test_pct_rounding
def test_make_file_instructions_basic():
name = "dummy"
split_infos = [SplitInfo(name="train", num_examples=100)]
instruction = "train[:33%]"
filetype_suffix = "arrow"
prefix_path = "prefix"
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == 33
assert file_instructions.file_instructions == [
{"filename": os.path.join(prefix_path, f"{name}-train.arrow"), "skip": 0, "take": 33}
]
split_infos = [SplitInfo(name="train", num_examples=100, shard_lengths=[10] * 10)]
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == 33
assert file_instructions.file_instructions == [
{"filename": os.path.join(prefix_path, f"{name}-train-00000-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00001-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00002-of-00010.arrow"), "skip": 0, "take": -1},
{"filename": os.path.join(prefix_path, f"{name}-train-00003-of-00010.arrow"), "skip": 0, "take": 3},
]
@pytest.mark.parametrize(
"split_name, instruction, shard_lengths, read_range",
[
("train", "train[-20%:]", 100, (80, 100)),
("train", "train[:200]", 100, (0, 100)),
("train", "train[:-200]", 100, None),
("train", "train[-200:]", 100, (0, 100)),
("train", "train[-20%:]", [10] * 10, (80, 100)),
("train", "train[:200]", [10] * 10, (0, 100)),
("train", "train[:-200]", [10] * 10, None),
("train", "train[-200:]", [10] * 10, (0, 100)),
],
)
def test_make_file_instructions(split_name, instruction, shard_lengths, read_range):
name = "dummy"
    split_infos = [
SplitInfo(
name="train",
num_examples=shard_lengths if not isinstance(shard_lengths, list) else sum(shard_lengths),
shard_lengths=shard_lengths if isinstance(shard_lengths, list) else None,
)
]
filetype_suffix = "arrow"
prefix_path = "prefix"
file_instructions = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
assert isinstance(file_instructions, FileInstructions)
assert file_instructions.num_examples == (read_range[1] - read_range[0] if read_range is not None else 0)
if read_range is None:
assert file_instructions.file_instructions == []
else:
if not isinstance(shard_lengths, list):
assert file_instructions.file_instructions == [
{
"filename": os.path.join(prefix_path, f"{name}-{split_name}.arrow"),
"skip": read_range[0],
"take": read_range[1] - read_range[0],
}
]
else:
file_instructions_list = []
shard_offset = 0
for i, shard_length in enumerate(shard_lengths):
filename = os.path.join(prefix_path, f"{name}-{split_name}-{i:05d}-of-{len(shard_lengths):05d}.arrow")
if shard_offset <= read_range[0] < shard_offset + shard_length:
file_instructions_list.append(
{
"filename": filename,
"skip": read_range[0] - shard_offset,
"take": read_range[1] - read_range[0]
if read_range[1] < shard_offset + shard_length
else -1,
}
)
elif shard_offset < read_range[1] <= shard_offset + shard_length:
file_instructions_list.append(
{
"filename": filename,
"skip": 0,
"take": read_range[1] - shard_offset
if read_range[1] < shard_offset + shard_length
else -1,
}
)
elif read_range[0] < shard_offset and read_range[1] > shard_offset + shard_length:
file_instructions_list.append(
{
"filename": filename,
"skip": 0,
"take": -1,
}
)
shard_offset += shard_length
assert file_instructions.file_instructions == file_instructions_list
@pytest.mark.parametrize("name, expected_exception", [(None, TypeError), ("", ValueError)])
def test_make_file_instructions_raises(name, expected_exception):
split_infos = [SplitInfo(name="train", num_examples=100)]
instruction = "train"
filetype_suffix = "arrow"
prefix_path = "prefix_path"
with pytest.raises(expected_exception):
_ = make_file_instructions(name, split_infos, instruction, filetype_suffix, prefix_path)
| datasets/tests/test_arrow_reader.py/0 | {
"file_path": "datasets/tests/test_arrow_reader.py",
"repo_id": "datasets",
"token_count": 5688
} |
from textwrap import dedent
from types import SimpleNamespace
from unittest.mock import patch
from urllib.parse import quote
import pytest
from huggingface_hub import CommitOperationAdd, CommitOperationDelete
from packaging import version
import datasets
from datasets.config import METADATA_CONFIGS_FIELD, PYARROW_VERSION
from datasets.hub import convert_to_parquet, delete_from_hub
from datasets.utils.hub import hf_dataset_url
DUMMY_DATASET_SCRIPT = dedent("""\
import datasets
class NewDataset(datasets.GeneratorBasedBuilder):
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="first"),
datasets.BuilderConfig(name="second"),
]
DEFAULT_CONFIG_NAME = "first"
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features({"text": datasets.Value("string")}),
)
def _split_generators(self, dl_manager):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
def _generate_examples(self):
for key in range(5):
yield key, {"text": f"{self.config.name}-{key}"}
""")
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("filename", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def test_dataset_url(repo_id, filename, revision):
url = hf_dataset_url(repo_id=repo_id, filename=filename, revision=revision)
assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(filename)}"
def test_convert_to_parquet(temporary_repo, hf_api, hf_token, ci_hub_config, ci_hfh_hf_hub_url):
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
token=hf_token,
path_or_fileobj=DUMMY_DATASET_SCRIPT.encode(),
path_in_repo=f"{repo_id.split('/')[-1]}.py",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_revision="refs/pr/1", # "main", #
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1",
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_create_commit:
with patch.object(datasets.hub.HfApi, "create_branch") as mock_create_branch:
with patch.object(datasets.hub.HfApi, "list_repo_tree", return_value=[]): # not needed
with patch.object(datasets.hub.HfApi, "preupload_lfs_files", return_value=None): # not needed
_ = convert_to_parquet(repo_id, token=hf_token, trust_remote_code=True)
# mock_create_branch
assert mock_create_branch.called
assert mock_create_branch.call_count == 1
assert mock_create_branch.call_args.kwargs.get("branch") == "script"
# mock_create_commit
assert mock_create_commit.called
assert mock_create_commit.call_count == 2
expected_readmes = [
dedent(f"""\
---
dataset_info:
config_name: first
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 55
num_examples: 5
download_size: 717
dataset_size: 55
{METADATA_CONFIGS_FIELD}:
- config_name: first
data_files:
- split: train
path: first/train-*
default: true
---
"""),
dedent(f"""\
---
dataset_info:
config_name: second
features:
- name: text
dtype: string
splits:
- name: train
num_bytes: 60
num_examples: 5
download_size: 723
dataset_size: 60
{METADATA_CONFIGS_FIELD}:
- config_name: second
data_files:
- split: train
path: second/train-*
---
"""),
]
if PYARROW_VERSION < version.parse("18.1.0"):
expected_readmes[0] = expected_readmes[0].replace("download_size: 717", "download_size: 726")
expected_readmes[1] = expected_readmes[1].replace("download_size: 723", "download_size: 732")
if PYARROW_VERSION < version.parse("18.0.0"):
expected_readmes[0] = expected_readmes[0].replace("download_size: 726", "download_size: 790")
expected_readmes[1] = expected_readmes[1].replace("download_size: 732", "download_size: 798")
for call_args, expected_commit_message, expected_create_pr, expected_readme, expected_parquet_path_in_repo in zip(
mock_create_commit.call_args_list,
["Convert dataset to Parquet", "Add 'second' config data files"],
[True, False],
expected_readmes,
["first/train-00000-of-00001.parquet", "second/train-00000-of-00001.parquet"],
):
assert call_args.kwargs.get("commit_message") == expected_commit_message
assert call_args.kwargs.get("create_pr") is expected_create_pr
operations = call_args.kwargs.get("operations")
assert len(operations) == 2
for operation in operations:
if operation.path_in_repo == "README.md":
assert operation.path_or_fileobj.decode() == expected_readme
else:
assert operation.path_in_repo == expected_parquet_path_in_repo
def test_delete_from_hub(temporary_repo, hf_api, hf_token, csv_path, ci_hub_config, ci_hfh_hf_hub_url) -> None:
with temporary_repo() as repo_id:
hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="cats/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
path_or_fileobj=str(csv_path),
path_in_repo="dogs/train/0000.csv",
repo_id=repo_id,
repo_type="dataset",
token=hf_token,
)
hf_api.upload_file(
token=hf_token,
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
- config_name: dogs
data_files:
- split: train
path: dogs/train/*
---
""").encode(),
path_in_repo="README.md",
repo_id=repo_id,
repo_type="dataset",
)
commit_info = SimpleNamespace(
pr_url="https:///hub-ci.huggingface.co/datasets/__DUMMY_USER__/__DUMMY_DATASET__/refs%2Fpr%2F1"
)
with patch.object(datasets.hub.HfApi, "create_commit", return_value=commit_info) as mock_method:
_ = delete_from_hub(repo_id, "dogs")
assert mock_method.called
assert mock_method.call_args.kwargs.get("commit_message") == "Delete 'dogs' config"
assert mock_method.call_args.kwargs.get("create_pr")
expected_operations = [
CommitOperationDelete(path_in_repo="dogs/train/0000.csv", is_folder=False),
CommitOperationAdd(
path_in_repo="README.md",
path_or_fileobj=dedent(f"""\
---
{METADATA_CONFIGS_FIELD}:
- config_name: cats
data_files:
- split: train
path: cats/train/*
---
""").encode(),
),
]
assert mock_method.call_args.kwargs.get("operations") == expected_operations
| datasets/tests/test_hub.py/0 | {
"file_path": "datasets/tests/test_hub.py",
"repo_id": "datasets",
"token_count": 3742
} |
import unittest
from unittest.mock import patch
import pytest
from pytest import CaptureFixture
from datasets.utils import (
are_progress_bars_disabled,
disable_progress_bars,
enable_progress_bars,
tqdm,
)
class TestTqdmUtils(unittest.TestCase):
@pytest.fixture(autouse=True)
def capsys(self, capsys: CaptureFixture) -> None:
"""Workaround to make capsys work in unittest framework.
Capsys is a convenient pytest fixture to capture stdout.
See https://waylonwalker.com/pytest-capsys/.
Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790.
"""
self.capsys = capsys
def setUp(self) -> None:
"""Get verbosity to set it back after the tests."""
self._previous_are_progress_bars_disabled = are_progress_bars_disabled()
return super().setUp()
def tearDown(self) -> None:
"""Set back progress bars verbosity as before testing."""
if self._previous_are_progress_bars_disabled:
disable_progress_bars()
else:
enable_progress_bars()
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_helpers(self) -> None:
"""Test helpers to enable/disable progress bars."""
disable_progress_bars()
self.assertTrue(are_progress_bars_disabled())
enable_progress_bars()
self.assertFalse(are_progress_bars_disabled())
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True)
def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None:
"""
Test helpers cannot enable/disable progress bars when
`HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
"""
disable_progress_bars()
self.assertTrue(are_progress_bars_disabled())
with self.assertWarns(UserWarning):
enable_progress_bars()
self.assertTrue(are_progress_bars_disabled()) # Still disabled !
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False)
def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None:
"""
Test helpers cannot enable/disable progress bars when
`HF_DATASETS_DISABLE_PROGRESS_BARS` is set.
"""
enable_progress_bars()
self.assertFalse(are_progress_bars_disabled())
with self.assertWarns(UserWarning):
disable_progress_bars()
self.assertFalse(are_progress_bars_disabled()) # Still enabled !
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_disabled(self) -> None:
"""Test TQDM not outputting anything when globally disabled."""
disable_progress_bars()
for _ in tqdm(range(10)):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_disabled_cannot_be_forced(self) -> None:
"""Test TQDM cannot be forced when globally disabled."""
disable_progress_bars()
for _ in tqdm(range(10), disable=False):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None:
"""Test TQDM can still be locally disabled even when globally enabled."""
enable_progress_bars()
for _ in tqdm(range(10), disable=True):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertEqual(captured.err, "")
@patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None)
def test_tqdm_enabled(self) -> None:
"""Test TQDM work normally when globally enabled."""
enable_progress_bars()
for _ in tqdm(range(10)):
pass
captured = self.capsys.readouterr()
self.assertEqual(captured.out, "")
self.assertIn("10/10", captured.err) # tqdm log
| datasets/tests/test_tqdm.py/0 | {
"file_path": "datasets/tests/test_tqdm.py",
"repo_id": "datasets",
"token_count": 1804
} |
import argparse
import sys
sys.path.append(".")
from base_classes import T2IAdapterBenchmark, T2IAdapterSDXLBenchmark # noqa: E402
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--ckpt",
type=str,
default="TencentARC/t2iadapter_canny_sd14v1",
choices=["TencentARC/t2iadapter_canny_sd14v1", "TencentARC/t2i-adapter-canny-sdxl-1.0"],
)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--num_inference_steps", type=int, default=50)
parser.add_argument("--model_cpu_offload", action="store_true")
parser.add_argument("--run_compile", action="store_true")
args = parser.parse_args()
benchmark_pipe = (
T2IAdapterBenchmark(args)
if args.ckpt == "TencentARC/t2iadapter_canny_sd14v1"
else T2IAdapterSDXLBenchmark(args)
)
benchmark_pipe.benchmark(args)
| diffusers/benchmarks/benchmark_t2i_adapter.py/0 | {
"file_path": "diffusers/benchmarks/benchmark_t2i_adapter.py",
"repo_id": "diffusers",
"token_count": 393
} |
<!---
Copyright 2024- The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Generating the documentation
To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
you can install them with the following command, at the root of the code repository:
```bash
pip install -e ".[docs]"
```
Then you need to install our open source documentation builder tool:
```bash
pip install git+https://github.com/huggingface/doc-builder
```
---
**NOTE**
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
check how they look before committing for instance). You don't have to commit the built documentation.
---
## Previewing the documentation
To preview the docs, first install the `watchdog` module with:
```bash
pip install watchdog
```
Then run the following command:
```bash
doc-builder preview {package_name} {path_to_docs}
```
For example:
```bash
doc-builder preview diffusers docs/source/en
```
The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives.
---
**NOTE**
The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again).
---
## Adding a new element to the navigation bar
Accepted files are Markdown (.md).
Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting
the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/diffusers/blob/main/docs/source/en/_toctree.yml) file.
## Renaming section headers and moving sections
It helps to keep the old links working when renaming a section header and/or moving sections from one document to another. This is because the old links are likely to be used in issues, forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information.
Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file:
```md
Sections that were moved:
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
```
and of course, if you moved it to another file, then:
```md
Sections that were moved:
[ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ]
```
Use the relative style to link to the new file so that the versioned docs continue to work.
For an example of a rich moved section set please see the very end of [the transformers Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.md).
## Writing Documentation - Specification
The `huggingface/diffusers` documentation follows the
[Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings,
although we can write them directly in Markdown.
### Adding a new tutorial
Adding a new tutorial or section is done in two steps:
- Add a new Markdown (.md) file under `docs/source/<languageCode>`.
- Link that file in `docs/source/<languageCode>/_toctree.yml` on the correct toc-tree.
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or four.
### Adding a new pipeline/scheduler
When adding a new pipeline:
- Create a file `xxx.md` under `docs/source/<languageCode>/api/pipelines` (don't hesitate to copy an existing file as template).
- Link that file in (*Diffusers Summary*) section in `docs/source/api/pipelines/overview.md`, along with the link to the paper, and a colab notebook (if available).
- Write a short overview of the diffusion model:
- Overview with paper & authors
- Paper abstract
- Tips and tricks and how to use it best
- Possibly an end-to-end example of how to use it
- Add all the pipeline classes that should be linked in the diffusion model. These classes should be added using our Markdown syntax. By default as follows:
```
[[autodoc]] XXXPipeline
- all
- __call__
```
This will include every public method of the pipeline that is documented, as well as the `__call__` method that is not documented by default. If you just want to add additional methods that are not documented, you can put the list of all methods to add in a list that contains `all`.
```
[[autodoc]] XXXPipeline
- all
- __call__
- enable_attention_slicing
- disable_attention_slicing
- enable_xformers_memory_efficient_attention
- disable_xformers_memory_efficient_attention
```
You can follow the same process to create a new scheduler under the `docs/source/<languageCode>/api/schedulers` folder.
### Writing source documentation
Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
and objects like True, None, or any strings should usually be put in `code`.
When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
function to be in the main package.
If you want to create a link to some internal class or function, you need to
provide its path. For instance: \[\`pipelines.ImagePipelineOutput\`\]. This will be converted into a link with
`pipelines.ImagePipelineOutput` in the description. To get rid of the path and only keep the name of the object you are
linking to in the description, add a ~: \[\`~pipelines.ImagePipelineOutput\`\] will generate a link with `ImagePipelineOutput` in the description.
The same works for methods so you can either use \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\].
#### Defining arguments in a method
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
description:
```
Args:
n_layers (`int`): The number of layers of the model.
```
If the description is too long to fit in one line, another indentation is necessary before writing the description
after the argument.
Here's an example showcasing everything so far:
```
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AlbertTokenizer`]. See [`~PreTrainedTokenizer.encode`] and
[`~PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
```
For optional arguments or arguments with defaults we follow the following syntax: imagine we have a function with the
following signature:
```py
def my_function(x: str=None, a: float=3.14):
```
then its documentation should look like this:
```
Args:
x (`str`, *optional*):
This argument controls ...
a (`float`, *optional*, defaults to `3.14`):
This argument is used to ...
```
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
however write as many lines as you want in the indented description (see the example above with `input_ids`).
#### Writing a multi-line code block
Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:
````
```
# first line of code
# second line
# etc
```
````
#### Writing a return block
The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation.
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
building the return.
Here's an example of a single value return:
```
Returns:
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```
Here's an example of a tuple return, comprising several objects:
```
Returns:
`tuple(torch.Tensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
- **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.Tensor` of shape `(1,)` --
Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
- **prediction_scores** (`torch.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```
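Putting the `Args:` and `Returns:` conventions together, here is a complete docstring sketch. The function and its arguments below are hypothetical and only meant to illustrate the expected format:
```py
import torch


def scale_latents(latents: torch.Tensor, factor: float = 0.18215, clamp: bool = False) -> torch.Tensor:
    r"""
    Scales `latents` by `factor`.

    Args:
        latents (`torch.Tensor` of shape `(batch_size, channels, height, width)`):
            The latents to scale.
        factor (`float`, *optional*, defaults to `0.18215`):
            The scaling factor applied to the latents.
        clamp (`bool`, *optional*):
            Whether to clamp the result to the range `[-1, 1]`.

    Returns:
        `torch.Tensor`: The scaled (and optionally clamped) latents.
    """
    latents = latents * factor
    return latents.clamp(-1.0, 1.0) if clamp else latents
```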
#### Adding an image
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
If you are an external contributor, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
to this dataset.
## Styling the docstring
We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
easily.
| diffusers/docs/README.md/0 | {
"file_path": "diffusers/docs/README.md",
"repo_id": "diffusers",
"token_count": 3142
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# SD3Transformer2D
This class is useful when *only* loading weights into a [`SD3Transformer2DModel`]. If you need to load weights into the text encoder or a text encoder and SD3Transformer2DModel, check [`SD3LoraLoaderMixin`](lora#diffusers.loaders.SD3LoraLoaderMixin) class instead.
The [`SD3Transformer2DLoadersMixin`] class currently only loads IP-Adapter weights, but will be used in the future to save weights and load LoRAs.
<Tip>
To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide.
</Tip>
## SD3Transformer2DLoadersMixin
[[autodoc]] loaders.transformer_sd3.SD3Transformer2DLoadersMixin
- all
- _load_ip_adapter_weights
| diffusers/docs/source/en/api/loaders/transformer_sd3.md/0 | {
"file_path": "diffusers/docs/source/en/api/loaders/transformer_sd3.md",
"repo_id": "diffusers",
"token_count": 379
} |
<!-- Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License. -->
# MochiTransformer3DModel
A Diffusion Transformer model for 3D video-like data was introduced in [Mochi-1 Preview](https://huggingface.co/genmo/mochi-1-preview) by Genmo.
The model can be loaded with the following code snippet.
```python
import torch
from diffusers import MochiTransformer3DModel
transformer = MochiTransformer3DModel.from_pretrained("genmo/mochi-1-preview", subfolder="transformer", torch_dtype=torch.float16).to("cuda")
```
## MochiTransformer3DModel
[[autodoc]] MochiTransformer3DModel
## Transformer2DModelOutput
[[autodoc]] models.modeling_outputs.Transformer2DModelOutput
| diffusers/docs/source/en/api/models/mochi_transformer3d.md/0 | {
"file_path": "diffusers/docs/source/en/api/models/mochi_transformer3d.md",
"repo_id": "diffusers",
"token_count": 340
} |
<!--Copyright 2024 The HuggingFace Team, The Black Forest Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# FluxControlInpaint
FluxControlInpaintPipeline is an implementation of inpainting for the Flux.1 Depth/Canny models: it takes an image and a mask as input and returns the inpainted image.
FLUX.1 Depth and Canny [dev] is a 12 billion parameter rectified flow transformer capable of generating an image based on a text description while following the structure of a given input image. **This is not a ControlNet model**.
| Control type | Developer | Link |
| -------- | ---------- | ---- |
| Depth | [Black Forest Labs](https://huggingface.co/black-forest-labs) | [Link](https://huggingface.co/black-forest-labs/FLUX.1-Depth-dev) |
| Canny | [Black Forest Labs](https://huggingface.co/black-forest-labs) | [Link](https://huggingface.co/black-forest-labs/FLUX.1-Canny-dev) |
<Tip>
Flux can be quite expensive to run on consumer hardware devices. However, you can perform a suite of optimizations to run it faster and in a more memory-friendly manner. Check out [this section](https://huggingface.co/blog/sd3#memory-optimizations-for-sd3) for more details. Additionally, Flux can benefit from quantization for memory efficiency with a trade-off in inference latency. Refer to [this blog post](https://huggingface.co/blog/quanto-diffusers) to learn more. For an exhaustive list of resources, check out [this gist](https://gist.github.com/sayakpaul/b664605caf0aa3bf8585ab109dd5ac9c).
</Tip>
```python
import torch
from diffusers import FluxControlInpaintPipeline
from diffusers.models.transformers import FluxTransformer2DModel
from transformers import T5EncoderModel
from diffusers.utils import load_image, make_image_grid
from image_gen_aux import DepthPreprocessor # https://github.com/huggingface/image_gen_aux
from PIL import Image
import numpy as np
pipe = FluxControlInpaintPipeline.from_pretrained(
"black-forest-labs/FLUX.1-Depth-dev",
torch_dtype=torch.bfloat16,
)
# use following lines if you have GPU constraints
# ---------------------------------------------------------------
transformer = FluxTransformer2DModel.from_pretrained(
"sayakpaul/FLUX.1-Depth-dev-nf4", subfolder="transformer", torch_dtype=torch.bfloat16
)
text_encoder_2 = T5EncoderModel.from_pretrained(
"sayakpaul/FLUX.1-Depth-dev-nf4", subfolder="text_encoder_2", torch_dtype=torch.bfloat16
)
pipe.transformer = transformer
pipe.text_encoder_2 = text_encoder_2
pipe.enable_model_cpu_offload()
# ---------------------------------------------------------------
pipe.to("cuda")
prompt = "a blue robot singing opera with human-like expressions"
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/robot.png")
head_mask = np.zeros_like(image)
head_mask[65:580,300:642] = 255
mask_image = Image.fromarray(head_mask)
processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
control_image = processor(image)[0].convert("RGB")
output = pipe(
prompt=prompt,
image=image,
control_image=control_image,
mask_image=mask_image,
num_inference_steps=30,
strength=0.9,
guidance_scale=10.0,
generator=torch.Generator().manual_seed(42),
).images[0]
make_image_grid([image, control_image, mask_image, output.resize(image.size)], rows=1, cols=4).save("output.png")
```
## FluxControlInpaintPipeline
[[autodoc]] FluxControlInpaintPipeline
- all
- __call__
## FluxPipelineOutput
[[autodoc]] pipelines.flux.pipeline_output.FluxPipelineOutput
| diffusers/docs/source/en/api/pipelines/control_flux_inpaint.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/control_flux_inpaint.md",
"repo_id": "diffusers",
"token_count": 1298
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Pipelines
Pipelines provide a simple way to run state-of-the-art diffusion models in inference by bundling all of the necessary components (multiple independently-trained models, schedulers, and processors) into a single end-to-end class. Pipelines are flexible and they can be adapted to use different schedulers or even model components.
All pipelines are built from the base [`DiffusionPipeline`] class which provides basic functionality for loading, downloading, and saving all the components. Specific pipeline types (for example [`StableDiffusionPipeline`]) loaded with [`~DiffusionPipeline.from_pretrained`] are automatically detected and the pipeline components are loaded and passed to the `__init__` function of the pipeline.
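For example, a text-to-image checkpoint can be loaded and run in a few lines. The checkpoint id below is only illustrative — any compatible pipeline repository on the Hub works — and the concrete pipeline class is detected automatically from the checkpoint:
```python
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler

# Illustrative checkpoint id; swap in any pipeline repository from the Hub.
pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.to("cuda")
# Pipelines are flexible: components such as the scheduler can be swapped out.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe("An astronaut riding a horse on the moon").images[0]
image.save("astronaut.png")
```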
<Tip warning={true}>
You shouldn't use the [`DiffusionPipeline`] class for training. Individual components (for example, [`UNet2DModel`] and [`UNet2DConditionModel`]) of diffusion pipelines are usually trained individually, so we suggest directly working with them instead.
<br>
Pipelines do not offer any training functionality. You'll notice PyTorch's autograd is disabled by decorating the [`~DiffusionPipeline.__call__`] method with a [`torch.no_grad`](https://pytorch.org/docs/stable/generated/torch.no_grad.html) decorator because pipelines should not be used for training. If you're interested in training, please take a look at the [Training](../../training/overview) guides instead!
</Tip>
The table below lists all the pipelines currently available in 🤗 Diffusers and the tasks they support. Click on a pipeline to view its abstract and published paper.
| Pipeline | Tasks |
|---|---|
| [aMUSEd](amused) | text2image |
| [AnimateDiff](animatediff) | text2video |
| [Attend-and-Excite](attend_and_excite) | text2image |
| [AudioLDM](audioldm) | text2audio |
| [AudioLDM2](audioldm2) | text2audio |
| [AuraFlow](auraflow) | text2image |
| [BLIP Diffusion](blip_diffusion) | text2image |
| [CogVideoX](cogvideox) | text2video |
| [Consistency Models](consistency_models) | unconditional image generation |
| [ControlNet](controlnet) | text2image, image2image, inpainting |
| [ControlNet with Flux.1](controlnet_flux) | text2image |
| [ControlNet with Hunyuan-DiT](controlnet_hunyuandit) | text2image |
| [ControlNet with Stable Diffusion 3](controlnet_sd3) | text2image |
| [ControlNet with Stable Diffusion XL](controlnet_sdxl) | text2image |
| [ControlNet-XS](controlnetxs) | text2image |
| [ControlNet-XS with Stable Diffusion XL](controlnetxs_sdxl) | text2image |
| [Dance Diffusion](dance_diffusion) | unconditional audio generation |
| [DDIM](ddim) | unconditional image generation |
| [DDPM](ddpm) | unconditional image generation |
| [DeepFloyd IF](deepfloyd_if) | text2image, image2image, inpainting, super-resolution |
| [DiffEdit](diffedit) | inpainting |
| [DiT](dit) | text2image |
| [Flux](flux) | text2image |
| [Hunyuan-DiT](hunyuandit) | text2image |
| [I2VGen-XL](i2vgenxl) | text2video |
| [InstructPix2Pix](pix2pix) | image editing |
| [Kandinsky 2.1](kandinsky) | text2image, image2image, inpainting, interpolation |
| [Kandinsky 2.2](kandinsky_v22) | text2image, image2image, inpainting |
| [Kandinsky 3](kandinsky3) | text2image, image2image |
| [Kolors](kolors) | text2image |
| [Latent Consistency Models](latent_consistency_models) | text2image |
| [Latent Diffusion](latent_diffusion) | text2image, super-resolution |
| [Latte](latte) | text2image |
| [LEDITS++](ledits_pp) | image editing |
| [Lumina-T2X](lumina) | text2image |
| [Marigold](marigold) | depth |
| [MultiDiffusion](panorama) | text2image |
| [MusicLDM](musicldm) | text2audio |
| [PAG](pag) | text2image |
| [Paint by Example](paint_by_example) | inpainting |
| [PIA](pia) | image2video |
| [PixArt-α](pixart) | text2image |
| [PixArt-Σ](pixart_sigma) | text2image |
| [Self-Attention Guidance](self_attention_guidance) | text2image |
| [Semantic Guidance](semantic_stable_diffusion) | text2image |
| [Shap-E](shap_e) | text-to-3D, image-to-3D |
| [Stable Audio](stable_audio) | text2audio |
| [Stable Cascade](stable_cascade) | text2image |
| [Stable Diffusion](stable_diffusion/overview) | text2image, image2image, depth2image, inpainting, image variation, latent upscaler, super-resolution |
| [Stable Diffusion XL](stable_diffusion/stable_diffusion_xl) | text2image, image2image, inpainting |
| [Stable Diffusion XL Turbo](stable_diffusion/sdxl_turbo) | text2image, image2image, inpainting |
| [Stable unCLIP](stable_unclip) | text2image, image variation |
| [T2I-Adapter](stable_diffusion/adapter) | text2image |
| [Text2Video](text_to_video) | text2video, video2video |
| [Text2Video-Zero](text_to_video_zero) | text2video |
| [unCLIP](unclip) | text2image, image variation |
| [UniDiffuser](unidiffuser) | text2image, image2text, image variation, text variation, unconditional image generation, unconditional audio generation |
| [Value-guided planning](value_guided_sampling) | value guided sampling |
| [Wuerstchen](wuerstchen) | text2image |
## DiffusionPipeline
[[autodoc]] DiffusionPipeline
- all
- __call__
- device
- to
- components
[[autodoc]] pipelines.StableDiffusionMixin.enable_freeu
[[autodoc]] pipelines.StableDiffusionMixin.disable_freeu
## FlaxDiffusionPipeline
[[autodoc]] pipelines.pipeline_flax_utils.FlaxDiffusionPipeline
## PushToHubMixin
[[autodoc]] utils.PushToHubMixin
| diffusers/docs/source/en/api/pipelines/overview.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/overview.md",
"repo_id": "diffusers",
"token_count": 1935
} |
<!--Copyright 2024 The GLIGEN Authors and The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# GLIGEN (Grounded Language-to-Image Generation)
The GLIGEN model was created by researchers and engineers from [University of Wisconsin-Madison, Columbia University, and Microsoft](https://github.com/gligen/GLIGEN). The [`StableDiffusionGLIGENPipeline`] and [`StableDiffusionGLIGENTextImagePipeline`] can generate photorealistic images conditioned on grounding inputs. [`StableDiffusionGLIGENPipeline`] is conditioned on text and bounding boxes. [`StableDiffusionGLIGENTextImagePipeline`] additionally accepts input images: if input images are given, it inserts objects described by text at the regions defined by the bounding boxes; otherwise, it generates an image described by the caption/prompt and inserts objects described by text at the regions defined by the bounding boxes. The models are trained on the COCO2014D and COCO2014CD datasets and use a frozen CLIP ViT-L/14 text encoder to condition on the grounding inputs.
The abstract from the [paper](https://huggingface.co/papers/2301.07093) is:
*Large-scale text-to-image diffusion models have made amazing advances. However, the status quo is to use text input alone, which can impede controllability. In this work, we propose GLIGEN, Grounded-Language-to-Image Generation, a novel approach that builds upon and extends the functionality of existing pre-trained text-to-image diffusion models by enabling them to also be conditioned on grounding inputs. To preserve the vast concept knowledge of the pre-trained model, we freeze all of its weights and inject the grounding information into new trainable layers via a gated mechanism. Our model achieves open-world grounded text2img generation with caption and bounding box condition inputs, and the grounding ability generalizes well to novel spatial configurations and concepts. GLIGEN’s zeroshot performance on COCO and LVIS outperforms existing supervised layout-to-image baselines by a large margin.*
<Tip>
Make sure to check out the Stable Diffusion [Tips](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality and how to reuse pipeline components efficiently!
If you want to use one of the official checkpoints for a task, explore the [gligen](https://huggingface.co/gligen) Hub organizations!
</Tip>
[`StableDiffusionGLIGENPipeline`] was contributed by [Nikhil Gajendrakumar](https://github.com/nikhil-masterful) and [`StableDiffusionGLIGENTextImagePipeline`] was contributed by [Nguyễn Công Tú Anh](https://github.com/tuanh123789).
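As a quick sketch of grounded generation with text and bounding boxes (the checkpoint id, phrases, and box coordinates below are illustrative assumptions, not prescriptions):
```python
import torch
from diffusers import StableDiffusionGLIGENPipeline

# Illustrative checkpoint id; see the Hub for available GLIGEN checkpoints.
pipe = StableDiffusionGLIGENPipeline.from_pretrained(
    "masterful/gligen-1-4-generation-text-box", torch_dtype=torch.float16
).to("cuda")
image = pipe(
    prompt="a waterfall and a modern high speed train in a beautiful forest with fall foliage",
    gligen_phrases=["a waterfall", "a modern high speed train"],
    # Boxes are normalized (xmin, ymin, xmax, ymax) coordinates, one per phrase.
    gligen_boxes=[[0.1387, 0.2051, 0.4277, 0.7090], [0.4980, 0.4355, 0.8516, 0.7266]],
    gligen_scheduled_sampling_beta=1,
    num_inference_steps=50,
).images[0]
image.save("gligen_text_box_generation.png")
```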
## StableDiffusionGLIGENPipeline
[[autodoc]] StableDiffusionGLIGENPipeline
- all
- __call__
- enable_vae_slicing
- disable_vae_slicing
- enable_vae_tiling
- disable_vae_tiling
- enable_model_cpu_offload
- prepare_latents
- enable_fuser
## StableDiffusionGLIGENTextImagePipeline
[[autodoc]] StableDiffusionGLIGENTextImagePipeline
- all
- __call__
- enable_vae_slicing
- disable_vae_slicing
- enable_vae_tiling
- disable_vae_tiling
- enable_model_cpu_offload
- prepare_latents
- enable_fuser
## StableDiffusionPipelineOutput
[[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
| diffusers/docs/source/en/api/pipelines/stable_diffusion/gligen.md/0 | {
"file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/gligen.md",
"repo_id": "diffusers",
"token_count": 1049
} |
# xDiT
[xDiT](https://github.com/xdit-project/xDiT) is an inference engine designed for the large-scale parallel deployment of Diffusion Transformers (DiTs). xDiT provides a suite of efficient parallel approaches for diffusion models, as well as GPU kernel accelerations.
xDiT supports four parallel methods: [Unified Sequence Parallelism](https://arxiv.org/abs/2405.07719), [PipeFusion](https://arxiv.org/abs/2405.14430), CFG parallelism, and data parallelism. These methods can be combined in a hybrid manner, optimizing communication patterns to best suit the underlying network hardware.
Optimizations orthogonal to parallelization focus on accelerating single-GPU performance. In addition to well-known attention optimization libraries, xDiT leverages compilation acceleration technologies such as torch.compile and onediff.
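As a rough sketch of the single-GPU side (independent of xDiT itself), the DiT backbone of a Diffusers pipeline can be compiled with `torch.compile`; how much this helps depends on the model, the GPU, and your PyTorch version:
```py
import torch
from diffusers import StableDiffusion3Pipeline

pipe = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16
).to("cuda")
# compile the transformer (DiT) backbone for single-GPU acceleration
pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
image = pipe("A snowy mountain", num_inference_steps=28).images[0]
```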
The overview of xDiT is shown as follows.
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/methods/xdit_overview.png">
</div>
You can install xDiT using the following command:
```bash
pip install xfuser
```
Here's an example of using xDiT to accelerate inference of a Diffusers model.
```diff
import torch
from diffusers import StableDiffusion3Pipeline
from xfuser import xFuserArgs, xDiTParallel
from xfuser.config import FlexibleArgumentParser
from xfuser.core.distributed import get_world_group
def main():
+ parser = FlexibleArgumentParser(description="xFuser Arguments")
+ args = xFuserArgs.add_cli_args(parser).parse_args()
+ engine_args = xFuserArgs.from_cli_args(args)
+ engine_config, input_config = engine_args.create_config()
local_rank = get_world_group().local_rank
pipe = StableDiffusion3Pipeline.from_pretrained(
pretrained_model_name_or_path=engine_config.model_config.model,
torch_dtype=torch.float16,
).to(f"cuda:{local_rank}")
# do anything you want with pipeline here
+ pipe = xDiTParallel(pipe, engine_config, input_config)
pipe(
height=input_config.height,
width=input_config.width,
prompt=input_config.prompt,
num_inference_steps=input_config.num_inference_steps,
output_type=input_config.output_type,
generator=torch.Generator(device="cuda").manual_seed(input_config.seed),
)
+ if input_config.output_type == "pil":
+ pipe.save("results", "stable_diffusion_3")
if __name__ == "__main__":
main()
```
As you can see, we only need to use xFuserArgs from xDiT to get configuration parameters, and pass these parameters along with the pipeline object from the Diffusers library into xDiTParallel to complete the parallelization of a specific pipeline in Diffusers.
xDiT runtime parameters can be viewed in the command line using `-h`, and you can refer to this [usage](https://github.com/xdit-project/xDiT?tab=readme-ov-file#2-usage) example for more details.
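For example, assuming the script above is saved as `inference.py`:
```bash
torchrun --nproc_per_node=1 ./inference.py -h
```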
xDiT needs to be launched using torchrun to support its multi-node, multi-GPU parallel capabilities. For example, the following command can be used for 8-GPU parallel inference:
```bash
torchrun --nproc_per_node=8 ./inference.py --model models/FLUX.1-dev --data_parallel_degree 2 --ulysses_degree 2 --ring_degree 2 --prompt "A snowy mountain" "A small dog" --num_inference_steps 50
```
## Supported models
A subset of Diffusers models are supported in xDiT, such as Flux.1, Stable Diffusion 3, etc. The latest supported models can be found [here](https://github.com/xdit-project/xDiT?tab=readme-ov-file#-supported-dits).
## Benchmark
We tested different models on various machines, and here is some of the benchmark data.
### Flux.1-schnell
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/flux/Flux-2k-L40.png">
</div>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/flux/Flux-2K-A100.png">
</div>
### Stable Diffusion 3
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/sd3/L40-SD3.png">
</div>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/sd3/A100-SD3.png">
</div>
### HunyuanDiT
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/hunuyuandit/L40-HunyuanDiT.png">
</div>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/hunuyuandit/V100-HunyuanDiT.png">
</div>
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/hunuyuandit/T4-HunyuanDiT.png">
</div>
More detailed performance metrics can be found on our [GitHub page](https://github.com/xdit-project/xDiT?tab=readme-ov-file#perf).
## Reference
[xDiT-project](https://github.com/xdit-project/xDiT)
[USP: A Unified Sequence Parallelism Approach for Long Context Generative AI](https://arxiv.org/abs/2405.07719)
[PipeFusion: Displaced Patch Pipeline Parallelism for Inference of Diffusion Transformer Models](https://arxiv.org/abs/2405.14430) | diffusers/docs/source/en/optimization/xdit.md/0 | {
"file_path": "diffusers/docs/source/en/optimization/xdit.md",
"repo_id": "diffusers",
"token_count": 1813
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# InstructPix2Pix
[InstructPix2Pix](https://hf.co/papers/2211.09800) is a Stable Diffusion model trained to edit images from human-provided instructions. For example, your prompt can be "turn the clouds rainy" and the model will edit the input image accordingly. This model is conditioned on the text prompt (or editing instruction) and the input image.
This guide will explore the [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) training script to help you become familiar with it, and how you can adapt it for your own use case.
Before running the script, make sure you install the library from source:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then navigate to the example folder containing the training script and install the required dependencies for the script you're using:
```bash
cd examples/instruct_pix2pix
pip install -r requirements.txt
```
<Tip>
🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more.
</Tip>
Initialize an 🤗 Accelerate environment:
```bash
accelerate config
```
To setup a default 🤗 Accelerate environment without choosing any configurations:
```bash
accelerate config default
```
Or if your environment doesn't support an interactive shell, like a notebook, you can use:
```py
from accelerate.utils import write_basic_config
write_basic_config()
```
Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script.
<Tip>
The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) and let us know if you have any questions or concerns.
</Tip>
## Script parameters
The training script has many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L65) function. Most parameters have default values that work reasonably well, but you can also set your own values in the training command if you'd like.
For example, to increase the resolution of the input image:
```bash
accelerate launch train_instruct_pix2pix.py \
--resolution=512 \
```
Many of the basic and important parameters are described in the [Text-to-image](text2image#script-parameters) training guide, so this guide just focuses on the parameters relevant to InstructPix2Pix (an example command combining them is shown after the list):
- `--original_image_column`: the original image before the edits are made
- `--edited_image_column`: the image after the edits are made
- `--edit_prompt_column`: the instructions to edit the image
- `--conditioning_dropout_prob`: the dropout probability for the edited image and edit prompts during training which enables classifier-free guidance (CFG) for one or both conditioning inputs
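As a sketch, here is how those flags might be combined for a custom dataset (the dataset id and column names below are placeholder assumptions about your data; `$MODEL_NAME` is set as shown later in this guide):
```bash
accelerate launch train_instruct_pix2pix.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --dataset_name=your-username/your-edit-dataset \
  --original_image_column=input_image \
  --edited_image_column=edited_image \
  --edit_prompt_column=edit_prompt \
  --conditioning_dropout_prob=0.05
```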
## Training script
The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L374) function. This is where you'll make your changes to the training script to adapt it for your own use-case.
As with the script parameters, a walkthrough of the training script is provided in the [Text-to-image](text2image#training-script) training guide, so this guide focuses on the parts of the script that are relevant to InstructPix2Pix.
The script begins by modifying the [number of input channels](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L445) in the first convolutional layer of the UNet to account for InstructPix2Pix's additional conditioning image:
```py
in_channels = 8
out_channels = unet.conv_in.out_channels
unet.register_to_config(in_channels=in_channels)
with torch.no_grad():
new_conv_in = nn.Conv2d(
in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding
)
new_conv_in.weight.zero_()
new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
unet.conv_in = new_conv_in
```
These UNet parameters are [updated](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L545C1-L551C6) by the optimizer:
```py
optimizer = optimizer_cls(
unet.parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
```
Next, the edited images and edit instructions are [preprocessed](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624) and [tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24). It is important that the same image transformations are applied to the original and edited images.
```py
def preprocess_train(examples):
preprocessed_images = preprocess_images(examples)
original_images, edited_images = preprocessed_images.chunk(2)
original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
examples["original_pixel_values"] = original_images
examples["edited_pixel_values"] = edited_images
captions = list(examples[edit_prompt_column])
examples["input_ids"] = tokenize_captions(captions)
return examples
```
Finally, in the [training loop](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L730), it starts by encoding the edited images into latent space:
```py
latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample()
latents = latents * vae.config.scaling_factor
```
Then, the script applies dropout to the original image and edit instruction embeddings to support CFG. This is what enables the model to modulate the influence of the edit instruction and original image on the edited image.
```py
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
if args.conditioning_dropout_prob is not None:
random_p = torch.rand(bsz, device=latents.device, generator=generator)
prompt_mask = random_p < 2 * args.conditioning_dropout_prob
prompt_mask = prompt_mask.reshape(bsz, 1, 1)
null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0]
encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states)
image_mask_dtype = original_image_embeds.dtype
image_mask = 1 - (
(random_p >= args.conditioning_dropout_prob).to(image_mask_dtype)
* (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype)
)
image_mask = image_mask.reshape(bsz, 1, 1, 1)
original_image_embeds = image_mask * original_image_embeds
```
That's pretty much it! Aside from the differences described here, the rest of the script is very similar to the [Text-to-image](text2image#training-script) training script, so feel free to check it out for more details. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process.
## Launch the script
Once you're happy with the changes to your script or if you're okay with the default configuration, you're ready to launch the training script! 🚀
This guide uses the [fusing/instructpix2pix-1000-samples](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) dataset, which is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered). You can also create and use your own dataset if you'd like (see the [Create a dataset for training](create_dataset) guide).
Set the `MODEL_NAME` environment variable to the name of the model (can be a model id on the Hub or a path to a local model), and the `DATASET_ID` to the name of the dataset on the Hub. The script creates and saves all the components (feature extractor, scheduler, text encoder, UNet, etc.) to a subfolder in your repository.
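For example (the model id is just one reasonable choice; any compatible Stable Diffusion checkpoint or local path works):
```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```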
<Tip>
For better results, try longer training runs with a larger dataset. We've only tested this training script on a smaller-scale dataset.
<br>
To monitor training progress with Weights and Biases, add the `--report_to=wandb` parameter to the training command and specify a validation image with `--val_image_url` and a validation prompt with `--validation_prompt`. This can be really useful for debugging the model.
</Tip>
If you’re training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command.
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--dataset_name=$DATASET_ID \
--enable_xformers_memory_efficient_attention \
--resolution=256 \
--random_flip \
--train_batch_size=4 \
--gradient_accumulation_steps=4 \
--gradient_checkpointing \
--max_train_steps=15000 \
--checkpointing_steps=5000 \
--checkpoints_total_limit=1 \
--learning_rate=5e-05 \
--max_grad_norm=1 \
--lr_warmup_steps=0 \
--conditioning_dropout_prob=0.05 \
--mixed_precision=fp16 \
--seed=42 \
--push_to_hub
```
After training is finished, you can use your new InstructPix2Pix for inference:
```py
import PIL
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image
pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained("your_cool_model", torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)
image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png")
prompt = "add some ducks to the lake"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10
edited_image = pipeline(
prompt,
image=image,
num_inference_steps=num_inference_steps,
image_guidance_scale=image_guidance_scale,
guidance_scale=guidance_scale,
generator=generator,
).images[0]
edited_image.save("edited_image.png")
```
You should experiment with different `num_inference_steps`, `image_guidance_scale`, and `guidance_scale` values to see how they affect inference speed and quality. The guidance scale parameters are especially impactful because they control how much the original image and edit instructions affect the edited image.
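As a quick sketch of such an experiment, reusing the `pipeline`, `image`, and `prompt` defined above (the value grid is arbitrary):
```py
# sweep image_guidance_scale to see how strongly the original image constrains the edit
for igs in [1.2, 1.5, 2.0]:
    result = pipeline(
        prompt,
        image=image,
        num_inference_steps=20,
        image_guidance_scale=igs,
        guidance_scale=7.5,
        generator=torch.Generator("cuda").manual_seed(0),
    ).images[0]
    result.save(f"edited_igs_{igs}.png")
```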
## Stable Diffusion XL
Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high-resolution images, and it adds a second text encoder to its architecture. Use the [`train_instruct_pix2pix_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py) script to train an SDXL model to follow image editing instructions.
The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide.
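As a rough sketch of a launch command (the flags mirror the SD script above; check `parse_args()` in `train_instruct_pix2pix_sdxl.py` for the exact set it supports):
```bash
accelerate launch train_instruct_pix2pix_sdxl.py \
  --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \
  --dataset_name=$DATASET_ID \
  --resolution=256 \
  --train_batch_size=1 \
  --gradient_accumulation_steps=4 \
  --max_train_steps=15000 \
  --conditioning_dropout_prob=0.05
```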
## Next steps
Congratulations on training your own InstructPix2Pix model! 🥳 To learn more about the model, it may be helpful to:
- Read the [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) blog post to learn more about some experiments we've done with InstructPix2Pix, dataset preparation, and results for different instructions.
| diffusers/docs/source/en/training/instructpix2pix.md/0 | {
"file_path": "diffusers/docs/source/en/training/instructpix2pix.md",
"repo_id": "diffusers",
"token_count": 4157
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
[[open-in-colab]]
# Load LoRAs for inference
There are many adapter types (with [LoRAs](https://huggingface.co/docs/peft/conceptual_guides/adapter#low-rank-adaptation-lora) being the most popular) trained in different styles to achieve different effects. You can even combine multiple adapters to create new and unique images.
In this tutorial, you'll learn how to easily load and manage adapters for inference with the 🤗 [PEFT](https://huggingface.co/docs/peft/index) integration in 🤗 Diffusers. You'll use LoRA as the main adapter technique, so you'll see the terms LoRA and adapter used interchangeably.
Let's first install all the required libraries.
```bash
!pip install -q transformers accelerate peft diffusers
```
Now, load a pipeline with a [Stable Diffusion XL (SDXL)](../api/pipelines/stable_diffusion/stable_diffusion_xl) checkpoint:
```python
from diffusers import DiffusionPipeline
import torch
pipe_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = DiffusionPipeline.from_pretrained(pipe_id, torch_dtype=torch.float16).to("cuda")
```
Next, load a [CiroN2022/toy-face](https://huggingface.co/CiroN2022/toy-face) adapter with the [`~diffusers.loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] method. With the 🤗 PEFT integration, you can assign a specific `adapter_name` to the checkpoint, which lets you easily switch between different LoRA checkpoints. Let's call this adapter `"toy"`.
```python
pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
```
Make sure to include the token `toy_face` in the prompt and then you can perform inference:
```python
prompt = "toy_face of a hacker with a hoodie"
lora_scale = 0.9
image = pipe(
prompt, num_inference_steps=30, cross_attention_kwargs={"scale": lora_scale}, generator=torch.manual_seed(0)
).images[0]
image
```

With the `adapter_name` parameter, it is really easy to use another adapter for inference! Load the [nerijs/pixel-art-xl](https://huggingface.co/nerijs/pixel-art-xl) adapter that has been fine-tuned to generate pixel art images and call it `"pixel"`.
The pipeline automatically sets the first loaded adapter (`"toy"`) as the active adapter, but you can activate the `"pixel"` adapter with the [`~loaders.peft.PeftAdapterMixin.set_adapters`] method:
```python
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipe.set_adapters("pixel")
```
Make sure you include the token `pixel art` in your prompt to generate a pixel art image:
```python
prompt = "a hacker with a hoodie, pixel art"
image = pipe(
prompt, num_inference_steps=30, cross_attention_kwargs={"scale": lora_scale}, generator=torch.manual_seed(0)
).images[0]
image
```

<Tip>
By default, if the most up-to-date versions of PEFT and Transformers are detected, `low_cpu_mem_usage` is set to `True` to speed up the loading time of LoRA checkpoints.
</Tip>
## Merge adapters
You can also merge different adapter checkpoints for inference to blend their styles together.
Once again, use the [`~loaders.peft.PeftAdapterMixin.set_adapters`] method to activate the `pixel` and `toy` adapters and specify the weights for how they should be merged.
```python
pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.5, 1.0])
```
<Tip>
LoRA checkpoints in the diffusion community are almost always obtained with [DreamBooth](https://huggingface.co/docs/diffusers/main/en/training/dreambooth). DreamBooth training often relies on "trigger" words in the input text prompts in order for the generation results to look as expected. When you combine multiple LoRA checkpoints, it's important to ensure the trigger words for the corresponding LoRA checkpoints are present in the input text prompts.
</Tip>
Remember to use the trigger words for [CiroN2022/toy-face](https://hf.co/CiroN2022/toy-face) and [nerijs/pixel-art-xl](https://hf.co/nerijs/pixel-art-xl) (these are found in their repositories) in the prompt to generate an image.
```python
prompt = "toy_face of a hacker with a hoodie, pixel art"
image = pipe(
prompt, num_inference_steps=30, cross_attention_kwargs={"scale": 1.0}, generator=torch.manual_seed(0)
).images[0]
image
```

Impressive! As you can see, the model generated an image that mixed the characteristics of both adapters.
> [!TIP]
> Through its PEFT integration, Diffusers also offers more efficient merging methods which you can learn about in the [Merge LoRAs](../using-diffusers/merge_loras) guide!
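One of those options, sketched here, is fusing the currently active adapters directly into the base weights (this assumes your installed Diffusers version supports the `adapter_names` argument of `fuse_lora`):
```python
# fuse the "pixel" and "toy" adapters into the base model weights at the given scale
pipe.fuse_lora(adapter_names=["pixel", "toy"], lora_scale=1.0)

# generate as usual; the LoRA weights are now merged into the base model
image = pipe("toy_face of a hacker with a hoodie, pixel art", num_inference_steps=30).images[0]

# undo the fusion to get the original base weights back
pipe.unfuse_lora()
```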
To return to only using one adapter, use the [`~loaders.peft.PeftAdapterMixin.set_adapters`] method to activate the `"toy"` adapter:
```python
pipe.set_adapters("toy")
prompt = "toy_face of a hacker with a hoodie"
lora_scale = 0.9
image = pipe(
prompt, num_inference_steps=30, cross_attention_kwargs={"scale": lora_scale}, generator=torch.manual_seed(0)
).images[0]
image
```
Or to disable all adapters entirely, use the [`~loaders.peft.PeftAdapterMixin.disable_lora`] method to return the base model.
```python
pipe.disable_lora()
prompt = "toy_face of a hacker with a hoodie"
image = pipe(prompt, num_inference_steps=30, generator=torch.manual_seed(0)).images[0]
image
```

### Customize adapters strength
For even more customization, you can control how strongly the adapter affects each part of the pipeline. For this, pass a dictionary with the control strengths (called "scales") to [`~loaders.peft.PeftAdapterMixin.set_adapters`].
For example, here's how you can turn on the adapter for the `down` parts, but turn it off for the `mid` and `up` parts:
```python
pipe.enable_lora() # enable lora again, after we disabled it above
prompt = "toy_face of a hacker with a hoodie, pixel art"
adapter_weight_scales = { "unet": { "down": 1, "mid": 0, "up": 0} }
pipe.set_adapters("pixel", adapter_weight_scales)
image = pipe(prompt, num_inference_steps=30, generator=torch.manual_seed(0)).images[0]
image
```

Let's see how turning off the `down` part and turning on the `mid` and `up` part respectively changes the image.
```python
adapter_weight_scales = { "unet": { "down": 0, "mid": 1, "up": 0} }
pipe.set_adapters("pixel", adapter_weight_scales)
image = pipe(prompt, num_inference_steps=30, generator=torch.manual_seed(0)).images[0]
image
```

```python
adapter_weight_scales = { "unet": { "down": 0, "mid": 0, "up": 1} }
pipe.set_adapters("pixel", adapter_weight_scales)
image = pipe(prompt, num_inference_steps=30, generator=torch.manual_seed(0)).images[0]
image
```

Looks cool!
This is a really powerful feature: you can control the adapter strengths down to the per-transformer level, and you can even use it with multiple adapters.
```python
adapter_weight_scales_toy = 0.5
adapter_weight_scales_pixel = {
"unet": {
"down": 0.9, # all transformers in the down-part will use scale 0.9
# "mid" # because, in this example, "mid" is not given, all transformers in the mid part will use the default scale 1.0
"up": {
"block_0": 0.6, # all 3 transformers in the 0th block in the up-part will use scale 0.6
"block_1": [0.4, 0.8, 1.0], # the 3 transformers in the 1st block in the up-part will use scales 0.4, 0.8 and 1.0 respectively
}
}
}
pipe.set_adapters(["toy", "pixel"], [adapter_weight_scales_toy, adapter_weight_scales_pixel])
image = pipe(prompt, num_inference_steps=30, generator=torch.manual_seed(0)).images[0]
image
```

## Manage adapters
You have attached multiple adapters in this tutorial, and if you're feeling a bit lost on what adapters have been attached to the pipeline's components, use the [`~diffusers.loaders.StableDiffusionLoraLoaderMixin.get_active_adapters`] method to check the list of active adapters:
```py
active_adapters = pipe.get_active_adapters()
active_adapters
["toy", "pixel"]
```
You can also get the active adapters of each pipeline component with [`~diffusers.loaders.StableDiffusionLoraLoaderMixin.get_list_adapters`]:
```py
list_adapters_component_wise = pipe.get_list_adapters()
list_adapters_component_wise
{"text_encoder": ["toy", "pixel"], "unet": ["toy", "pixel"], "text_encoder_2": ["toy", "pixel"]}
```
The [`~loaders.peft.PeftAdapterMixin.delete_adapters`] function completely removes an adapter and its LoRA layers from a model.
```py
pipe.delete_adapters("toy")
pipe.get_active_adapters()
["pixel"]
```
| diffusers/docs/source/en/tutorials/using_peft_for_inference.md/0 | {
"file_path": "diffusers/docs/source/en/tutorials/using_peft_for_inference.md",
"repo_id": "diffusers",
"token_count": 3467
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# IP-Adapter
[IP-Adapter](https://hf.co/papers/2308.06721) is an image prompt adapter that can be plugged into diffusion models to enable image prompting without any changes to the underlying model. Furthermore, this adapter can be reused with other models finetuned from the same base model and it can be combined with other adapters like [ControlNet](../using-diffusers/controlnet). The key idea behind IP-Adapter is the *decoupled cross-attention* mechanism which adds a separate cross-attention layer just for image features instead of using the same cross-attention layer for both text and image features. This allows the model to learn more image-specific features.
> [!TIP]
> Learn how to load an IP-Adapter in the [Load adapters](../using-diffusers/loading_adapters#ip-adapter) guide, and make sure you check out the [IP-Adapter Plus](../using-diffusers/loading_adapters#ip-adapter-plus) section which requires manually loading the image encoder.
This guide will walk you through using IP-Adapter for various tasks and use cases.
## General tasks
Let's take a look at how to use IP-Adapter's image prompting capabilities with the [`StableDiffusionXLPipeline`] for tasks like text-to-image, image-to-image, and inpainting. We also encourage you to try out other pipelines such as Stable Diffusion, LCM-LoRA, ControlNet, T2I-Adapter, or AnimateDiff!
In all the following examples, you'll see the [`~loaders.IPAdapterMixin.set_ip_adapter_scale`] method. This method controls the amount of text or image conditioning to apply to the model. A value of `1.0` means the model is only conditioned on the image prompt. Lowering this value encourages the model to produce more diverse images, but they may not be as aligned with the image prompt. Typically, a value of `0.5` achieves a good balance between the two prompt types and produces good results.
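As a quick illustration (assuming `pipeline` already has an IP-Adapter loaded, as in the snippets below):
```py
# condition only on the image prompt
pipeline.set_ip_adapter_scale(1.0)
# balance text and image prompts (a good starting point)
pipeline.set_ip_adapter_scale(0.5)
```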
> [!TIP]
> In the examples below, try adding `low_cpu_mem_usage=True` to the [`~loaders.IPAdapterMixin.load_ip_adapter`] method to speed up the loading time.
<hfoptions id="tasks">
<hfoption id="Text-to-image">
Crafting the precise text prompt to generate the image you want can be difficult because it may not always capture what you'd like to express. Adding an image alongside the text prompt helps the model better understand what it should generate and can lead to more accurate results.
Load a Stable Diffusion XL (SDXL) model and insert an IP-Adapter into the model with the [`~loaders.IPAdapterMixin.load_ip_adapter`] method. Use the `subfolder` parameter to load the SDXL model weights.
```py
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
pipeline.set_ip_adapter_scale(0.6)
```
Create a text prompt and load an image prompt before passing them to the pipeline to generate an image.
```py
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner.png")
generator = torch.Generator(device="cpu").manual_seed(0)
images = pipeline(
prompt="a polar bear sitting in a chair drinking a milkshake",
ip_adapter_image=image,
negative_prompt="deformed, ugly, wrong proportion, low res, bad anatomy, worst quality, low quality",
num_inference_steps=100,
generator=generator,
).images
images[0]
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_diner_2.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
</hfoption>
<hfoption id="Image-to-image">
IP-Adapter can also help with image-to-image by guiding the model to generate an image that resembles the original image and the image prompt.
Load a Stable Diffusion XL (SDXL) model and insert an IP-Adapter into the model with the [`~loaders.IPAdapterMixin.load_ip_adapter`] method. Use the `subfolder` parameter to load the SDXL model weights.
```py
from diffusers import AutoPipelineForImage2Image
from diffusers.utils import load_image
import torch
pipeline = AutoPipelineForImage2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
pipeline.set_ip_adapter_scale(0.6)
```
Pass the original image and the IP-Adapter image prompt to the pipeline to generate an image. Providing a text prompt to the pipeline is optional, but in this example, a text prompt is used to increase image quality.
```py
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_1.png")
ip_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_2.png")
generator = torch.Generator(device="cpu").manual_seed(4)
images = pipeline(
prompt="best quality, high quality",
image=image,
ip_adapter_image=ip_image,
generator=generator,
strength=0.6,
).images
images[0]
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_1.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_2.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_3.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
</hfoption>
<hfoption id="Inpainting">
IP-Adapter is also useful for inpainting because the image prompt allows you to be much more specific about what you'd like to generate.
Load a Stable Diffusion XL (SDXL) model and insert an IP-Adapter into the model with the [`~loaders.IPAdapterMixin.load_ip_adapter`] method. Use the `subfolder` parameter to load the SDXL model weights.
```py
from diffusers import AutoPipelineForInpainting
from diffusers.utils import load_image
import torch
pipeline = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
pipeline.set_ip_adapter_scale(0.6)
```
Pass a prompt, the original image, mask image, and the IP-Adapter image prompt to the pipeline to generate an image.
```py
mask_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_mask.png")
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_1.png")
ip_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_gummy.png")
generator = torch.Generator(device="cpu").manual_seed(4)
images = pipeline(
prompt="a cute gummy bear waving",
image=image,
mask_image=mask_image,
ip_adapter_image=ip_image,
generator=generator,
num_inference_steps=100,
).images
images[0]
```
<div class="flex gap-4">
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_bear_1.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">original image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_gummy.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div>
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_inpaint.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
</hfoption>
<hfoption id="Video">
IP-Adapter can also help you generate videos that are more aligned with your text prompt. For example, let's load [AnimateDiff](../api/pipelines/animatediff) with its motion adapter and insert an IP-Adapter into the model with the [`~loaders.IPAdapterMixin.load_ip_adapter`] method.
> [!WARNING]
> If you're planning on offloading the model to the CPU, make sure you run it after you've loaded the IP-Adapter. When you call [`~DiffusionPipeline.enable_model_cpu_offload`] before loading the IP-Adapter, it offloads the image encoder module to the CPU and it'll return an error when you try to run the pipeline.
```py
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif
from diffusers.utils import load_image
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2", torch_dtype=torch.float16)
pipeline = AnimateDiffPipeline.from_pretrained("emilianJR/epiCRealism", motion_adapter=adapter, torch_dtype=torch.float16)
scheduler = DDIMScheduler.from_pretrained(
"emilianJR/epiCRealism",
subfolder="scheduler",
clip_sample=False,
timestep_spacing="linspace",
beta_schedule="linear",
steps_offset=1,
)
pipeline.scheduler = scheduler
pipeline.enable_vae_slicing()
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.enable_model_cpu_offload()
```
Pass a prompt and an image prompt to the pipeline to generate a short video.
```py
ip_adapter_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_inpaint.png")
output = pipeline(
prompt="A cute gummy bear waving",
negative_prompt="bad quality, worse quality, low resolution",
ip_adapter_image=ip_adapter_image,
num_frames=16,
guidance_scale=7.5,
num_inference_steps=50,
generator=torch.Generator(device="cpu").manual_seed(0),
)
frames = output.frames[0]
export_to_gif(frames, "gummy_bear.gif")
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_inpaint.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gummy_bear.gif"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated video</figcaption>
</div>
</div>
</hfoption>
</hfoptions>
## Configure parameters
There are a couple of IP-Adapter parameters that are useful to know about and can help you with your image generation tasks. These parameters can make your workflow more efficient or give you more control over image generation.
### Image embeddings
IP-Adapter enabled pipelines provide the `ip_adapter_image_embeds` parameter to accept precomputed image embeddings. This is particularly useful in scenarios where you need to run the IP-Adapter pipeline multiple times because you have more than one image. For example, [multi IP-Adapter](#multi-ip-adapter) is a specific use case where you provide multiple styling images to generate a specific image in a specific style. Loading and encoding multiple images each time you use the pipeline would be inefficient. Instead, you can precompute and save the image embeddings to disk (which can save a lot of space if you're using high-quality images) and load them when you need them.
> [!TIP]
> This parameter also gives you the flexibility to load embeddings from other sources. For example, ComfyUI image embeddings for IP-Adapters are compatible with Diffusers and should work out-of-the-box!
Call the [`~StableDiffusionPipeline.prepare_ip_adapter_image_embeds`] method to encode and generate the image embeddings. Then you can save them to disk with `torch.save`.
> [!TIP]
> If you're using IP-Adapter with `ip_adapter_image_embeds` instead of `ip_adapter_image`, you can set `load_ip_adapter(image_encoder_folder=None,...)` because you don't need to load an image encoder to generate the image embeddings.
```py
image_embeds = pipeline.prepare_ip_adapter_image_embeds(
ip_adapter_image=image,
ip_adapter_image_embeds=None,
device="cuda",
num_images_per_prompt=1,
do_classifier_free_guidance=True,
)
torch.save(image_embeds, "image_embeds.ipadpt")
```
Now load the image embeddings by passing them to the `ip_adapter_image_embeds` parameter.
```py
image_embeds = torch.load("image_embeds.ipadpt")
images = pipeline(
prompt="a polar bear sitting in a chair drinking a milkshake",
ip_adapter_image_embeds=image_embeds,
negative_prompt="deformed, ugly, wrong proportion, low res, bad anatomy, worst quality, low quality",
num_inference_steps=100,
generator=generator,
).images
```
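When you reuse saved embeddings in a fresh session, you can also skip loading the image encoder entirely (a sketch, shown with the SDXL IP-Adapter used earlier in this guide):
```py
pipeline.load_ip_adapter(
    "h94/IP-Adapter",
    subfolder="sdxl_models",
    weight_name="ip-adapter_sdxl.bin",
    image_encoder_folder=None,  # not needed since precomputed embeddings are passed in
)
```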
### IP-Adapter masking
Binary masks specify which portion of the output image should be assigned to an IP-Adapter. This is useful for composing more than one IP-Adapter image. For each input IP-Adapter image, you must provide a binary mask.
To start, preprocess the input IP-Adapter images with the [`~image_processor.IPAdapterMaskProcessor.preprocess()`] method to generate their masks. For optimal results, provide the output height and width to [`~image_processor.IPAdapterMaskProcessor.preprocess()`]. This ensures masks with different aspect ratios are appropriately stretched. If the input masks already match the aspect ratio of the generated image, you don't have to set the `height` and `width`.
```py
from diffusers.image_processor import IPAdapterMaskProcessor
mask1 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_mask1.png")
mask2 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_mask2.png")
output_height = 1024
output_width = 1024
processor = IPAdapterMaskProcessor()
masks = processor.preprocess([mask1, mask2], height=output_height, width=output_width)
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask1.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">mask one</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_mask2.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">mask two</figcaption>
</div>
</div>
When there is more than one input IP-Adapter image, load them as a list and provide the IP-Adapter scale list. Each of the input IP-Adapter images here corresponds to one of the masks generated above.
```py
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name=["ip-adapter-plus-face_sdxl_vit-h.safetensors"])
pipeline.set_ip_adapter_scale([[0.7, 0.7]]) # one scale for each image-mask pair
face_image1 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl1.png")
face_image2 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl2.png")
ip_images = [[face_image1, face_image2]]
masks = [masks.reshape(1, masks.shape[0], masks.shape[2], masks.shape[3])]
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl1.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image one</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_mask_girl2.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image two</figcaption>
</div>
</div>
Now pass the preprocessed masks to `cross_attention_kwargs` in the pipeline call.
```py
generator = torch.Generator(device="cpu").manual_seed(0)
num_images = 1
image = pipeline(
prompt="2 girls",
ip_adapter_image=ip_images,
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
num_inference_steps=20,
num_images_per_prompt=num_images,
generator=generator,
cross_attention_kwargs={"ip_adapter_masks": masks}
).images[0]
image
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_attention_mask_result_seed_0.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter masking applied</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_no_attention_mask_result_seed_0.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">no IP-Adapter masking applied</figcaption>
</div>
</div>
## Specific use cases
IP-Adapter's image prompting and compatibility with other adapters and models makes it a versatile tool for a variety of use cases. This section covers some of the more popular applications of IP-Adapter, and we can't wait to see what you come up with!
### Face model
Generating accurate faces is challenging because they are complex and nuanced. Diffusers supports two IP-Adapter checkpoints specifically trained to generate faces from the [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter) repository:
* [ip-adapter-full-face_sd15.safetensors](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter-full-face_sd15.safetensors) is conditioned with images of cropped faces and removed backgrounds
* [ip-adapter-plus-face_sd15.safetensors](https://huggingface.co/h94/IP-Adapter/blob/main/models/ip-adapter-plus-face_sd15.safetensors) uses patch embeddings and is conditioned with images of cropped faces
Additionally, Diffusers supports all IP-Adapter checkpoints trained with face embeddings extracted by `insightface` face models. Supported models are from the [h94/IP-Adapter-FaceID](https://huggingface.co/h94/IP-Adapter-FaceID) repository.
For face models, use the [h94/IP-Adapter](https://huggingface.co/h94/IP-Adapter) checkpoint. It is also recommended to use [`DDIMScheduler`] or [`EulerDiscreteScheduler`] for face models.
```py
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image
pipeline = StableDiffusionPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5",
torch_dtype=torch.float16,
).to("cuda")
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin")
pipeline.set_ip_adapter_scale(0.5)
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_einstein_base.png")
generator = torch.Generator(device="cpu").manual_seed(26)
image = pipeline(
prompt="A photo of Einstein as a chef, wearing an apron, cooking in a French restaurant",
ip_adapter_image=image,
negative_prompt="lowres, bad anatomy, worst quality, low quality",
num_inference_steps=100,
generator=generator,
).images[0]
image
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_einstein_base.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_einstein.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
To use IP-Adapter FaceID models, first extract face embeddings with `insightface`. Then pass the list of tensors to the pipeline as `ip_adapter_image_embeds`.
```py
import cv2
import numpy as np
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image
from insightface.app import FaceAnalysis
pipeline = StableDiffusionPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5",
torch_dtype=torch.float16,
).to("cuda")
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
pipeline.load_ip_adapter("h94/IP-Adapter-FaceID", subfolder=None, weight_name="ip-adapter-faceid_sd15.bin", image_encoder_folder=None)
pipeline.set_ip_adapter_scale(0.6)
image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_mask_girl1.png")
ref_images_embeds = []
app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2RGB)
faces = app.get(image)
image = torch.from_numpy(faces[0].normed_embedding)
ref_images_embeds.append(image.unsqueeze(0))
ref_images_embeds = torch.stack(ref_images_embeds, dim=0).unsqueeze(0)
neg_ref_images_embeds = torch.zeros_like(ref_images_embeds)
id_embeds = torch.cat([neg_ref_images_embeds, ref_images_embeds]).to(dtype=torch.float16, device="cuda")
generator = torch.Generator(device="cpu").manual_seed(42)
images = pipeline(
prompt="A photo of a girl",
ip_adapter_image_embeds=[id_embeds],
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
num_inference_steps=20, num_images_per_prompt=1,
generator=generator
).images
```
Both IP-Adapter FaceID Plus and Plus v2 models require CLIP image embeddings. You can prepare face embeddings as shown previously, then you can extract and pass CLIP embeddings to the hidden image projection layers.
```py
# this continues the FaceID example above (pipeline, image, cv2, np, and torch are already defined)
from insightface.utils import face_align
ref_images_embeds = []
ip_adapter_images = []
app = FaceAnalysis(name="buffalo_l", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
app.prepare(ctx_id=0, det_size=(640, 640))
image = cv2.cvtColor(np.asarray(image), cv2.COLOR_BGR2RGB)
faces = app.get(image)
ip_adapter_images.append(face_align.norm_crop(image, landmark=faces[0].kps, image_size=224))
image = torch.from_numpy(faces[0].normed_embedding)
ref_images_embeds.append(image.unsqueeze(0))
ref_images_embeds = torch.stack(ref_images_embeds, dim=0).unsqueeze(0)
neg_ref_images_embeds = torch.zeros_like(ref_images_embeds)
id_embeds = torch.cat([neg_ref_images_embeds, ref_images_embeds]).to(dtype=torch.float16, device="cuda")
clip_embeds = pipeline.prepare_ip_adapter_image_embeds(
[ip_adapter_images], None, torch.device("cuda"), num_images, True)[0]  # num_images: images per prompt used in your pipeline call, e.g. 1
pipeline.unet.encoder_hid_proj.image_projection_layers[0].clip_embeds = clip_embeds.to(dtype=torch.float16)
pipeline.unet.encoder_hid_proj.image_projection_layers[0].shortcut = False # True if Plus v2
```
### Multi IP-Adapter
More than one IP-Adapter can be used at the same time to generate specific images in more diverse styles. For example, you can use IP-Adapter-Face to generate consistent faces and characters, and IP-Adapter Plus to generate those faces in a specific style.
> [!TIP]
> Read the [IP-Adapter Plus](../using-diffusers/loading_adapters#ip-adapter-plus) section to learn why you need to manually load the image encoder.
Load the image encoder with [`~transformers.CLIPVisionModelWithProjection`].
```py
import torch
from diffusers import AutoPipelineForText2Image, DDIMScheduler
from transformers import CLIPVisionModelWithProjection
from diffusers.utils import load_image
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
"h94/IP-Adapter",
subfolder="models/image_encoder",
torch_dtype=torch.float16,
)
```
Next, you'll load a base model, scheduler, and the IP-Adapters. The IP-Adapters to use are passed as a list to the `weight_name` parameter:
* [ip-adapter-plus_sdxl_vit-h](https://huggingface.co/h94/IP-Adapter#ip-adapter-for-sdxl-10) uses patch embeddings and a ViT-H image encoder
* [ip-adapter-plus-face_sdxl_vit-h](https://huggingface.co/h94/IP-Adapter#ip-adapter-for-sdxl-10) has the same architecture but it is conditioned with images of cropped faces
```py
pipeline = AutoPipelineForText2Image.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
torch_dtype=torch.float16,
image_encoder=image_encoder,
)
pipeline.scheduler = DDIMScheduler.from_config(pipeline.scheduler.config)
pipeline.load_ip_adapter(
"h94/IP-Adapter",
subfolder="sdxl_models",
weight_name=["ip-adapter-plus_sdxl_vit-h.safetensors", "ip-adapter-plus-face_sdxl_vit-h.safetensors"]
)
pipeline.set_ip_adapter_scale([0.7, 0.3])
pipeline.enable_model_cpu_offload()
```
Load an image prompt and a folder containing images of a certain style you want to use.
```py
face_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/women_input.png")
style_folder = "https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/style_ziggy"
style_images = [load_image(f"{style_folder}/img{i}.png") for i in range(10)]
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/women_input.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image of face</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_style_grid.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter style images</figcaption>
</div>
</div>
Pass the image prompt and style images as a list to the `ip_adapter_image` parameter, and run the pipeline!
```py
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipeline(
prompt="wonderwoman",
ip_adapter_image=[style_images, face_image],
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
num_inference_steps=50, num_images_per_prompt=1,
generator=generator,
).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ip_multi_out.png" />
</div>
### Instant generation
[Latent Consistency Models (LCM)](../using-diffusers/inference_with_lcm_lora) are diffusion models that can generate images in as little as 4 steps compared to other diffusion models like SDXL that typically require way more steps. This is why image generation with an LCM feels "instantaneous". IP-Adapters can be plugged into an LCM-LoRA model to instantly generate images with an image prompt.
The IP-Adapter weights need to be loaded first, then you can use [`~StableDiffusionPipeline.load_lora_weights`] to load the LoRA style and weight you want to apply to your image.
```py
from diffusers import DiffusionPipeline, LCMScheduler
import torch
from diffusers.utils import load_image
model_id = "sd-dreambooth-library/herge-style"
lcm_lora_id = "latent-consistency/lcm-lora-sdv1-5"
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.load_lora_weights(lcm_lora_id)
pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
pipeline.enable_model_cpu_offload()
```
Try using a lower IP-Adapter scale to condition image generation more on the [herge_style](https://huggingface.co/sd-dreambooth-library/herge-style) checkpoint, and remember to use the special token `herge_style` in your prompt to trigger and apply the style.
```py
pipeline.set_ip_adapter_scale(0.4)
prompt = "herge_style woman in armor, best quality, high quality"
generator = torch.Generator(device="cpu").manual_seed(0)
ip_adapter_image = load_image("https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png")
image = pipeline(
prompt=prompt,
ip_adapter_image=ip_adapter_image,
num_inference_steps=4,
guidance_scale=1,
).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/ip_adapter_herge.png" />
</div>
### Structural control
To control image generation to an even greater degree, you can combine IP-Adapter with a model like [ControlNet](../using-diffusers/controlnet). A ControlNet is also an adapter that can be inserted into a diffusion model to allow for conditioning on an additional control image. The control image can be depth maps, edge maps, pose estimations, and more.
Load a [`ControlNetModel`] checkpoint conditioned on depth maps, insert it into a diffusion model, and load the IP-Adapter.
```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
import torch
from diffusers.utils import load_image
controlnet_model_path = "lllyasviel/control_v11f1p_sd15_depth"
controlnet = ControlNetModel.from_pretrained(controlnet_model_path, torch_dtype=torch.float16)
pipeline = StableDiffusionControlNetPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16)
pipeline.to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
```
Now load the IP-Adapter image and depth map.
```py
ip_adapter_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/statue.png")
depth_map = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/depth.png")
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/statue.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/depth.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">depth map</figcaption>
</div>
</div>
Pass the depth map and IP-Adapter image to the pipeline to generate an image.
```py
generator = torch.Generator(device="cpu").manual_seed(33)
image = pipeline(
prompt="best quality, high quality",
image=depth_map,
ip_adapter_image=ip_adapter_image,
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
num_inference_steps=50,
generator=generator,
).images[0]
image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ipa-controlnet-out.png" />
</div>
### Style & layout control
[InstantStyle](https://arxiv.org/abs/2404.02733) is a plug-and-play method on top of IP-Adapter, which disentangles style and layout from image prompt to control image generation. This way, you can generate images following only the style or layout from image prompt, with significantly improved diversity. This is achieved by only activating IP-Adapters to specific parts of the model.
By default, IP-Adapters are inserted into all layers of the model. Use the [`~loaders.IPAdapterMixin.set_ip_adapter_scale`] method with a dictionary to assign scales to the IP-Adapter at different layers.
```py
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image
import torch
pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
scale = {
"down": {"block_2": [0.0, 1.0]},
"up": {"block_0": [0.0, 1.0, 0.0]},
}
pipeline.set_ip_adapter_scale(scale)
```
This will activate the IP-Adapter at the second layer in the model's down-part block 2 and up-part block 0. The former is the layer where the IP-Adapter injects layout information and the latter injects style. By inserting the IP-Adapter into these two layers, you can generate images that follow both the style and layout of the image prompt, but with contents more aligned to the text prompt.
```py
style_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg")
generator = torch.Generator(device="cpu").manual_seed(26)
image = pipeline(
prompt="a cat, masterpiece, best quality, high quality",
ip_adapter_image=style_image,
negative_prompt="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry",
guidance_scale=5,
num_inference_steps=30,
generator=generator,
).images[0]
image
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter image</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_layout.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">generated image</figcaption>
</div>
</div>
In contrast, inserting the IP-Adapter into all layers will often generate images that overly focus on the image prompt and diminish diversity.
Activate IP-Adapter only in the style layer and then call the pipeline again.
```py
scale = {
"up": {"block_0": [0.0, 1.0, 0.0]},
}
pipeline.set_ip_adapter_scale(scale)
generator = torch.Generator(device="cpu").manual_seed(26)
image = pipeline(
prompt="a cat, masterpiece, best quality, high quality",
ip_adapter_image=style_image,
negative_prompt="text, watermark, lowres, low quality, worst quality, deformed, glitch, low contrast, noisy, saturation, blurry",
guidance_scale=5,
num_inference_steps=30,
generator=generator,
).images[0]
image
```
<div class="flex flex-row gap-4">
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_style_only.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter only in style layer</figcaption>
</div>
<div class="flex-1">
<img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/cat_ip_adapter.png"/>
<figcaption class="mt-2 text-center text-sm text-gray-500">IP-Adapter in all layers</figcaption>
</div>
</div>
Note that you don't have to specify all layers in the dictionary. Layers not included in the dictionary are set to a scale of 0, which disables the IP-Adapter in those layers by default.
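For example, here is a minimal sketch (reusing the `pipeline` loaded above) of a scale dictionary that only keeps the layout layer active; every layer left out of the dictionary stays at 0, and passing a plain float re-applies a single scale to all layers:
```py
# only the layout layer in down-part block 2 gets a non-zero scale;
# every layer omitted from the dictionary stays at 0 (IP-Adapter disabled there)
layout_only_scale = {
    "down": {"block_2": [0.0, 1.0]},
}
pipeline.set_ip_adapter_scale(layout_only_scale)

# passing a single float instead of a dictionary applies that scale to all layers again
pipeline.set_ip_adapter_scale(0.6)
```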
| diffusers/docs/source/en/using-diffusers/ip_adapter.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/ip_adapter.md",
"repo_id": "diffusers",
"token_count": 12486
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# JAX/Flax
[[open-in-colab]]
🤗 Diffusers supports Flax for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform. This guide shows you how to run inference with Stable Diffusion using JAX/Flax.
Before you begin, make sure you have the necessary libraries installed:
```py
# uncomment to install the necessary libraries in Colab
#!pip install -q jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
#!pip install -q diffusers
```
You should also make sure you're using a TPU backend. While JAX does not run exclusively on TPUs, you'll get the best performance on a TPU because each server has 8 TPU accelerators working in parallel.
If you are running this guide in Colab, select *Runtime* in the menu above, select the option *Change runtime type*, and then select *TPU* under the *Hardware accelerator* setting. Import JAX and quickly check whether you're using a TPU:
```python
import jax
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
num_devices = jax.device_count()
device_type = jax.devices()[0].device_kind
print(f"Found {num_devices} JAX devices of type {device_type}.")
assert (
    "TPU" in device_type
), "Available device is not a TPU, please select TPU from Runtime > Change runtime type > Hardware accelerator"
# Found 8 JAX devices of type Cloud TPU.
```
Great, now you can import the rest of the dependencies you'll need:
```python
import jax.numpy as jnp
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline
```
## Load a model
Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading a pretrained Flax pipeline returns *both* the pipeline and the model weights (or parameters). In this guide, you'll use `bfloat16`, a more efficient half-float type that is supported by TPUs (you can also use `float32` for full precision if you want).
```python
dtype = jnp.bfloat16
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
variant="bf16",
dtype=dtype,
)
```
## Inference
TPUs usually have 8 devices working in parallel, so let's use the same prompt for each device. This means you can perform inference on 8 devices at once, with each device generating one image. As a result, you'll get 8 images in the same amount of time it takes for one chip to generate a single image!
<Tip>
Learn more details in the [How does parallelization work?](#how-does-parallelization-work) section.
</Tip>
After replicating the prompt, get the tokenized text ids by calling the `prepare_inputs` function on the pipeline. The length of the tokenized text is set to 77 tokens as required by the configuration of the underlying CLIP text model.
```python
prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
prompt = [prompt] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids.shape
# (8, 77)
```
Model parameters and inputs have to be replicated across the 8 parallel devices. The parameters dictionary is replicated with [`flax.jax_utils.replicate`](https://flax.readthedocs.io/en/latest/api_reference/flax.jax_utils.html#flax.jax_utils.replicate) which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
```python
# parameters
p_params = replicate(params)
# arrays
prompt_ids = shard(prompt_ids)
prompt_ids.shape
# (8, 1, 77)
```
This shape means each one of the 8 devices receives as an input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, you could have a batch size larger than `1` if you want to generate multiple images (per chip) at once.
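For example, a minimal sketch of a per-device batch size of 2 (assuming your TPU has enough memory for it and reusing the `prompt` list from above) could look like this; the shapes in the comments are the expected results:
```python
# double the prompt list: 2 prompts per device instead of 1 (16 prompts total on 8 devices)
prompt = [prompt[0]] * jax.device_count() * 2
prompt_ids = pipeline.prepare_inputs(prompt)  # (16, 77)
prompt_ids = shard(prompt_ids)                # (8, 2, 77) -> per-device batch size of 2
```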
Next, create a random number generator to pass to the generation function. This is standard procedure in Flax, which is very serious and opinionated about random numbers. All functions that deal with random numbers are expected to receive a generator to ensure reproducibility, even when you're training across multiple distributed devices.
The helper function below uses a seed to initialize a random number generator. As long as you use the same seed, you'll get the exact same results. Feel free to use different seeds when exploring results later in the guide.
```python
def create_key(seed=0):
return jax.random.PRNGKey(seed)
```
Then split the `rng` returned by the helper function 8 times so each device receives a different generator and generates a different image.
```python
rng = create_key(0)
rng = jax.random.split(rng, jax.device_count())
```
To take advantage of JAX's optimized speed on a TPU, pass `jit=True` to the pipeline to compile the JAX code into an efficient representation and to ensure the model runs in parallel across the 8 devices.
<Tip warning={true}>
You need to ensure all your inputs have the same shape in subsequent calls, otherwise JAX will need to recompile the code which is slower.
</Tip>
The first inference run takes more time because it needs to compile the code, but subsequent calls (even with different inputs) are much faster. For example, it took more than a minute to compile on a TPU v2-8, but then it takes about **7s** on a future inference run!
```py
%%time
images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
# CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
# Wall time: 1min 29s
```
The returned array has shape `(8, 1, 512, 512, 3)` which should be reshaped to remove the second dimension and get 8 images of `512 × 512 × 3`. Then you can use the [`~utils.numpy_to_pil`] function to convert the arrays into images.
```python
from diffusers.utils import make_image_grid
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
make_image_grid(images, rows=2, cols=4)
```

## Using different prompts
You don't necessarily have to use the same prompt on all devices. For example, to generate 8 different prompts:
```python
prompts = [
"Labrador in the style of Hokusai",
"Painting of a squirrel skating in New York",
"HAL-9000 in the style of Van Gogh",
"Times Square under water, with fish and a dolphin swimming around",
"Ancient Roman fresco showing a man working on his laptop",
"Close-up photograph of young black woman against urban background, high quality, bokeh",
"Armchair in the shape of an avocado",
"Clown astronaut in space, with Earth in the background",
]
prompt_ids = pipeline.prepare_inputs(prompts)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, rng, jit=True).images
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
make_image_grid(images, 2, 4)
```

## How does parallelization work?
The Flax pipeline in 🤗 Diffusers automatically compiles the model and runs it in parallel on all available devices. Let's take a closer look at how that process works.
JAX parallelization can be done in multiple ways. The easiest one revolves around using the [`jax.pmap`](https://jax.readthedocs.io/en/latest/_autosummary/jax.pmap.html) function to achieve single-program multiple-data (SPMD) parallelization. It means running several copies of the same code, each on different data inputs. More sophisticated approaches are possible, and you can go over to the JAX [documentation](https://jax.readthedocs.io/en/latest/index.html) to explore this topic in more detail if you are interested!
`jax.pmap` does two things:
1. Compiles (or "`jit`s") the code, similar to `jax.jit()`. This doesn't happen when you call `pmap`, but only the first time the `pmap`ped function is called.
2. Ensures the compiled code runs in parallel on all available devices.
To demonstrate, call `pmap` on the pipeline's `_generate` method (this is a private method that generates images and may be renamed or removed in future releases of 🤗 Diffusers):
```python
p_generate = pmap(pipeline._generate)
```
After calling `pmap`, the prepared function `p_generate` will:
1. Make a copy of the underlying function, `pipeline._generate`, on each device.
2. Send each device a different portion of the input arguments (this is why it's necessary to call the *shard* function). In this case, `prompt_ids` has shape `(8, 1, 77, 768)` so the array is split into 8 and each copy of `_generate` receives an input with shape `(1, 77, 768)`.
The most important thing to pay attention to here is the batch size (1 in this example), and the input dimensions that make sense for your code. You don't have to change anything else to make the code work in parallel.
The first time you call the pipeline takes more time, but the calls afterward are much faster. The `block_until_ready` function is used to correctly measure inference time because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking occurs automatically when you want to use the result of a computation that has not yet been materialized.
```py
%%time
images = p_generate(prompt_ids, p_params, rng)
images = images.block_until_ready()
# CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
# Wall time: 1min 15s
```
Check your image dimensions to see if they're correct:
```python
images.shape
# (8, 1, 512, 512, 3)
```
## Resources
To learn more about how JAX works with Stable Diffusion, you may be interested in reading:
* [Accelerating Stable Diffusion XL Inference with JAX on Cloud TPU v5e](https://hf.co/blog/sdxl_jax)
| diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md/0 | {
"file_path": "diffusers/docs/source/en/using-diffusers/stable_diffusion_jax_how_to.md",
"repo_id": "diffusers",
"token_count": 3095
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Stable Diffusion XL
Stable Diffusion XL was proposed in [SDXL: Improving Latent Diffusion Models for High-Resolution Image Synthesis](https://arxiv.org/abs/2307.01952) by Dustin Podell, Zion English, Kyle Lacey, Andreas Blattmann, Tim Dockhorn, Jonas Müller, Joe Penna, and Robin Rombach.
The abstract from the paper is:
*We present SDXL, a latent diffusion model for text-to-image synthesis. Compared to previous versions of Stable Diffusion, SDXL contains a UNet backbone three times larger: the increase in model parameters comes from using more attention blocks and a larger cross-attention context for SDXL's second text encoder. We design multiple novel conditioning schemes for multiple aspect ratios. We also introduce a refinement model that improves the visual quality of samples generated by SDXL via a post-hoc image-to-image technique. SDXL shows improved performance over previous versions of Stable Diffusion and achieves results competitive with black-box state-of-the-art image generators.*
## Tips
- Stable Diffusion XL works especially well with images between 768 and 1024.
- Stable Diffusion XL can pass a different prompt to each of the text encoders it was trained on, as shown below. You can even pass different parts of the same prompt to each text encoder.
- Stable Diffusion XL output images can be improved by using a refiner, as shown below.
### Available checkpoints:
- *Text-to-Image (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with [`StableDiffusionXLPipeline`]
- *Image-to-Image / refiner (1024x1024 resolution)*: [stabilityai/stable-diffusion-xl-refiner-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) with [`StableDiffusionXLImg2ImgPipeline`]
## Usage example
Before using SDXL, make sure to install `transformers`, `accelerate`, `safetensors`, and `invisible_watermark`.
You can install the libraries as follows:
```sh
pip install transformers
pip install accelerate
pip install safetensors
pip install invisible-watermark>=0.2.0
```
### Watermarker
We recommend adding an invisible watermark to images generated with Stable Diffusion XL, as this can help downstream applications identify whether content was machine-generated. To do so, install the [invisible_watermark library](https://pypi.org/project/invisible-watermark/) via:
```sh
pip install invisible-watermark>=0.2.0
```
If the `invisible-watermark` library is installed, the watermarker is used **by default**.
If you have different requirements for generating or safely deploying images, you can disable the watermarker as follows:
```py
pipe = StableDiffusionXLPipeline.from_pretrained(..., add_watermarker=False)
```
### Text-to-Image
You can use SDXL for *text-to-image* as follows:
```py
from diffusers import StableDiffusionXLPipeline
import torch
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt).images[0]
```
### Image-to-image
You can use SDXL for *image-to-image* as follows:
```py
import torch
from diffusers import StableDiffusionXLImg2ImgPipeline
from diffusers.utils import load_image
pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe = pipe.to("cuda")
url = "https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/aa_xl/000000009.png"
init_image = load_image(url).convert("RGB")
prompt = "a photo of an astronaut riding a horse on mars"
image = pipe(prompt, image=init_image).images[0]
```
### Inpainting
You can use SDXL for *inpainting* as follows:
```py
import torch
from diffusers import StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")
prompt = "A majestic tiger sitting on a bench"
image = pipe(prompt=prompt, image=init_image, mask_image=mask_image, num_inference_steps=50, strength=0.80).images[0]
```
### Refining the image output
In addition to the [base model checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0), StableDiffusion-XL also includes a [refiner checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0) that is specialized in denoising low-noise-stage images to generate images of higher quality. This refiner checkpoint can be used as a "second stage" pipeline after running the base checkpoint to improve image quality.
When using the refiner, you can either:
- 1.) use the base model and refiner as an *ensemble of expert denoisers*, as first proposed in [eDiff-I](https://research.nvidia.com/labs/dir/eDiff-I/), or
- 2.) simply run the refiner on the output of the base model with an [SDEdit](https://arxiv.org/abs/2108.01073)-style approach.
**Note**: The idea of using the SD-XL base and refiner as an ensemble of expert denoisers was first proposed by community contributors, who also helped implement it in `diffusers`:
- [SytanSD](https://github.com/SytanSD)
- [bghira](https://github.com/bghira)
- [Birch-san](https://github.com/Birch-san)
- [AmericanPresidentJimmyCarter](https://github.com/AmericanPresidentJimmyCarter)
#### 1.) Ensemble of Expert Denoisers
When using the base and refiner models as an ensemble of expert denoisers, the base model should serve as the expert for the high-noise diffusion stage and the refiner as the expert for the low-noise diffusion stage.
The advantage of 1.) over 2.) is that fewer denoising steps are needed overall, so it is much faster. The drawback is that you cannot inspect the output of the base model, because it is still heavily noised.
To use the base model and refiner as an ensemble of expert denoisers, define the span of timesteps over which each of them should run: the high-noise steps (*i.e.* the base model) and the low-noise steps (*i.e.* the refiner model).
Set the interval with the base model's [`denoising_end`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLPipeline.__call__.denoising_end) and the refiner model's [`denoising_start`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLImg2ImgPipeline.__call__.denoising_start).
Both `denoising_end` and `denoising_start` should be passed as a float between 0 and 1.
When passed, the end and start of denoising are defined as fractions of the discrete timestep interval defined by the model's scheduler.
Because the number of denoising steps is determined by the discrete timesteps the model was trained on and the declared fractional cutoff, this value also overrides `strength` if `strength` is declared.
Let's look at an example.
First, load the two pipelines. Since the text encoders and variational autoencoder are the same, you don't need to load them again for the refiner.
```py
from diffusers import DiffusionPipeline
import torch
base = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
base.to("cuda")
refiner = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
text_encoder_2=base.text_encoder_2,
vae=base.vae,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
)
refiner.to("cuda")
```
Now define the number of inference steps and the point at which the high-noise denoising stage (*i.e.* the base model) should end.
```py
n_steps = 40
high_noise_frac = 0.8
```
The Stable Diffusion XL base model is trained on timesteps 0-999, and the Stable Diffusion XL refiner is fine-tuned from the base model on the low-noise timesteps 0-199, so the base model is used for the first 800 timesteps (high noise) and the refiner is used for the last 200 timesteps (low noise). Therefore, `high_noise_frac` is set to 0.8, so that all steps 200-999 (the first 80% of the denoising timesteps) are performed by the base model and steps 0-199 (the last 20% of the denoising timesteps) are performed by the refiner model.
Remember, the denoising procedure starts at **high-value** (high-noise) timesteps and ends at **low-value** (low-noise) timesteps.
Now let's run the two pipelines. Set `denoising_end` and `denoising_start` to the same value and keep `num_inference_steps` constant. Also remember that the output of the base model should stay in latent space:
```py
prompt = "A majestic lion jumping from a big stone at night"
image = base(
prompt=prompt,
num_inference_steps=n_steps,
denoising_end=high_noise_frac,
output_type="latent",
).images
image = refiner(
prompt=prompt,
num_inference_steps=n_steps,
denoising_start=high_noise_frac,
image=image,
).images[0]
```
Let's take a look at the images.
| Original image | Ensemble of Denoisers |
|---|---|
|  | 
If you run the base model alone for the same 40 steps, the image would have less detail (e.g., the lion's eyes and nose):
<Tip>
The ensemble approach works well with all available schedulers!
</Tip>
#### 2.) Refining the image output from a fully denoised base image
In a typical [`StableDiffusionImg2ImgPipeline`] fashion, the fully denoised image generated by the base model can be further improved using the [refiner checkpoint](https://huggingface.co/stabilityai/stable-diffusion-xl-refiner-1.0).
To do this, simply run the refiner as a normal image-to-image pipeline after the usual "base" text-to-image pipeline. You can keep the output of the base model in latent space.
```py
from diffusers import DiffusionPipeline
import torch
pipe = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
refiner = DiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
text_encoder_2=pipe.text_encoder_2,
vae=pipe.vae,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
)
refiner.to("cuda")
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
image = pipe(prompt=prompt, output_type="latent").images[0]
image = refiner(prompt=prompt, image=image[None, :]).images[0]
```
| Original image | Refined image |
|---|---|
|  |  |
<Tip>
The refiner can also be used in an inpainting setting. Try it out with the [`StableDiffusionXLInpaintPipeline`] class as shown below.
</Tip>
To use the refiner for inpainting in the ensemble-of-denoisers setting, you can do the following:
```py
from diffusers import StableDiffusionXLInpaintPipeline
from diffusers.utils import load_image
pipe = StableDiffusionXLInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
refiner = StableDiffusionXLInpaintPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-refiner-1.0",
text_encoder_2=pipe.text_encoder_2,
vae=pipe.vae,
torch_dtype=torch.float16,
use_safetensors=True,
variant="fp16",
)
refiner.to("cuda")
img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
init_image = load_image(img_url).convert("RGB")
mask_image = load_image(mask_url).convert("RGB")
prompt = "A majestic tiger sitting on a bench"
num_inference_steps = 75
high_noise_frac = 0.7
image = pipe(
prompt=prompt,
image=init_image,
mask_image=mask_image,
num_inference_steps=num_inference_steps,
denoising_start=high_noise_frac,
output_type="latent",
).images
image = refiner(
prompt=prompt,
image=image,
mask_image=mask_image,
num_inference_steps=num_inference_steps,
denoising_start=high_noise_frac,
).images[0]
```
To use the refiner for inpainting in a standard SDE-style setting, remove `denoising_end` and `denoising_start` and choose a smaller number of inference steps for the refiner.
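For instance, a minimal sketch of that variant, reusing the `pipe`, `refiner`, and images from the example above, could look like the following (the step counts and `strength` value are illustrative assumptions, not tuned settings):
```py
image = pipe(
    prompt=prompt,
    image=init_image,
    mask_image=mask_image,
    num_inference_steps=75,
    output_type="latent",  # keep the base output in latent space
).images
image = refiner(
    prompt=prompt,
    image=image,
    mask_image=mask_image,
    num_inference_steps=30,  # fewer steps for the refiner (assumed value)
    strength=0.3,            # assumed value; controls how strongly the refiner modifies the image
).images[0]
```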
### Loading a single checkpoint file / the original file format
By using [`~diffusers.loaders.FromSingleFileMixin.from_single_file`], you can load the original file format into the `diffusers` format:
```py
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
import torch
pipe = StableDiffusionXLPipeline.from_single_file(
"./sd_xl_base_1.0.safetensors", torch_dtype=torch.float16
)
pipe.to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_single_file(
"./sd_xl_refiner_1.0.safetensors", torch_dtype=torch.float16
)
refiner.to("cuda")
```
### Optimizing memory with model offloading
If you run into out-of-memory errors, we recommend using [`StableDiffusionXLPipeline.enable_model_cpu_offload`].
```diff
- pipe.to("cuda")
+ pipe.enable_model_cpu_offload()
```
and
```diff
- refiner.to("cuda")
+ refiner.enable_model_cpu_offload()
```
### Speeding up inference with `torch.compile`
You can speed up inference by using `torch.compile`. This yields a speed-up of **ca.** 20%.
```diff
+ pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+ refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
```
### Running with `torch < 2.0`
**Note**: If you want to run Stable Diffusion XL with `torch` < 2.0, please make sure to enable xformers attention:
```sh
pip install xformers
```
```diff
+pipe.enable_xformers_memory_efficient_attention()
+refiner.enable_xformers_memory_efficient_attention()
```
## StableDiffusionXLPipeline
[[autodoc]] StableDiffusionXLPipeline
- all
- __call__
## StableDiffusionXLImg2ImgPipeline
[[autodoc]] StableDiffusionXLImg2ImgPipeline
- all
- __call__
## StableDiffusionXLInpaintPipeline
[[autodoc]] StableDiffusionXLInpaintPipeline
- all
- __call__
### Passing different prompts to each text encoder
Stable Diffusion XL was trained on two text encoders. The default behavior is to pass the same prompt to each one. However, [some users](https://github.com/huggingface/diffusers/issues/4004#issuecomment-1627764201) have noted that passing a different prompt to each text encoder can improve quality. To do so, pass `prompt_2` and `negative_prompt_2` in addition to `prompt` and `negative_prompt`. The original prompts (`prompt`) and negative prompts (`negative_prompt`) are passed to `text_encoder` ([OpenAI CLIP-ViT/L-14](https://huggingface.co/openai/clip-vit-large-patch14) in the official SDXL 0.9/1.0), while `prompt_2` and `negative_prompt_2` are passed to `text_encoder_2` ([OpenCLIP-ViT/bigG-14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k) in the official SDXL 0.9/1.0).
```py
from diffusers import StableDiffusionXLPipeline
import torch
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-0.9", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.to("cuda")
# prompt is passed to OAI CLIP-ViT/L-14
prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
# prompt_2 is passed to OpenCLIP-ViT/bigG-14
prompt_2 = "monet painting"
image = pipe(prompt=prompt, prompt_2=prompt_2).images[0]
``` | diffusers/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md/0 | {
"file_path": "diffusers/docs/source/ko/api/pipelines/stable_diffusion/stable_diffusion_xl.md",
"repo_id": "diffusers",
"token_count": 10962
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Installing xFormers
We recommend using [xFormers](https://github.com/facebookresearch/xformers) for both inference and training.
In our own tests, the optimizations performed in the attention blocks allow for both faster speed and reduced memory consumption.
Starting with version 0.0.16 of xFormers, released in January 2023, it can be easily installed using a pre-built pip wheel:
```bash
pip install xformers
```
<Tip>
The xFormers pip package requires a recent version of PyTorch (1.13.1 as of xFormers 0.0.16). If you need to use an older version of PyTorch, we recommend installing xFormers from source following the [project instructions](https://github.com/facebookresearch/xformers#installing-xformers).
</Tip>
After xFormers is installed, you can use `enable_xformers_memory_efficient_attention()` for faster inference and reduced memory consumption, as described [here](fp16#memory-efficient-attention).
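As a minimal sketch (the checkpoint name is just an illustrative example), enabling it on a pipeline looks like this:
```py
import torch
from diffusers import DiffusionPipeline

# illustrative checkpoint; any Stable Diffusion pipeline can enable xFormers the same way
pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")
pipe.enable_xformers_memory_efficient_attention()
```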
<Tip warning={true}>
According to [this issue](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212), xFormers `v0.0.16` cannot be used for training (fine-tuning or DreamBooth) on some GPUs. If you observe this problem, please install the development version as indicated in the linked comment.
</Tip>
| diffusers/docs/source/ko/optimization/xformers.md/0 | {
"file_path": "diffusers/docs/source/ko/optimization/xformers.md",
"repo_id": "diffusers",
"token_count": 1049
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Overview
Welcome to 🧨 Diffusers! If you're new to diffusion models and generative AI and want to learn more, you've come to the right place. These tutorials are designed to give you a gentle introduction to diffusion models and to help you understand the library fundamentals - the core components and how 🧨 Diffusers is meant to be used.
You'll learn how to use a pipeline for inference to rapidly generate things, and then deconstruct that pipeline to really understand how to use the library as a modular toolbox to build your own diffusion systems. In the next lesson, you'll learn how to train your own diffusion model to generate what you want.
After completing the tutorials, you'll have gained the skills you need to start exploring the library on your own and see how to use it for your own projects and applications.
Feel free to join our community on [Discord](https://discord.com/invite/JfAtkvEtRb) or the [forums](https://discuss.huggingface.co/c/discussion-related-to-httpsgithubcomhuggingfacediffusers/63) to connect and collaborate with other users and developers!
Let's start diffusing! 🧨 | diffusers/docs/source/ko/tutorials/tutorial_overview.md/0 | {
"file_path": "diffusers/docs/source/ko/tutorials/tutorial_overview.md",
"repo_id": "diffusers",
"token_count": 1209
} |
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# 🧨 Stable Diffusion in JAX / Flax!
[[open-in-colab]]
🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version 0.5.1! This allows for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform.
This notebook shows how to run inference using JAX / Flax. If you want more details about how Stable Diffusion works or want to run it on a GPU, please refer to [this notebook](https://huggingface.co/docs/diffusers/stable_diffusion).
First, make sure you are using a TPU backend. If you are running this notebook in Colab, select Runtime in the menu above, then select the "Change runtime type" option, and then select TPU under the Hardware accelerator setting.
Note that JAX is not exclusive to TPUs, but it shines on that hardware because each TPU server has 8 TPU accelerators working in parallel.
## Setup
First make sure diffusers is installed.
```bash
!pip install jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
!pip install diffusers
```
```python
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
import jax
```
```python
num_devices = jax.device_count()
device_type = jax.devices()[0].device_kind
print(f"Found {num_devices} JAX devices of type {device_type}.")
assert (
"TPU" in device_type
), "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator"
```
```python out
Found 8 JAX devices of type Cloud TPU.
```
Then we import all the dependencies.
```python
import numpy as np
import jax
import jax.numpy as jnp
from pathlib import Path
from jax import pmap
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image
from huggingface_hub import notebook_login
from diffusers import FlaxStableDiffusionPipeline
```
## Model loading
TPU devices support bfloat16, an efficient half-float type. We'll use it for our tests, but you can also use float32 to use full precision instead.
```python
dtype = jnp.bfloat16
```
Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading the pretrained Flax pipeline returns *both* the pipeline itself and the model weights (or parameters). We are using the bf16 version of the weights, which leads to type warnings that you can safely ignore.
```python
pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
variant="bf16",
dtype=dtype,
)
```
## Inference
Since TPUs usually have 8 devices working in parallel, we'll replicate our prompt as many times as there are devices. Then we'll perform inference on the 8 devices at once, with each device generating one image. That way, we get 8 images in the same amount of time it takes one chip to generate a single one.
After replicating the prompt, we obtain the tokenized text ids by invoking the pipeline's `prepare_inputs` function. The length of the tokenized text is set to 77 tokens, as required by the configuration of the underlying CLIP text model.
```python
prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
prompt = [prompt] * jax.device_count()
prompt_ids = pipeline.prepare_inputs(prompt)
prompt_ids.shape
```
```python out
(8, 77)
```
### Replication and parallelization
Model parameters and inputs have to be replicated across the 8 parallel devices we have. The parameters dictionary is replicated using `flax.jax_utils.replicate`, which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
```python
p_params = replicate(params)
```
```python
prompt_ids = shard(prompt_ids)
prompt_ids.shape
```
```python out
(8, 1, 77)
```
This shape means that each one of the 8 devices receives as input a `jnp` array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, it could be larger than `1` if you want to generate multiple images (per chip) at once.
We are almost ready to generate images! We just need to create a random number generator to pass to the generation function. This is standard procedure in Flax, which is very serious and opinionated about random numbers: all functions that deal with random numbers are expected to receive a generator. This ensures reproducibility, even when training across multiple distributed devices.
The helper function below uses a seed to initialize a random number generator. As long as you use the same seed, you'll get the exact same results. Feel free to use different seeds when exploring results later in the notebook.
```python
def create_key(seed=0):
return jax.random.PRNGKey(seed)
```
We get an rng and then "split" it 8 times so each device receives a different generator. Therefore, each device creates a different image, and the full process is reproducible.
```python
rng = create_key(0)
rng = jax.random.split(rng, jax.device_count())
```
JAX code can be compiled into an efficient representation that runs very fast. However, we need to ensure that all inputs have the same shape in subsequent calls; otherwise, JAX has to recompile the code, and we wouldn't be able to take advantage of the optimized speed.
The Flax pipeline can compile the code for us if we pass `jit = True` as an argument. It also ensures that the model runs in parallel on the 8 available devices.
The first time we run the following cell it takes a long time to compile, but subsequent calls (even with different inputs) are much faster. For example, it took more than a minute to compile on a TPU v2-8 when we tested, but then it takes about 7s for future inference runs.
```
%%time
images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
```
```python out
CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
Wall time: 1min 29s
```
The returned array has shape `(8, 1, 512, 512, 3)`. We reshape it to get rid of the second dimension and obtain 8 images of `512 × 512 × 3`, and then convert them to PIL.
```python
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
```
### Visualization
Let's create a helper function to display the images in a grid.
```python
def image_grid(imgs, rows, cols):
w, h = imgs[0].size
grid = Image.new("RGB", size=(cols * w, rows * h))
for i, img in enumerate(imgs):
grid.paste(img, box=(i % cols * w, i // cols * h))
return grid
```
```python
image_grid(images, 2, 4)
```

## Using different prompts
We don't have to replicate the same prompt on all the devices. We can do whatever we want: generate 2 prompts 4 times each, or even generate 8 different prompts at once. Let's give it a try!
First, we'll refactor the input preparation code into a handy function:
```python
prompts = [
"Labrador in the style of Hokusai",
"Painting of a squirrel skating in New York",
"HAL-9000 in the style of Van Gogh",
"Times Square under water, with fish and a dolphin swimming around",
"Ancient Roman fresco showing a man working on his laptop",
"Close-up photograph of young black woman against urban background, high quality, bokeh",
"Armchair in the shape of an avocado",
"Clown astronaut in space, with Earth in the background",
]
```
```python
prompt_ids = pipeline.prepare_inputs(prompts)
prompt_ids = shard(prompt_ids)
images = pipeline(prompt_ids, p_params, rng, jit=True).images
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
images = pipeline.numpy_to_pil(images)
image_grid(images, 2, 4)
```

## How does parallelization work?
We said before that the `diffusers` Flax pipeline automatically compiles the model and runs it in parallel on all available devices. Let's now briefly look inside that process to show how it works.
JAX parallelization can be done in multiple ways. The easiest one revolves around using the `jax.pmap` function to achieve single-program, multiple-data (SPMD) parallelization. It means running several copies of the same code, each on different data inputs. More sophisticated approaches are possible, so if you are interested, we invite you to explore this topic in the [JAX documentation](https://jax.readthedocs.io/en/latest/index.html) and the [`pjit` pages](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit)!
`jax.pmap` does two things for us:
- Compiles (or `jit`s) the code, as if we had invoked `jax.jit()`. This does not happen when we call `pmap`, but the first time the pmapped function is invoked.
- Ensures the compiled code runs in parallel on all the available devices.
To show how it works, we `pmap` the `_generate` method of the pipeline, which is the private method that runs image generation. Please note that this method may be renamed or removed in future releases of `diffusers`.
```python
p_generate = pmap(pipeline._generate)
```
After using `pmap`, the prepared function `p_generate` will conceptually do the following:
* Invoke a copy of the underlying function `pipeline._generate` on each device.
* Send each device a different portion of the input arguments. That's what sharding is used for. In our case, `prompt_ids` has shape `(8, 1, 77, 768)`, so the array is split into 8 and each copy of `_generate` receives an input with shape `(1, 77, 768)`.
You can code `_generate` while completely ignoring the fact that it will be invoked in parallel. You just care about the batch size (`1` in this example) and the dimensions that make sense for your code, and you don't have to change anything to make it work in parallel.
The same way as when we used the pipeline call, the first time we run the following cell it will take a while, but then it will be much faster.
```
%%time
images = p_generate(prompt_ids, p_params, rng)
images = images.block_until_ready()
images.shape
```
```python out
CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
Wall time: 1min 15s
```
```python
images.shape
```
```python out
(8, 1, 512, 512, 3)
```
We use `block_until_ready()` to correctly measure inference time, because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use that in your code; blocking occurs automatically when you want to use the result of a computation that has not yet been materialized. | diffusers/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.md/0 | {
"file_path": "diffusers/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.md",
"repo_id": "diffusers",
"token_count": 8474
} |
# Community Scripts
**Community scripts** consist of inference examples using Diffusers pipelines that have been added by the community.
Please have a look at the following table to get an overview of all community examples. Click on the **Code Example** to get a copy-and-paste code example that you can try out.
If a community script doesn't work as expected, please open an issue and ping the author on it.
| Example | Description | Code Example | Colab | Author |
|:--------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------:|
| Using IP-Adapter with Negative Noise | Using negative noise with IP-adapter to better control the generation (see the [original post](https://github.com/huggingface/diffusers/discussions/7167) on the forum for more details) | [IP-Adapter Negative Noise](#ip-adapter-negative-noise) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/ip_adapter_negative_noise.ipynb) | [Álvaro Somoza](https://github.com/asomoza)|
| Asymmetric Tiling |configure seamless image tiling independently for the X and Y axes | [Asymmetric Tiling](#Asymmetric-Tiling ) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/asymetric_tiling.ipynb) | [alexisrolland](https://github.com/alexisrolland)|
| Prompt Scheduling Callback |Allows changing prompts during a generation | [Prompt Scheduling-Callback](#Prompt-Scheduling-Callback ) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/prompt_scheduling_callback.ipynb) | [hlky](https://github.com/hlky)|
## Example usages
### IP Adapter Negative Noise
Diffusers pipelines are fully integrated with IP-Adapter, which allows you to prompt the diffusion model with an image. However, it does not support negative image prompts (there is no `negative_ip_adapter_image` argument) the same way it supports negative text prompts. When you pass an `ip_adapter_image`, it will create a zero-filled tensor as a negative image. This script shows you how to create a negative noise from `ip_adapter_image` and use it to significantly improve the generation quality while preserving the composition of images.
[cubiq](https://github.com/cubiq) initially developed this feature in his [repository](https://github.com/cubiq/ComfyUI_IPAdapter_plus). The community script was contributed by [asomoza](https://github.com/Somoza). You can find more details about this experimentation in [this discussion](https://github.com/huggingface/diffusers/discussions/7167)
IP-Adapter without negative noise
|source|result|
|---|---|
|||
IP-Adapter with negative noise
|source|result|
|---|---|
|||
```python
import torch
from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, StableDiffusionXLPipeline
from diffusers.models import ImageProjection
from diffusers.utils import load_image
def encode_image(
image_encoder,
feature_extractor,
image,
device,
num_images_per_prompt,
output_hidden_states=None,
negative_image=None,
):
dtype = next(image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
if output_hidden_states:
image_enc_hidden_states = image_encoder(image, output_hidden_states=True).hidden_states[-2]
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
if negative_image is None:
uncond_image_enc_hidden_states = image_encoder(
torch.zeros_like(image), output_hidden_states=True
).hidden_states[-2]
else:
if not isinstance(negative_image, torch.Tensor):
negative_image = feature_extractor(negative_image, return_tensors="pt").pixel_values
negative_image = negative_image.to(device=device, dtype=dtype)
uncond_image_enc_hidden_states = image_encoder(negative_image, output_hidden_states=True).hidden_states[-2]
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
return image_enc_hidden_states, uncond_image_enc_hidden_states
else:
image_embeds = image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
@torch.no_grad()
def prepare_ip_adapter_image_embeds(
unet,
image_encoder,
feature_extractor,
ip_adapter_image,
do_classifier_free_guidance,
device,
num_images_per_prompt,
ip_adapter_negative_image=None,
):
if not isinstance(ip_adapter_image, list):
ip_adapter_image = [ip_adapter_image]
if len(ip_adapter_image) != len(unet.encoder_hid_proj.image_projection_layers):
raise ValueError(
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
)
image_embeds = []
for single_ip_adapter_image, image_proj_layer in zip(
ip_adapter_image, unet.encoder_hid_proj.image_projection_layers
):
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
single_image_embeds, single_negative_image_embeds = encode_image(
image_encoder,
feature_extractor,
single_ip_adapter_image,
device,
1,
output_hidden_state,
negative_image=ip_adapter_negative_image,
)
single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0)
if do_classifier_free_guidance:
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
single_image_embeds = single_image_embeds.to(device)
image_embeds.append(single_image_embeds)
return image_embeds
vae = AutoencoderKL.from_pretrained(
"madebyollin/sdxl-vae-fp16-fix",
torch_dtype=torch.float16,
).to("cuda")
pipeline = StableDiffusionXLPipeline.from_pretrained(
"RunDiffusion/Juggernaut-XL-v9",
torch_dtype=torch.float16,
vae=vae,
variant="fp16",
).to("cuda")
pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
pipeline.scheduler.config.use_karras_sigmas = True
pipeline.load_ip_adapter(
"h94/IP-Adapter",
subfolder="sdxl_models",
weight_name="ip-adapter-plus_sdxl_vit-h.safetensors",
image_encoder_folder="models/image_encoder",
)
pipeline.set_ip_adapter_scale(0.7)
ip_image = load_image("source.png")
negative_ip_image = load_image("noise.png")
image_embeds = prepare_ip_adapter_image_embeds(
unet=pipeline.unet,
image_encoder=pipeline.image_encoder,
feature_extractor=pipeline.feature_extractor,
ip_adapter_image=[[ip_image]],
do_classifier_free_guidance=True,
device="cuda",
num_images_per_prompt=1,
ip_adapter_negative_image=negative_ip_image,
)
prompt = "cinematic photo of a cyborg in the city, 4k, high quality, intricate, highly detailed"
negative_prompt = "blurry, smooth, plastic"
image = pipeline(
prompt=prompt,
negative_prompt=negative_prompt,
ip_adapter_image_embeds=image_embeds,
guidance_scale=6.0,
num_inference_steps=25,
generator=torch.Generator(device="cpu").manual_seed(1556265306),
).images[0]
image.save("result.png")
```
### Asymmetric Tiling
Stable Diffusion is not trained to generate seamless textures. However, you can use this simple script to add tiling to your generation. This script is contributed by [alexisrolland](https://github.com/alexisrolland). See more details in the [this issue](https://github.com/huggingface/diffusers/issues/556)
|Generated|Tiled|
|---|---|
|||
```py
import torch
from typing import Optional
from diffusers import StableDiffusionPipeline
from diffusers.models.lora import LoRACompatibleConv
def seamless_tiling(pipeline, x_axis, y_axis):
def asymmetric_conv2d_convforward(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
return torch.nn.functional.conv2d(working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups)
x_mode = 'circular' if x_axis else 'constant'
y_mode = 'circular' if y_axis else 'constant'
targets = [pipeline.vae, pipeline.text_encoder, pipeline.unet]
convolution_layers = []
for target in targets:
for module in target.modules():
if isinstance(module, torch.nn.Conv2d):
convolution_layers.append(module)
for layer in convolution_layers:
if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
layer.lora_layer = lambda * x: 0
layer._conv_forward = asymmetric_conv2d_convforward.__get__(layer, torch.nn.Conv2d)
return pipeline
pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True)
pipeline.enable_model_cpu_offload()
prompt = ["texture of a red brick wall"]
seed = 123456
generator = torch.Generator(device='cuda').manual_seed(seed)
pipeline = seamless_tiling(pipeline=pipeline, x_axis=True, y_axis=True)
image = pipeline(
prompt=prompt,
width=512,
height=512,
num_inference_steps=20,
guidance_scale=7,
num_images_per_prompt=1,
generator=generator
).images[0]
seamless_tiling(pipeline=pipeline, x_axis=False, y_axis=False)
torch.cuda.empty_cache()
image.save('image.png')
```
### Prompt Scheduling callback
Prompt scheduling callback allows changing prompts during a generation, like [prompt editing in A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-editing)
```python
from diffusers import StableDiffusionPipeline
from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
from diffusers.configuration_utils import register_to_config
import torch
from typing import Any, Dict, Tuple, Union
class SDPromptSchedulingCallback(PipelineCallback):
@register_to_config
def __init__(
self,
encoded_prompt: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
cutoff_step_ratio=None,
cutoff_step_index=None,
):
super().__init__(
cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
)
tensor_inputs = ["prompt_embeds"]
def callback_fn(
self, pipeline, step_index, timestep, callback_kwargs
) -> Dict[str, Any]:
cutoff_step_ratio = self.config.cutoff_step_ratio
cutoff_step_index = self.config.cutoff_step_index
if isinstance(self.config.encoded_prompt, tuple):
prompt_embeds, negative_prompt_embeds = self.config.encoded_prompt
else:
prompt_embeds = self.config.encoded_prompt
# Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
cutoff_step = (
cutoff_step_index
if cutoff_step_index is not None
else int(pipeline.num_timesteps * cutoff_step_ratio)
)
if step_index == cutoff_step:
if pipeline.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
return callback_kwargs
pipeline: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5",
torch_dtype=torch.float16,
variant="fp16",
use_safetensors=True,
).to("cuda")
pipeline.safety_checker = None
pipeline.requires_safety_checker = False
callback = MultiPipelineCallbacks(
[
SDPromptSchedulingCallback(
encoded_prompt=pipeline.encode_prompt(
prompt=f"prompt {index}",
negative_prompt=f"negative prompt {index}",
device=pipeline._execution_device,
num_images_per_prompt=1,
# pipeline.do_classifier_free_guidance can't be accessed until after pipeline is ran
do_classifier_free_guidance=True,
),
cutoff_step_index=index,
) for index in range(1, 20)
]
)
image = pipeline(
    prompt="prompt",
negative_prompt="negative prompt",
callback_on_step_end=callback,
callback_on_step_end_tensor_inputs=["prompt_embeds"],
).images[0]
torch.cuda.empty_cache()
image.save('image.png')
```
```python
from diffusers import StableDiffusionXLPipeline
from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
from diffusers.configuration_utils import register_to_config
import torch
from typing import Any, Dict, Tuple, Union
class SDXLPromptSchedulingCallback(PipelineCallback):
@register_to_config
def __init__(
self,
encoded_prompt: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
add_text_embeds: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
add_time_ids: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
cutoff_step_ratio=None,
cutoff_step_index=None,
):
super().__init__(
cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
)
tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"]
def callback_fn(
self, pipeline, step_index, timestep, callback_kwargs
) -> Dict[str, Any]:
cutoff_step_ratio = self.config.cutoff_step_ratio
cutoff_step_index = self.config.cutoff_step_index
if isinstance(self.config.encoded_prompt, tuple):
prompt_embeds, negative_prompt_embeds = self.config.encoded_prompt
else:
prompt_embeds = self.config.encoded_prompt
if isinstance(self.config.add_text_embeds, tuple):
add_text_embeds, negative_add_text_embeds = self.config.add_text_embeds
else:
add_text_embeds = self.config.add_text_embeds
if isinstance(self.config.add_time_ids, tuple):
add_time_ids, negative_add_time_ids = self.config.add_time_ids
else:
add_time_ids = self.config.add_time_ids
# Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
cutoff_step = (
cutoff_step_index
if cutoff_step_index is not None
else int(pipeline.num_timesteps * cutoff_step_ratio)
)
if step_index == cutoff_step:
if pipeline.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
add_text_embeds = torch.cat([negative_add_text_embeds, add_text_embeds])
add_time_ids = torch.cat([negative_add_time_ids, add_time_ids])
callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
callback_kwargs[self.tensor_inputs[1]] = add_text_embeds
callback_kwargs[self.tensor_inputs[2]] = add_time_ids
return callback_kwargs
pipeline: StableDiffusionXLPipeline = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
torch_dtype=torch.float16,
variant="fp16",
use_safetensors=True,
).to("cuda")
callbacks = []
for index in range(1, 20):
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = pipeline.encode_prompt(
prompt=f"prompt {index}",
negative_prompt=f"prompt {index}",
device=pipeline._execution_device,
num_images_per_prompt=1,
# pipeline.do_classifier_free_guidance can't be accessed until after pipeline is ran
do_classifier_free_guidance=True,
)
text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
add_time_ids = pipeline._get_add_time_ids(
(1024, 1024),
(0, 0),
(1024, 1024),
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
negative_add_time_ids = pipeline._get_add_time_ids(
(1024, 1024),
(0, 0),
(1024, 1024),
dtype=prompt_embeds.dtype,
text_encoder_projection_dim=text_encoder_projection_dim,
)
callbacks.append(
SDXLPromptSchedulingCallback(
encoded_prompt=(prompt_embeds, negative_prompt_embeds),
add_text_embeds=(pooled_prompt_embeds, negative_pooled_prompt_embeds),
add_time_ids=(add_time_ids, negative_add_time_ids),
cutoff_step_index=index,
)
)
callback = MultiPipelineCallbacks(callbacks)
image = pipeline(
prompt="prompt",
negative_prompt="negative prompt",
callback_on_step_end=callback,
callback_on_step_end_tensor_inputs=[
"prompt_embeds",
"add_text_embeds",
"add_time_ids",
],
).images[0]
```
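One detail worth noting: `encode_prompt` is called here outside of the pipeline's `__call__`, so it is not covered by the pipeline's own `no_grad` decorator. As a small optional sketch (the variable name `encoded` is illustrative), the pre-encoding loop can be wrapped in `torch.no_grad()` so the text encoders do not keep autograd state around:

```python
import torch

with torch.no_grad():
    encoded = pipeline.encode_prompt(
        prompt="prompt 1",
        negative_prompt="negative prompt 1",
        device=pipeline._execution_device,
        num_images_per_prompt=1,
        do_classifier_free_guidance=True,
    )
```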
| diffusers/examples/community/README_community_scripts.md/0 | {
"file_path": "diffusers/examples/community/README_community_scripts.md",
"repo_id": "diffusers",
"token_count": 9428
} |
import inspect
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
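# A minimal usage sketch for this community pipeline (the checkpoint id and the input images are
# assumptions; any Stable Diffusion v1.x inpainting checkpoint whose UNet expects 9 input channels
# should work, and `background_image`, `overlay_rgba`, and `mask` are PIL images loaded elsewhere):
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="img2img_inpainting",
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   result = pipe(
#       prompt="a red bench in a park",
#       image=background_image,    # RGB PIL image, 512x512
#       inner_image=overlay_rgba,  # RGBA PIL image pasted onto `image` before inpainting
#       mask_image=mask,           # L-mode PIL image; white pixels are repainted
#   ).images[0]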
def prepare_mask_and_masked_image(image, mask):
image = np.array(image.convert("RGB"))
image = image[None].transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
mask = np.array(mask.convert("L"))
mask = mask.astype(np.float32) / 255.0
mask = mask[None, None]
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
masked_image = image * (mask < 0.5)
return mask, masked_image
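# For a 512x512 input pair, `prepare_mask_and_masked_image` returns:
#   mask         -> float tensor of shape (1, 1, 512, 512) with values in {0.0, 1.0} (1.0 = repaint)
#   masked_image -> float tensor of shape (1, 3, 512, 512) in [-1, 1], with the repaint region zeroed out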
def check_size(image, height, width):
if isinstance(image, PIL.Image.Image):
w, h = image.size
elif isinstance(image, torch.Tensor):
*_, h, w = image.shape
if h != height or w != width:
raise ValueError(f"Image size should be {height}x{width}, but got {h}x{w}")
def overlay_inner_image(image, inner_image, paste_offset: Tuple[int] = (0, 0)):
inner_image = inner_image.convert("RGBA")
image = image.convert("RGB")
image.paste(inner_image, paste_offset, inner_image)
image = image.convert("RGB")
return image
class ImageToImageInpaintingPipeline(DiffusionPipeline):
r"""
Pipeline for text-guided image-to-image inpainting using Stable Diffusion. *This is an experimental feature*.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
):
super().__init__()
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
image: Union[torch.Tensor, PIL.Image.Image],
inner_image: Union[torch.Tensor, PIL.Image.Image],
mask_image: Union[torch.Tensor, PIL.Image.Image],
height: int = 512,
width: int = 512,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[torch.Generator] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
**kwargs,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
image (`torch.Tensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
be masked out with `mask_image` and repainted according to `prompt`.
inner_image (`torch.Tensor` or `PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be overlayed onto `image`. Non-transparent
regions of `inner_image` must fit inside white pixels in `mask_image`. Expects four channels, with
the last channel representing the alpha channel, which will be used to blend `inner_image` with
`image`. If the alpha channel is not provided, the image will be forcibly converted to RGBA.
mask_image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be `(B, H, W, 1)`.
height (`int`, *optional*, defaults to 512):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to 512):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. Ignored when not using guidance (i.e., ignored
if `guidance_scale` is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
# check if input sizes are correct
check_size(image, height, width)
check_size(inner_image, height, width)
check_size(mask_image, height, width)
# get prompt text embeddings
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
bs_embed, seq_len, _ = text_embeddings.shape
text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""]
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = text_input_ids.shape[-1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = uncond_embeddings.shape[1]
uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
num_channels_latents = self.vae.config.latent_channels
latents_shape = (batch_size * num_images_per_prompt, num_channels_latents, height // 8, width // 8)
latents_dtype = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
self.device
)
else:
latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self.device)
# overlay the inner image
image = overlay_inner_image(image, inner_image)
# prepare mask and masked_image
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
mask = mask.to(device=self.device, dtype=text_embeddings.dtype)
masked_image = masked_image.to(device=self.device, dtype=text_embeddings.dtype)
# resize the mask to latents shape as we concatenate the mask to the latents
mask = torch.nn.functional.interpolate(mask, size=(height // 8, width // 8))
# encode the mask image into latents space so we can concatenate it to the latents
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
masked_image_latents = 0.18215 * masked_image_latents
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
mask = mask.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
masked_image_latents = masked_image_latents.repeat(batch_size * num_images_per_prompt, 1, 1, 1)
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
masked_image_latents = (
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
)
num_channels_mask = mask.shape[1]
num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
latents = 1 / 0.18215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
self.device
)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
)
else:
has_nsfw_concept = None
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/examples/community/img2img_inpainting.py/0 | {
"file_path": "diffusers/examples/community/img2img_inpainting.py",
"repo_id": "diffusers",
"token_count": 9669
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Based on [🪆Matryoshka Diffusion Models](https://huggingface.co/papers/2310.15111).
# Authors: Jiatao Gu, Shuangfei Zhai, Yizhe Zhang, Josh Susskind, Navdeep Jaitly
# Code: https://github.com/apple/ml-mdm with MIT license
#
# Adapted to Diffusers by [M. Tolga Cangöz](https://github.com/tolgacangoz).
import gc
import inspect
import math
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from packaging import version
from PIL import Image
from torch import nn
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, T5EncoderModel, T5TokenizerFast
from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.configuration_utils import ConfigMixin, FrozenDict, LegacyConfigMixin, register_to_config
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import (
FromSingleFileMixin,
IPAdapterMixin,
PeftAdapterMixin,
StableDiffusionLoraLoaderMixin,
TextualInversionLoaderMixin,
UNet2DConditionLoadersMixin,
)
from diffusers.loaders.single_file_model import FromOriginalModelMixin
from diffusers.models.activations import GELU, get_activation
from diffusers.models.attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
Attention,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
FusedAttnProcessor2_0,
)
from diffusers.models.downsampling import Downsample2D
from diffusers.models.embeddings import (
GaussianFourierProjection,
GLIGENTextBoundingboxProjection,
ImageHintTimeEmbedding,
ImageProjection,
ImageTimeEmbedding,
TextImageProjection,
TextImageTimeEmbedding,
TextTimeEmbedding,
TimestepEmbedding,
Timesteps,
)
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.models.modeling_utils import LegacyModelMixin, ModelMixin
from diffusers.models.resnet import ResnetBlock2D
from diffusers.models.unets.unet_2d_blocks import DownBlock2D, UpBlock2D
from diffusers.models.upsampling import Upsample2D
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import (
USE_PEFT_BACKEND,
BaseOutput,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
scale_lora_layers,
unscale_lora_layers,
)
from diffusers.utils.torch_utils import apply_freeu, randn_tensor
if is_torch_xla_available():
import torch_xla.core.xla_model as xm # type: ignore
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import make_image_grid
>>> # nesting_level=0 -> 64x64; nesting_level=1 -> 256x256 - 64x64; nesting_level=2 -> 1024x1024 - 256x256 - 64x64
>>> pipe = DiffusionPipeline.from_pretrained("tolgacangoz/matryoshka-diffusion-models",
... nesting_level=0,
... trust_remote_code=False, # One needs to give permission for this code to run
... ).to("cuda")
>>> prompt0 = "a blue jay stops on the top of a helmet of Japanese samurai, background with sakura tree"
>>> prompt = f"breathtaking {prompt0}. award-winning, professional, highly detailed"
>>> image = pipe(prompt, num_inference_steps=50).images
>>> make_image_grid(image, rows=1, cols=len(image))
>>> # pipe.change_nesting_level(<int>) # 0, 1, or 2
>>> # 50+, 100+, and 250+ num_inference_steps are recommended for nesting levels 0, 1, and 2 respectively.
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.rescale_noise_cfg
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
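# Note: guidance_rescale=0.0 leaves `noise_cfg` unchanged, while guidance_rescale=1.0 fully rescales it
# to match the per-sample standard deviation of `noise_pred_text`.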
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
scheduler,
num_inference_steps: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
timesteps: Optional[List[int]] = None,
sigmas: Optional[List[float]] = None,
**kwargs,
):
"""
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
Args:
scheduler (`SchedulerMixin`):
The scheduler to get timesteps from.
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
must be `None`.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
timesteps (`List[int]`, *optional*):
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
`num_inference_steps` and `sigmas` must be `None`.
sigmas (`List[float]`, *optional*):
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
`num_inference_steps` and `timesteps` must be `None`.
Returns:
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
second element is the number of inference steps.
"""
if timesteps is not None and sigmas is not None:
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
if timesteps is not None:
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accepts_timesteps:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" timestep schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
elif sigmas is not None:
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accept_sigmas:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" sigmas schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
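# Minimal usage sketch (assumes `scheduler` is any diffusers scheduler instance):
#   timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=50, device="cuda")
#   # or, with an explicit schedule (only if the scheduler's `set_timesteps` accepts `timesteps`):
#   timesteps, num_inference_steps = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249, 0], device="cuda")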
# Copied from diffusers.models.attention._chunked_feed_forward
def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int):
# "feed_forward_chunk_size" can be used to save memory
if hidden_states.shape[chunk_dim] % chunk_size != 0:
raise ValueError(
f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
)
num_chunks = hidden_states.shape[chunk_dim] // chunk_size
ff_output = torch.cat(
[ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
dim=chunk_dim,
)
return ff_output
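# e.g. for hidden_states of shape (B, 4096, C) with chunk_dim=1 and chunk_size=1024, the feed-forward
# runs on 4 chunks of shape (B, 1024, C) and the outputs are concatenated back along dim=1.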
@dataclass
class MatryoshkaDDIMSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: Union[torch.Tensor, List[torch.Tensor]]
pred_original_sample: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None
# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar
def betas_for_alpha_bar(
num_diffusion_timesteps,
max_beta=0.999,
alpha_transform_type="cosine",
):
"""
Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
(1-beta) over time from t = [0,1].
Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
to that part of the diffusion process.
Args:
num_diffusion_timesteps (`int`): the number of betas to produce.
max_beta (`float`): the maximum beta to use; use values lower than 1 to
prevent singularities.
alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar.
Choose from `cosine` or `exp`
Returns:
betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(t):
return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t):
return math.exp(t * -12.0)
else:
raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
return torch.tensor(betas, dtype=torch.float32)
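# e.g. betas_for_alpha_bar(1000) returns a (1000,) float32 tensor following the "cosine" alpha_bar schedule,
# with each beta capped at max_beta=0.999.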
# Copied from diffusers.schedulers.scheduling_ddim.rescale_zero_terminal_snr
def rescale_zero_terminal_snr(betas):
"""
Rescales betas to have zero terminal SNR Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)
Args:
betas (`torch.Tensor`):
the betas that the scheduler is being initialized with.
Returns:
`torch.Tensor`: rescaled betas with zero terminal SNR
"""
# Convert betas to alphas_bar_sqrt
alphas = 1.0 - betas
alphas_cumprod = torch.cumprod(alphas, dim=0)
alphas_bar_sqrt = alphas_cumprod.sqrt()
# Store old values.
alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()
# Shift so the last timestep is zero.
alphas_bar_sqrt -= alphas_bar_sqrt_T
# Scale so the first timestep is back to the old value.
alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)
# Convert alphas_bar_sqrt to betas
alphas_bar = alphas_bar_sqrt**2 # Revert sqrt
alphas = alphas_bar[1:] / alphas_bar[:-1] # Revert cumprod
alphas = torch.cat([alphas_bar[0:1], alphas])
betas = 1 - alphas
return betas
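# After this rescaling, alphas_cumprod[-1] == 0 exactly (zero terminal SNR), so the final beta equals 1,
# while alphas_cumprod[0] is left unchanged.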
class MatryoshkaDDIMScheduler(SchedulerMixin, ConfigMixin):
"""
`DDIMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
non-Markovian guidance.
This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
methods the library implements for all schedulers such as loading and saving.
Args:
num_train_timesteps (`int`, defaults to 1000):
The number of diffusion steps to train the model.
beta_start (`float`, defaults to 0.0001):
The starting `beta` value of inference.
beta_end (`float`, defaults to 0.02):
The final `beta` value.
beta_schedule (`str`, defaults to `"linear"`):
The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, *optional*):
Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
clip_sample (`bool`, defaults to `True`):
Clip the predicted sample for numerical stability.
clip_sample_range (`float`, defaults to 1.0):
The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
set_alpha_to_one (`bool`, defaults to `True`):
Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
otherwise it uses the alpha value at step 0.
steps_offset (`int`, defaults to 0):
An offset added to the inference steps, as required by some model families.
prediction_type (`str`, defaults to `epsilon`, *optional*):
Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
`sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen
Video](https://imagen.research.google/video/paper.pdf) paper).
thresholding (`bool`, defaults to `False`):
Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
as Stable Diffusion.
dynamic_thresholding_ratio (`float`, defaults to 0.995):
The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
sample_max_value (`float`, defaults to 1.0):
The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
timestep_spacing (`str`, defaults to `"leading"`):
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
rescale_betas_zero_snr (`bool`, defaults to `False`):
Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
dark samples instead of limiting it to samples with medium brightness. Loosely related to
[`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
"""
order = 1
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
clip_sample: bool = True,
set_alpha_to_one: bool = True,
steps_offset: int = 0,
prediction_type: str = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
clip_sample_range: float = 1.0,
sample_max_value: float = 1.0,
timestep_spacing: str = "leading",
rescale_betas_zero_snr: bool = False,
):
if trained_betas is not None:
self.betas = torch.tensor(trained_betas, dtype=torch.float32)
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
elif beta_schedule == "squaredcos_cap_v2":
if self.config.timestep_spacing == "matryoshka_style":
self.betas = torch.cat((torch.tensor([0]), betas_for_alpha_bar(num_train_timesteps)))
else:
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps)
else:
raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
# Rescale for zero SNR
if rescale_betas_zero_snr:
self.betas = rescale_zero_terminal_snr(self.betas)
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
# At every step in ddim, we are looking into the previous alphas_cumprod
# For the final step, there is no previous alphas_cumprod because we are already at 0
# `set_alpha_to_one` decides whether we set this parameter simply to one or
# whether we use the final alpha of the "non-previous" one.
self.final_alpha_cumprod = torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
# standard deviation of the initial noise distribution
self.init_noise_sigma = 1.0
# setable values
self.num_inference_steps = None
self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64))
self.scales = None
self.schedule_shifted_power = 1.0
def scale_model_input(self, sample: torch.Tensor, timestep: Optional[int] = None) -> torch.Tensor:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
sample (`torch.Tensor`):
The input sample.
timestep (`int`, *optional*):
The current timestep in the diffusion chain.
Returns:
`torch.Tensor`:
A scaled input sample.
"""
return sample
def _get_variance(self, timestep, prev_timestep):
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
beta_prod_t = 1 - alpha_prod_t
beta_prod_t_prev = 1 - alpha_prod_t_prev
variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev)
return variance
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
def _threshold_sample(self, sample: torch.Tensor) -> torch.Tensor:
"""
"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
pixels from saturation at each step. We find that dynamic thresholding results in significantly better
photorealism as well as better image-text alignment, especially when using very large guidance weights."
https://arxiv.org/abs/2205.11487
"""
dtype = sample.dtype
batch_size, channels, *remaining_dims = sample.shape
if dtype not in (torch.float32, torch.float64):
sample = sample.float() # upcast for quantile calculation, and clamp not implemented for cpu half
# Flatten sample for doing quantile calculation along each image
sample = sample.reshape(batch_size, channels * np.prod(remaining_dims))
abs_sample = sample.abs() # "a certain percentile absolute pixel value"
s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
s = torch.clamp(
s, min=1, max=self.config.sample_max_value
) # When clamped to min=1, equivalent to standard clipping to [-1, 1]
s = s.unsqueeze(1) # (batch_size, 1) because clamp will broadcast along dim=0
sample = torch.clamp(sample, -s, s) / s # "we threshold xt0 to the range [-s, s] and then divide by s"
sample = sample.reshape(batch_size, channels, *remaining_dims)
sample = sample.to(dtype)
return sample
def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
"""
Sets the discrete timesteps used for the diffusion chain (to be run before inference).
Args:
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model.
"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
f" maximal {self.config.num_train_timesteps} timesteps."
)
self.num_inference_steps = num_inference_steps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
timesteps = (
np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps)
.round()[::-1]
.copy()
.astype(np.int64)
)
elif self.config.timestep_spacing == "leading":
step_ratio = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = self.config.num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64)
timesteps -= 1
elif self.config.timestep_spacing == "matryoshka_style":
step_ratio = (self.config.num_train_timesteps + 1) / (num_inference_steps + 1)
timesteps = (np.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1].copy().astype(np.int64)
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'."
)
self.timesteps = torch.from_numpy(timesteps).to(device)
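# Worked example for "matryoshka_style" spacing with num_train_timesteps=1000 and num_inference_steps=4:
#   step_ratio = (1000 + 1) / (4 + 1) = 200.2  ->  timesteps = [801, 601, 400, 200, 0]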
def get_schedule_shifted(self, alpha_prod, scale_factor=None):
if (scale_factor is not None) and (scale_factor > 1): # rescale noise schedule
scale_factor = scale_factor**self.schedule_shifted_power
snr = alpha_prod / (1 - alpha_prod)
scaled_snr = snr / scale_factor
alpha_prod = 1 / (1 + 1 / scaled_snr)
return alpha_prod
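# Equivalent closed form: with snr = alpha_prod / (1 - alpha_prod) and s = scale_factor**schedule_shifted_power,
# the shifted value is alpha_prod' = (snr / s) / (1 + snr / s); scale factors > 1 therefore lower the SNR.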
def step(
self,
model_output: torch.Tensor,
timestep: int,
sample: torch.Tensor,
eta: float = 0.0,
use_clipped_model_output: bool = False,
generator=None,
variance_noise: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[MatryoshkaDDIMSchedulerOutput, Tuple]:
"""
Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
process from the learned model outputs (most often the predicted noise).
Args:
model_output (`torch.Tensor`):
The direct output from learned diffusion model.
timestep (`float`):
The current discrete timestep in the diffusion chain.
sample (`torch.Tensor`):
A current instance of a sample created by the diffusion process.
eta (`float`):
The weight of noise for added noise in diffusion step.
use_clipped_model_output (`bool`, defaults to `False`):
If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
`use_clipped_model_output` has no effect.
generator (`torch.Generator`, *optional*):
A random number generator.
variance_noise (`torch.Tensor`):
Alternative to generating noise with `generator` by directly providing the noise for the variance
itself. Useful for methods such as [`CycleDiffusion`].
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`.
Returns:
[`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`:
If return_dict is `True`, [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] is returned, otherwise a
tuple is returned where the first element is the sample tensor.
"""
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read the DDIM paper in detail to understand the notation used below
# Notation (<variable name> -> <name in paper>)
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
if self.config.timestep_spacing != "matryoshka_style":
prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
else:
prev_timestep = self.timesteps[torch.nonzero(self.timesteps == timestep).item() + 1]
# 2. compute alphas, betas
alpha_prod_t = self.alphas_cumprod[timestep]
alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
if self.config.timestep_spacing == "matryoshka_style" and len(model_output) > 1:
alpha_prod_t = torch.tensor([self.get_schedule_shifted(alpha_prod_t, s) for s in self.scales])
alpha_prod_t_prev = torch.tensor([self.get_schedule_shifted(alpha_prod_t_prev, s) for s in self.scales])
beta_prod_t = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
pred_epsilon = model_output
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
elif self.config.prediction_type == "v_prediction":
if len(model_output) > 1:
pred_original_sample = []
pred_epsilon = []
for m_o, s, a_p_t, b_p_t in zip(model_output, sample, alpha_prod_t, beta_prod_t):
pred_original_sample.append((a_p_t**0.5) * s - (b_p_t**0.5) * m_o)
pred_epsilon.append((a_p_t**0.5) * m_o + (b_p_t**0.5) * s)
else:
pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
" `v_prediction`"
)
# 4. Clip or threshold "predicted x_0"
if self.config.thresholding:
if len(model_output) > 1:
pred_original_sample = [self._threshold_sample(p_o_s) for p_o_s in pred_original_sample]
else:
pred_original_sample = self._threshold_sample(pred_original_sample)
elif self.config.clip_sample:
if len(model_output) > 1:
pred_original_sample = [
p_o_s.clamp(-self.config.clip_sample_range, self.config.clip_sample_range)
for p_o_s in pred_original_sample
]
else:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range, self.config.clip_sample_range
)
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
variance = self._get_variance(timestep, prev_timestep)
std_dev_t = eta * variance ** (0.5)
if use_clipped_model_output:
# the pred_epsilon is always re-derived from the clipped x_0 in Glide
if len(model_output) > 1:
pred_epsilon = []
for s, a_p_t, p_o_s, b_p_t in zip(sample, alpha_prod_t, pred_original_sample, beta_prod_t):
pred_epsilon.append((s - a_p_t ** (0.5) * p_o_s) / b_p_t ** (0.5))
else:
pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if len(model_output) > 1:
pred_sample_direction = []
for p_e, a_p_t_p in zip(pred_epsilon, alpha_prod_t_prev):
pred_sample_direction.append((1 - a_p_t_p - std_dev_t**2) ** (0.5) * p_e)
else:
pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if len(model_output) > 1:
prev_sample = []
for p_o_s, p_s_d, a_p_t_p in zip(pred_original_sample, pred_sample_direction, alpha_prod_t_prev):
prev_sample.append(a_p_t_p ** (0.5) * p_o_s + p_s_d)
else:
prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
if eta > 0:
if variance_noise is not None and generator is not None:
raise ValueError(
"Cannot pass both generator and variance_noise. Please make sure that either `generator` or"
" `variance_noise` stays `None`."
)
if variance_noise is None:
if len(model_output) > 1:
variance_noise = []
for m_o in model_output:
variance_noise.append(
randn_tensor(m_o.shape, generator=generator, device=m_o.device, dtype=m_o.dtype)
)
else:
variance_noise = randn_tensor(
model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype
)
if len(model_output) > 1:
prev_sample = [p_s + std_dev_t * v_n for v_n, p_s in zip(variance_noise, prev_sample)]
else:
variance = std_dev_t * variance_noise
prev_sample = prev_sample + variance
if not return_dict:
return (prev_sample,)
return MatryoshkaDDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
def add_noise(
self,
original_samples: torch.Tensor,
noise: torch.Tensor,
timesteps: torch.IntTensor,
) -> torch.Tensor:
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
# Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement
# for the subsequent add_noise calls
self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device)
alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype)
timesteps = timesteps.to(original_samples.device)
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity
def get_velocity(self, sample: torch.Tensor, noise: torch.Tensor, timesteps: torch.IntTensor) -> torch.Tensor:
# Make sure alphas_cumprod and timestep have same device and dtype as sample
self.alphas_cumprod = self.alphas_cumprod.to(device=sample.device)
alphas_cumprod = self.alphas_cumprod.to(dtype=sample.dtype)
timesteps = timesteps.to(sample.device)
sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
sqrt_alpha_prod = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape) < len(sample.shape):
sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
def __len__(self):
return self.config.num_train_timesteps
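# Minimal construction sketch for this scheduler (argument values are assumptions, not checkpoint defaults):
#   scheduler = MatryoshkaDDIMScheduler(beta_schedule="squaredcos_cap_v2", timestep_spacing="matryoshka_style")
#   scheduler.set_timesteps(50, device="cuda")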
class CrossAttnDownBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
dropout: float = 0.0,
num_layers: int = 1,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
norm_type: str = "layer_norm",
num_attention_heads: int = 1,
cross_attention_dim: int = 1280,
cross_attention_norm: Optional[str] = None,
output_scale_factor: float = 1.0,
downsample_padding: int = 1,
add_downsample: bool = True,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
attention_type: str = "default",
attention_pre_only: bool = False,
attention_bias: bool = False,
use_attention_ffn: bool = True,
):
super().__init__()
resnets = []
attentions = []
self.has_cross_attention = True
self.num_attention_heads = num_attention_heads
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * num_layers
for i in range(num_layers):
in_channels = in_channels if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
MatryoshkaTransformer2DModel(
num_attention_heads,
out_channels // num_attention_heads,
in_channels=out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
upcast_attention=upcast_attention,
use_attention_ffn=use_attention_ffn,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_downsample:
self.downsamplers = nn.ModuleList(
[
Downsample2D(
out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
)
]
)
else:
self.downsamplers = None
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
additional_residuals: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
output_states = ()
blocks = list(zip(self.resnets, self.attentions))
for i, (resnet, attn) in enumerate(blocks):
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
# apply additional residuals to the output of the last pair of resnet and attention blocks
if i == len(blocks) - 1 and additional_residuals is not None:
hidden_states = hidden_states + additional_residuals
output_states = output_states + (hidden_states,)
if self.downsamplers is not None:
for downsampler in self.downsamplers:
hidden_states = downsampler(hidden_states)
output_states = output_states + (hidden_states,)
return hidden_states, output_states
class UNetMidBlock2DCrossAttn(nn.Module):
def __init__(
self,
in_channels: int,
temb_channels: int,
out_channels: Optional[int] = None,
dropout: float = 0.0,
num_layers: int = 1,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_groups_out: Optional[int] = None,
resnet_pre_norm: bool = True,
norm_type: str = "layer_norm",
num_attention_heads: int = 1,
output_scale_factor: float = 1.0,
cross_attention_dim: int = 1280,
cross_attention_norm: Optional[str] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
upcast_attention: bool = False,
attention_type: str = "default",
attention_pre_only: bool = False,
attention_bias: bool = False,
use_attention_ffn: bool = True,
):
super().__init__()
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
self.has_cross_attention = True
self.num_attention_heads = num_attention_heads
resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
# support for variable transformer layers per block
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * num_layers
resnet_groups_out = resnet_groups_out or resnet_groups
# there is always at least one resnet
resnets = [
ResnetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
groups_out=resnet_groups_out,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
]
attentions = []
for i in range(num_layers):
attentions.append(
MatryoshkaTransformer2DModel(
num_attention_heads,
out_channels // num_attention_heads,
in_channels=out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
upcast_attention=upcast_attention,
use_attention_ffn=use_attention_ffn,
)
)
resnets.append(
ResnetBlock2D(
in_channels=out_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups_out,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
hidden_states = self.resnets[0](hidden_states, temb)
for attn, resnet in zip(self.attentions, self.resnets[1:]):
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
else:
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
hidden_states = resnet(hidden_states, temb)
return hidden_states
class CrossAttnUpBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
resolution_idx: Optional[int] = None,
dropout: float = 0.0,
num_layers: int = 1,
transformer_layers_per_block: Union[int, Tuple[int]] = 1,
resnet_eps: float = 1e-6,
resnet_time_scale_shift: str = "default",
resnet_act_fn: str = "swish",
resnet_groups: int = 32,
resnet_pre_norm: bool = True,
norm_type: str = "layer_norm",
num_attention_heads: int = 1,
cross_attention_dim: int = 1280,
cross_attention_norm: Optional[str] = None,
output_scale_factor: float = 1.0,
add_upsample: bool = True,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
attention_type: str = "default",
attention_pre_only: bool = False,
attention_bias: bool = False,
use_attention_ffn: bool = True,
):
super().__init__()
resnets = []
attentions = []
self.has_cross_attention = True
self.num_attention_heads = num_attention_heads
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * num_layers
for i in range(num_layers):
res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
resnet_in_channels = prev_output_channel if i == 0 else out_channels
resnets.append(
ResnetBlock2D(
in_channels=resnet_in_channels + res_skip_channels,
out_channels=out_channels,
temb_channels=temb_channels,
eps=resnet_eps,
groups=resnet_groups,
dropout=dropout,
time_embedding_norm=resnet_time_scale_shift,
non_linearity=resnet_act_fn,
output_scale_factor=output_scale_factor,
pre_norm=resnet_pre_norm,
)
)
attentions.append(
MatryoshkaTransformer2DModel(
num_attention_heads,
out_channels // num_attention_heads,
in_channels=out_channels,
num_layers=transformer_layers_per_block[i],
cross_attention_dim=cross_attention_dim,
upcast_attention=upcast_attention,
use_attention_ffn=use_attention_ffn,
)
)
self.attentions = nn.ModuleList(attentions)
self.resnets = nn.ModuleList(resnets)
if add_upsample:
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
self.upsamplers = None
self.gradient_checkpointing = False
self.resolution_idx = resolution_idx
def forward(
self,
hidden_states: torch.Tensor,
res_hidden_states_tuple: Tuple[torch.Tensor, ...],
temb: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
upsample_size: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
is_freeu_enabled = (
getattr(self, "s1", None)
and getattr(self, "s2", None)
and getattr(self, "b1", None)
and getattr(self, "b2", None)
)
for resnet, attn in zip(self.resnets, self.attentions):
# pop res hidden states
res_hidden_states = res_hidden_states_tuple[-1]
res_hidden_states_tuple = res_hidden_states_tuple[:-1]
# FreeU: Only operate on the first two stages
if is_freeu_enabled:
hidden_states, res_hidden_states = apply_freeu(
self.resolution_idx,
hidden_states,
res_hidden_states,
s1=self.s1,
s2=self.s2,
b1=self.b1,
b2=self.b2,
)
hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
else:
hidden_states = resnet(hidden_states, temb)
hidden_states = attn(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
return_dict=False,
)[0]
if self.upsamplers is not None:
for upsampler in self.upsamplers:
hidden_states = upsampler(hidden_states, upsample_size)
return hidden_states
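# Note on the loop above: skip connections are popped from the *end* of `res_hidden_states_tuple`,
# i.e. in the reverse of the order the down blocks produced them, and each one is concatenated on
# the channel dimension before its resnet. The FreeU re-weighting only applies when the
# `s1`/`s2`/`b1`/`b2` attributes have been set on this block (e.g. by an `enable_freeu`-style
# helper on the parent UNet); otherwise the skip tensors pass through unchanged.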
@dataclass
class MatryoshkaTransformer2DModelOutput(BaseOutput):
"""
The output of [`MatryoshkaTransformer2DModel`].
Args:
sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`MatryoshkaTransformer2DModel`] is discrete):
The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
distributions for the unnoised latent pixels.
"""
sample: "torch.Tensor" # noqa: F821
class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
_supports_gradient_checkpointing = True
_no_split_modules = ["MatryoshkaTransformerBlock"]
@register_to_config
def __init__(
self,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
num_layers: int = 1,
cross_attention_dim: Optional[int] = None,
upcast_attention: bool = False,
use_attention_ffn: bool = True,
):
super().__init__()
self.in_channels = self.config.num_attention_heads * self.config.attention_head_dim
self.gradient_checkpointing = False
self.transformer_blocks = nn.ModuleList(
[
MatryoshkaTransformerBlock(
self.in_channels,
self.config.num_attention_heads,
self.config.attention_head_dim,
cross_attention_dim=self.config.cross_attention_dim,
upcast_attention=self.config.upcast_attention,
use_attention_ffn=self.config.use_attention_ffn,
)
for _ in range(self.config.num_layers)
]
)
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
timestep: Optional[torch.LongTensor] = None,
added_cond_kwargs: Dict[str, torch.Tensor] = None,
class_labels: Optional[torch.LongTensor] = None,
cross_attention_kwargs: Dict[str, Any] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
):
"""
The [`MatryoshkaTransformer2DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.Tensor` of shape `(batch size, channel, height, width)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
`AdaLayerZeroNorm`.
cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~MatryoshkaTransformer2DModelOutput`] instead of a plain
                tuple.
Returns:
If `return_dict` is True, an [`~MatryoshkaTransformer2DModelOutput`] is returned,
otherwise a `tuple` where the first element is the sample tensor.
"""
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
# we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
# we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None and attention_mask.ndim == 2:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
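        # Worked example of the conversion above (hypothetical values): a keep/discard mask
        #     attention_mask = torch.tensor([[1, 1, 0]])      # shape (batch=1, key_tokens=3)
        # becomes the additive bias
        #     [[[0.0, 0.0, -10000.0]]]                        # shape (batch, 1, key_tokens)
        # which broadcasts against (batch, heads, query_tokens, key_tokens) attention scores.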
# Blocks
for block in self.transformer_blocks:
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states = self._gradient_checkpointing_func(
block,
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
timestep,
cross_attention_kwargs,
class_labels,
)
else:
hidden_states = block(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
timestep=timestep,
cross_attention_kwargs=cross_attention_kwargs,
class_labels=class_labels,
)
# Output
output = hidden_states
if not return_dict:
return (output,)
return MatryoshkaTransformer2DModelOutput(sample=output)
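# A hedged usage sketch for MatryoshkaTransformer2DModel (hypothetical shapes; the input channel
# count must equal num_attention_heads * attention_head_dim):
#
#     model = MatryoshkaTransformer2DModel(
#         num_attention_heads=5, attention_head_dim=64, in_channels=320,
#         num_layers=1, cross_attention_dim=768,
#     )
#     sample = torch.randn(1, 320, 32, 32)
#     context = torch.randn(1, 77, 768)
#     out = model(sample, encoder_hidden_states=context, return_dict=False)[0]  # same shape as `sample`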
class MatryoshkaTransformerBlock(nn.Module):
r"""
Matryoshka Transformer block.
Parameters:
"""
def __init__(
self,
dim: int,
num_attention_heads: int,
attention_head_dim: int,
cross_attention_dim: Optional[int] = None,
upcast_attention: bool = False,
use_attention_ffn: bool = True,
):
super().__init__()
self.dim = dim
self.num_attention_heads = num_attention_heads
self.attention_head_dim = attention_head_dim
self.cross_attention_dim = cross_attention_dim
# Define 3 blocks.
# 1. Self-Attn
self.attn1 = Attention(
query_dim=dim,
cross_attention_dim=None,
heads=num_attention_heads,
dim_head=attention_head_dim,
norm_num_groups=32,
bias=True,
upcast_attention=upcast_attention,
pre_only=True,
processor=MatryoshkaFusedAttnProcessor2_0(),
)
self.attn1.fuse_projections()
del self.attn1.to_q
del self.attn1.to_k
del self.attn1.to_v
# 2. Cross-Attn
if cross_attention_dim is not None and cross_attention_dim > 0:
self.attn2 = Attention(
query_dim=dim,
cross_attention_dim=cross_attention_dim,
cross_attention_norm="layer_norm",
heads=num_attention_heads,
dim_head=attention_head_dim,
bias=True,
upcast_attention=upcast_attention,
pre_only=True,
processor=MatryoshkaFusedAttnProcessor2_0(),
)
self.attn2.fuse_projections()
del self.attn2.to_q
del self.attn2.to_k
del self.attn2.to_v
self.proj_out = nn.Linear(dim, dim)
if use_attention_ffn:
# 3. Feed-forward
self.ff = MatryoshkaFeedForward(dim)
else:
self.ff = None
# let chunk size default to None
self._chunk_size = None
self._chunk_dim = 0
# Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
# Sets chunk feed-forward
self._chunk_size = chunk_size
self._chunk_dim = dim
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
timestep: Optional[torch.LongTensor] = None,
cross_attention_kwargs: Dict[str, Any] = None,
class_labels: Optional[torch.LongTensor] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
) -> torch.Tensor:
if cross_attention_kwargs is not None:
if cross_attention_kwargs.get("scale", None) is not None:
logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
# 1. Self-Attention
batch_size, channels, *spatial_dims = hidden_states.shape
attn_output, query = self.attn1(
hidden_states,
# **cross_attention_kwargs,
)
# 2. Cross-Attention
if self.cross_attention_dim is not None and self.cross_attention_dim > 0:
attn_output_cond = self.attn2(
hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
self_attention_output=attn_output,
self_attention_query=query,
# **cross_attention_kwargs,
)
attn_output_cond = self.proj_out(attn_output_cond)
attn_output_cond = attn_output_cond.permute(0, 2, 1).reshape(batch_size, channels, *spatial_dims)
hidden_states = hidden_states + attn_output_cond
if self.ff is not None:
# 3. Feed-forward
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
ff_output = _chunked_feed_forward(self.ff, hidden_states, self._chunk_dim, self._chunk_size)
else:
ff_output = self.ff(hidden_states)
hidden_states = ff_output + hidden_states
return hidden_states
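# Note on the block above: `attn1` is built with `pre_only=True`, so instead of projecting its
# result it hands back the raw self-attention output together with the query tensor. `attn2`
# then reuses that query (`self_attention_query`) and folds the self-attention output into its
# own result (`self_attention_output`) inside MatryoshkaFusedAttnProcessor2_0, so the single
# `proj_out` + residual add serves both the self- and cross-attention paths at once.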
class MatryoshkaFusedAttnProcessor2_0:
r"""
Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). It uses
fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused.
For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
    This API is currently 🧪 experimental in nature and can change in the future.
</Tip>
"""
def __init__(self):
if not hasattr(F, "scaled_dot_product_attention"):
raise ImportError(
"MatryoshkaFusedAttnProcessor2_0 requires PyTorch 2.x, to use it. Please upgrade PyTorch to > 2.x."
)
def __call__(
self,
attn: Attention,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
temb: Optional[torch.Tensor] = None,
self_attention_query: Optional[torch.Tensor] = None,
self_attention_output: Optional[torch.Tensor] = None,
*args,
**kwargs,
) -> torch.Tensor:
if len(args) > 0 or kwargs.get("scale", None) is not None:
deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
deprecate("scale", "1.0.0", deprecation_message)
residual = hidden_states
if attn.spatial_norm is not None:
hidden_states = attn.spatial_norm(hidden_states, temb)
input_ndim = hidden_states.ndim
if attn.group_norm is not None:
hidden_states = attn.group_norm(hidden_states)
if input_ndim == 4:
batch_size, channel, height, width = hidden_states.shape
hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2).contiguous()
if encoder_hidden_states is None:
qkv = attn.to_qkv(hidden_states)
split_size = qkv.shape[-1] // 3
query, key, value = torch.split(qkv, split_size, dim=-1)
else:
if attn.norm_cross:
encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
if self_attention_query is not None:
query = self_attention_query
else:
query = attn.to_q(hidden_states)
kv = attn.to_kv(encoder_hidden_states)
split_size = kv.shape[-1] // 2
key, value = torch.split(kv, split_size, dim=-1)
if attn.norm_q is not None:
query = attn.norm_q(query)
if attn.norm_k is not None:
key = attn.norm_k(key)
inner_dim = key.shape[-1]
head_dim = inner_dim // attn.heads
if self_attention_output is None:
query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
if attn.norm_q is not None:
query = attn.norm_q(query)
if attn.norm_k is not None:
key = attn.norm_k(key)
# the output of sdp = (batch, num_heads, seq_len, head_dim)
# TODO: add support for attn.scale when we move to Torch 2.1
hidden_states = F.scaled_dot_product_attention(
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
)
hidden_states = hidden_states.to(query.dtype)
if self_attention_output is not None:
hidden_states = hidden_states + self_attention_output
hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
if attn.residual_connection:
hidden_states = hidden_states + residual
hidden_states = hidden_states / attn.rescale_output_factor
return hidden_states if self_attention_output is not None else (hidden_states, query)
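# Shape sketch for the fused projections above (hypothetical sizes): with 5 heads of dim 64,
# inner_dim = 320, so the self-attention `to_qkv` maps (B, seq, 320) -> (B, seq, 960) and is split
# into three 320-wide chunks, while the cross-attention `to_kv` maps the encoder states to
# (B, seq_enc, 640) and is split into key/value. The processor returns a plain tensor on the
# cross-attention path and a `(hidden_states, query)` tuple on the pre-only self-attention path.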
class MatryoshkaFeedForward(nn.Module):
r"""
A feed-forward layer for the Matryoshka models.
Parameters:"""
def __init__(
self,
dim: int,
):
super().__init__()
self.group_norm = nn.GroupNorm(32, dim)
self.linear_gelu = GELU(dim, dim * 4)
self.linear_out = nn.Linear(dim * 4, dim)
def forward(self, x):
batch_size, channels, *spatial_dims = x.shape
x = self.group_norm(x)
x = x.view(batch_size, channels, -1).permute(0, 2, 1)
x = self.linear_out(self.linear_gelu(x))
x = x.permute(0, 2, 1).view(batch_size, channels, *spatial_dims)
return x
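# Note: unlike the usual token-sequence feed-forward, this module operates on channel-first
# feature maps. The forward above group-normalizes (B, C, H, W), flattens it to a (B, H*W, C)
# sequence for the GELU MLP (C -> 4C -> C), and reshapes back, so it can sit directly between
# convolutional blocks.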
def get_down_block(
down_block_type: str,
num_layers: int,
in_channels: int,
out_channels: int,
temb_channels: int,
add_downsample: bool,
resnet_eps: float,
resnet_act_fn: str,
norm_type: str = "layer_norm",
transformer_layers_per_block: int = 1,
num_attention_heads: Optional[int] = None,
resnet_groups: Optional[int] = None,
cross_attention_dim: Optional[int] = None,
downsample_padding: Optional[int] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
attention_type: str = "default",
attention_pre_only: bool = False,
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: float = 1.0,
cross_attention_norm: Optional[str] = None,
attention_head_dim: Optional[int] = None,
use_attention_ffn: bool = True,
downsample_type: Optional[str] = None,
dropout: float = 0.0,
):
# If attn head dim is not defined, we default it to the number of heads
if attention_head_dim is None:
logger.warning(
f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
)
attention_head_dim = num_attention_heads
down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
if down_block_type == "DownBlock2D":
return DownBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
dropout=dropout,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
resnet_time_scale_shift=resnet_time_scale_shift,
)
elif down_block_type == "CrossAttnDownBlock2D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
return CrossAttnDownBlock2D(
num_layers=num_layers,
transformer_layers_per_block=transformer_layers_per_block,
in_channels=in_channels,
out_channels=out_channels,
temb_channels=temb_channels,
dropout=dropout,
add_downsample=add_downsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
norm_type=norm_type,
resnet_groups=resnet_groups,
downsample_padding=downsample_padding,
cross_attention_dim=cross_attention_dim,
cross_attention_norm=cross_attention_norm,
num_attention_heads=num_attention_heads,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
attention_pre_only=attention_pre_only,
use_attention_ffn=use_attention_ffn,
        )
    raise ValueError(f"{down_block_type} does not exist.")
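# A hedged usage sketch for the factory above (hypothetical channel sizes):
#
#     block = get_down_block(
#         "CrossAttnDownBlock2D", num_layers=2, in_channels=320, out_channels=640,
#         temb_channels=1280, add_downsample=True, resnet_eps=1e-5, resnet_act_fn="silu",
#         resnet_groups=32, downsample_padding=1, cross_attention_dim=768,
#         num_attention_heads=10, attention_head_dim=64,
#     )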
def get_mid_block(
mid_block_type: str,
temb_channels: int,
in_channels: int,
resnet_eps: float,
resnet_act_fn: str,
resnet_groups: int,
norm_type: str = "layer_norm",
output_scale_factor: float = 1.0,
transformer_layers_per_block: int = 1,
num_attention_heads: Optional[int] = None,
cross_attention_dim: Optional[int] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
mid_block_only_cross_attention: bool = False,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
attention_type: str = "default",
attention_pre_only: bool = False,
resnet_skip_time_act: bool = False,
cross_attention_norm: Optional[str] = None,
attention_head_dim: Optional[int] = 1,
dropout: float = 0.0,
):
if mid_block_type == "UNetMidBlock2DCrossAttn":
return UNetMidBlock2DCrossAttn(
transformer_layers_per_block=transformer_layers_per_block,
in_channels=in_channels,
temb_channels=temb_channels,
dropout=dropout,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
norm_type=norm_type,
output_scale_factor=output_scale_factor,
resnet_time_scale_shift=resnet_time_scale_shift,
cross_attention_dim=cross_attention_dim,
cross_attention_norm=cross_attention_norm,
num_attention_heads=num_attention_heads,
resnet_groups=resnet_groups,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
upcast_attention=upcast_attention,
attention_type=attention_type,
attention_pre_only=attention_pre_only,
        )
    elif mid_block_type is None:
        return None
    else:
        raise ValueError(f"unknown mid_block_type : {mid_block_type}")
def get_up_block(
up_block_type: str,
num_layers: int,
in_channels: int,
out_channels: int,
prev_output_channel: int,
temb_channels: int,
add_upsample: bool,
resnet_eps: float,
resnet_act_fn: str,
norm_type: str = "layer_norm",
resolution_idx: Optional[int] = None,
transformer_layers_per_block: int = 1,
num_attention_heads: Optional[int] = None,
resnet_groups: Optional[int] = None,
cross_attention_dim: Optional[int] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
only_cross_attention: bool = False,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
attention_type: str = "default",
attention_pre_only: bool = False,
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: float = 1.0,
cross_attention_norm: Optional[str] = None,
attention_head_dim: Optional[int] = None,
use_attention_ffn: bool = True,
upsample_type: Optional[str] = None,
dropout: float = 0.0,
) -> nn.Module:
# If attn head dim is not defined, we default it to the number of heads
if attention_head_dim is None:
logger.warning(
f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
)
attention_head_dim = num_attention_heads
up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
if up_block_type == "UpBlock2D":
return UpBlock2D(
num_layers=num_layers,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
resolution_idx=resolution_idx,
dropout=dropout,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
resnet_groups=resnet_groups,
resnet_time_scale_shift=resnet_time_scale_shift,
)
elif up_block_type == "CrossAttnUpBlock2D":
if cross_attention_dim is None:
raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
return CrossAttnUpBlock2D(
num_layers=num_layers,
transformer_layers_per_block=transformer_layers_per_block,
in_channels=in_channels,
out_channels=out_channels,
prev_output_channel=prev_output_channel,
temb_channels=temb_channels,
resolution_idx=resolution_idx,
dropout=dropout,
add_upsample=add_upsample,
resnet_eps=resnet_eps,
resnet_act_fn=resnet_act_fn,
norm_type=norm_type,
resnet_groups=resnet_groups,
cross_attention_dim=cross_attention_dim,
cross_attention_norm=cross_attention_norm,
num_attention_heads=num_attention_heads,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
attention_pre_only=attention_pre_only,
use_attention_ffn=use_attention_ffn,
        )
    raise ValueError(f"{up_block_type} does not exist.")
class MatryoshkaCombinedTimestepTextEmbedding(nn.Module):
def __init__(self, addition_time_embed_dim, cross_attention_dim, time_embed_dim, type):
super().__init__()
if type == "unet":
self.cond_emb = nn.Linear(cross_attention_dim, time_embed_dim, bias=False)
elif type == "nested_unet":
self.cond_emb = None
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos=False, downscale_freq_shift=0)
self.add_timestep_embedder = TimestepEmbedding(addition_time_embed_dim, time_embed_dim)
def forward(self, emb, encoder_hidden_states, added_cond_kwargs):
conditioning_mask = added_cond_kwargs.get("conditioning_mask", None)
masked_cross_attention = added_cond_kwargs.get("masked_cross_attention", False)
if self.cond_emb is not None and not added_cond_kwargs.get("from_nested", False):
if conditioning_mask is None:
y = encoder_hidden_states.mean(dim=1)
else:
y = (conditioning_mask.unsqueeze(-1) * encoder_hidden_states).sum(dim=1) / conditioning_mask.sum(
dim=1, keepdim=True
)
cond_emb = self.cond_emb(y)
else:
cond_emb = None
if not masked_cross_attention:
conditioning_mask = None
micro = added_cond_kwargs.get("micro_conditioning_scale", None)
if micro is not None:
temb = self.add_time_proj(torch.tensor([micro], device=emb.device, dtype=emb.dtype))
temb_micro_conditioning = self.add_timestep_embedder(temb.to(emb.dtype))
# if self.cond_emb is not None and not added_cond_kwargs.get("from_nested", False):
return temb_micro_conditioning, conditioning_mask, cond_emb
return None, conditioning_mask, cond_emb
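# Note on the pooling above: when `conditioning_mask` is given, the vector fed to `cond_emb` is a
# masked average over the text sequence (e.g. with mask [1, 1, 0] only the first two token
# embeddings are summed and divided by 2); without a mask it is a plain mean over all tokens.
# `micro_conditioning_scale` (e.g. 256 or 1024 for the nested UNets) gets its own sinusoidal
# projection + TimestepEmbedding and is returned alongside the (optionally dropped) mask.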
@dataclass
class MatryoshkaUNet2DConditionOutput(BaseOutput):
"""
    The output of [`MatryoshkaUNet2DConditionModel`].
Args:
sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.Tensor = None
sample_inner: torch.Tensor = None
class MatryoshkaUNet2DConditionModel(
ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin
):
r"""
A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
shaped output.
    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
`UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
The tuple of upsample blocks to use.
only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
Whether to include self-attention in the basic transformer blocks, see
[`~models.attention.BasicTransformerBlock`].
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, normalization and activation layers are skipped in post-processing.
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
The dimension of the cross attention features.
transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
[`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
dimension to `cross_attention_dim`.
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
num_attention_heads (`int`, *optional*):
            The number of attention heads. If not defined, defaults to `attention_head_dim`.
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
"text". "text" will use the `TextTimeEmbedding` layer.
addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
Dimension for the timestep embeddings.
num_class_embeds (`int`, *optional*, defaults to `None`):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
time_embedding_type (`str`, *optional*, defaults to `positional`):
The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
time_embedding_dim (`int`, *optional*, defaults to `None`):
An optional override for the dimension of the projected time embedding.
time_embedding_act_fn (`str`, *optional*, defaults to `None`):
Optional activation function to use only once on the time embeddings before they are passed to the rest of
the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
timestep_post_act (`str`, *optional*, defaults to `None`):
The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
time_cond_proj_dim (`int`, *optional*, defaults to `None`):
The dimension of `cond_proj` layer in the timestep embedding.
conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
`class_embed_type="projection"`. Required when `class_embed_type="projection"`.
class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
embeddings with the class embeddings.
mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
`only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
            `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
otherwise.
"""
_supports_gradient_checkpointing = True
_no_split_modules = ["MatryoshkaTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]
@register_to_config
def __init__(
self,
sample_size: Optional[int] = None,
in_channels: int = 3,
out_channels: int = 3,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: Union[int, Tuple[int]] = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
dropout: float = 0.0,
act_fn: str = "silu",
norm_type: str = "layer_norm",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: Union[int, Tuple[int]] = 1280,
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
dual_cross_attention: bool = False,
use_attention_ffn: bool = True,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: float = 1.0,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
projection_class_embeddings_input_dim: Optional[int] = None,
attention_type: str = "default",
attention_pre_only: bool = False,
masked_cross_attention: bool = False,
micro_conditioning_scale: int = None,
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads: int = 64,
temporal_mode: bool = False,
temporal_spatial_ds: bool = False,
skip_cond_emb: bool = False,
nesting: Optional[int] = False,
):
super().__init__()
self.sample_size = sample_size
if num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
self._check_config(
down_block_types=down_block_types,
up_block_types=up_block_types,
only_cross_attention=only_cross_attention,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
cross_attention_dim=cross_attention_dim,
transformer_layers_per_block=transformer_layers_per_block,
reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
)
# input
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim, timestep_input_dim = self._set_time_proj(
time_embedding_type,
block_out_channels=block_out_channels,
flip_sin_to_cos=flip_sin_to_cos,
freq_shift=freq_shift,
time_embedding_dim=time_embedding_dim,
)
self.time_embedding = TimestepEmbedding(
time_embedding_dim // 4 if time_embedding_dim is not None else timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
self._set_encoder_hid_proj(
encoder_hid_dim_type,
cross_attention_dim=cross_attention_dim,
encoder_hid_dim=encoder_hid_dim,
)
# class embedding
self._set_class_embedding(
class_embed_type,
act_fn=act_fn,
num_class_embeds=num_class_embeds,
projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
time_embed_dim=time_embed_dim,
timestep_input_dim=timestep_input_dim,
)
self._set_add_embedding(
addition_embed_type,
addition_embed_type_num_heads=addition_embed_type_num_heads,
addition_time_embed_dim=timestep_input_dim,
cross_attention_dim=cross_attention_dim,
encoder_hid_dim=encoder_hid_dim,
flip_sin_to_cos=flip_sin_to_cos,
freq_shift=freq_shift,
projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
time_embed_dim=time_embed_dim,
)
if time_embedding_act_fn is None:
self.time_embed_act = None
else:
self.time_embed_act = get_activation(time_embedding_act_fn)
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = only_cross_attention
only_cross_attention = [only_cross_attention] * len(down_block_types)
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = False
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(cross_attention_dim, int):
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
if isinstance(layers_per_block, int):
layers_per_block = [layers_per_block] * len(down_block_types)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block[i],
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=blocks_time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
norm_type=norm_type,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim[i],
num_attention_heads=num_attention_heads[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
attention_pre_only=attention_pre_only,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
use_attention_ffn=use_attention_ffn,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = get_mid_block(
mid_block_type,
temb_channels=blocks_time_embed_dim,
in_channels=block_out_channels[-1],
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
norm_type=norm_type,
resnet_groups=norm_num_groups,
output_scale_factor=mid_block_scale_factor,
transformer_layers_per_block=1,
num_attention_heads=num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
mid_block_only_cross_attention=mid_block_only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
attention_pre_only=attention_pre_only,
resnet_skip_time_act=resnet_skip_time_act,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[-1],
dropout=dropout,
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_num_attention_heads = list(reversed(num_attention_heads))
reversed_layers_per_block = list(reversed(layers_per_block))
reversed_cross_attention_dim = list(reversed(cross_attention_dim))
reversed_transformer_layers_per_block = (
list(reversed(transformer_layers_per_block))
if reverse_transformer_layers_per_block is None
else reverse_transformer_layers_per_block
)
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=reversed_layers_per_block[i] + 1,
transformer_layers_per_block=reversed_transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=blocks_time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
norm_type=norm_type,
resolution_idx=i,
resnet_groups=norm_num_groups,
cross_attention_dim=reversed_cross_attention_dim[i],
num_attention_heads=reversed_num_attention_heads[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
attention_pre_only=attention_pre_only,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
use_attention_ffn=use_attention_ffn,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.up_blocks.append(up_block)
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = get_activation(act_fn)
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)
self.is_temporal = []
def _check_config(
self,
down_block_types: Tuple[str],
up_block_types: Tuple[str],
only_cross_attention: Union[bool, Tuple[bool]],
block_out_channels: Tuple[int],
layers_per_block: Union[int, Tuple[int]],
cross_attention_dim: Union[int, Tuple[int]],
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
reverse_transformer_layers_per_block: bool,
attention_head_dim: int,
num_attention_heads: Optional[Union[int, Tuple[int]]],
):
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
)
if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
)
if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
for layer_number_per_block in transformer_layers_per_block:
if isinstance(layer_number_per_block, list):
raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
def _set_time_proj(
self,
time_embedding_type: str,
block_out_channels: int,
flip_sin_to_cos: bool,
freq_shift: float,
time_embedding_dim: int,
) -> Tuple[int, int]:
if time_embedding_type == "fourier":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
if time_embed_dim % 2 != 0:
raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
self.time_proj = GaussianFourierProjection(
time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
)
timestep_input_dim = time_embed_dim
elif time_embedding_type == "positional":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
if self.model_type == "unet":
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
elif self.model_type == "nested_unet" and self.config.micro_conditioning_scale == 256:
self.time_proj = Timesteps(block_out_channels[0] * 4, flip_sin_to_cos, freq_shift)
elif self.model_type == "nested_unet" and self.config.micro_conditioning_scale == 1024:
self.time_proj = Timesteps(block_out_channels[0] * 4 * 2, flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
else:
raise ValueError(
f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
)
return time_embed_dim, timestep_input_dim
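    # Example of the branch above (hypothetical config, with `time_embedding_dim` left as None):
    # with "positional" embeddings and block_out_channels = (320, 640, 1280, 1280), time_embed_dim
    # is 4 * 320 = 1280; a plain "unet" projects timesteps with Timesteps(320), while a
    # "nested_unet" uses Timesteps(1280) at micro_conditioning_scale 256 and Timesteps(2560) at
    # 1024. timestep_input_dim stays block_out_channels[0] = 320 in every positional case.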
def _set_encoder_hid_proj(
self,
encoder_hid_dim_type: Optional[str],
cross_attention_dim: Union[int, Tuple[int]],
encoder_hid_dim: Optional[int],
):
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
encoder_hid_dim_type = "text_proj"
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
)
if encoder_hid_dim_type == "text_proj":
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
elif encoder_hid_dim_type == "text_image_proj":
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)`
self.encoder_hid_proj = TextImageProjection(
text_embed_dim=encoder_hid_dim,
image_embed_dim=cross_attention_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2
self.encoder_hid_proj = ImageProjection(
image_embed_dim=encoder_hid_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj', or 'image_proj'."
)
else:
self.encoder_hid_proj = None
def _set_class_embedding(
self,
class_embed_type: Optional[str],
act_fn: str,
num_class_embeds: Optional[int],
projection_class_embeddings_input_dim: Optional[int],
time_embed_dim: int,
timestep_input_dim: int,
):
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
elif class_embed_type == "projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
)
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
# 2. it projects from an arbitrary input dimension.
#
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif class_embed_type == "simple_projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
)
self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
else:
self.class_embedding = None
def _set_add_embedding(
self,
addition_embed_type: str,
addition_embed_type_num_heads: int,
addition_time_embed_dim: Optional[int],
flip_sin_to_cos: bool,
freq_shift: float,
cross_attention_dim: Optional[int],
encoder_hid_dim: Optional[int],
projection_class_embeddings_input_dim: Optional[int],
time_embed_dim: int,
):
if addition_embed_type == "text":
if encoder_hid_dim is not None:
text_time_embedding_from_dim = encoder_hid_dim
else:
text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "matryoshka":
self.add_embedding = MatryoshkaCombinedTimestepTextEmbedding(
self.config.time_embedding_dim // 4
if self.config.time_embedding_dim is not None
else addition_time_embed_dim,
cross_attention_dim,
time_embed_dim,
self.model_type, # if not self.config.nesting else "inner_" + self.model_type,
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
# case when `addition_embed_type == "text_image"` (Kandinsky 2.1)`
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type == "image":
# Kandinsky 2.2
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type is not None:
raise ValueError(
f"`addition_embed_type`: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'."
)
def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
if attention_type in ["gated", "gated-text-image"]:
positive_len = 768
if isinstance(cross_attention_dim, int):
positive_len = cross_attention_dim
elif isinstance(cross_attention_dim, (list, tuple)):
positive_len = cross_attention_dim[0]
feature_type = "text-only" if attention_type == "gated" else "text-image"
self.position_net = GLIGENTextBoundingboxProjection(
positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
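        Example (an illustrative sketch; assumes `unet` is an instance of this model):
        ```py
        for name, processor in unet.attn_processors.items():
            print(name, processor.__class__.__name__)
        ```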
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
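        Example (an illustrative sketch; assumes `unet` is an instance of this model and that `AttnProcessor`
        is available from `diffusers.models.attention_processor`):
        ```py
        from diffusers.models.attention_processor import AttnProcessor
        # use the same processor for every attention layer
        unet.set_attn_processor(AttnProcessor())
        # or pass one processor per layer, keyed by its `.processor` path
        unet.set_attn_processor({name: AttnProcessor() for name in unet.attn_processors})
        ```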
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
r"""
Enable sliced attention computation.
When this option is enabled, the attention module splits the input tensor in slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
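        Example (an illustrative sketch; assumes `unet` is an instance of this model):
        ```py
        unet.set_attention_slice("auto")  # split every attention head dimension in half
        unet.set_attention_slice("max")  # most memory-frugal: one slice at a time
        unet.set_attention_slice(2)  # roughly `attention_head_dim // 2` slices per layer
        ```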
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stage blocks where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
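        Example (illustrative values only; consult the FreeU repository for settings tuned to a given pipeline):
        ```py
        pipe.unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
        ```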
"""
for i, upsample_block in enumerate(self.up_blocks):
setattr(upsample_block, "s1", s1)
setattr(upsample_block, "s2", s2)
setattr(upsample_block, "b1", b1)
setattr(upsample_block, "b2", b2)
def disable_freeu(self):
"""Disables the FreeU mechanism."""
freeu_keys = {"s1", "s2", "b1", "b2"}
for i, upsample_block in enumerate(self.up_blocks):
for k in freeu_keys:
if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
setattr(upsample_block, k, None)
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
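        Example (an illustrative sketch; assumes `unet` is an instance of this model):
        ```py
        unet.fuse_qkv_projections()
        # ... run inference ...
        unet.unfuse_qkv_projections()
        ```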
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
self.set_attn_processor(FusedAttnProcessor2_0())
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
def get_time_embed(
self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
) -> Optional[torch.Tensor]:
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
is_npu = sample.device.type == "npu"
if isinstance(timestep, float):
dtype = torch.float32 if (is_mps or is_npu) else torch.float64
else:
dtype = torch.int32 if (is_mps or is_npu) else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
return t_emb
def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
class_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
return class_emb
def get_aug_embed(
self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
) -> Optional[torch.Tensor]:
aug_emb = None
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "matryoshka":
aug_emb = self.add_embedding(emb, encoder_hidden_states, added_cond_kwargs)
elif self.config.addition_embed_type == "text_image":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
elif self.config.addition_embed_type == "image":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
aug_emb = self.add_embedding(image_embs)
elif self.config.addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet - style
if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
hint = added_cond_kwargs.get("hint")
aug_emb = self.add_embedding(image_embs, hint)
return aug_emb
def process_encoder_hidden_states(
self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
) -> torch.Tensor:
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
if hasattr(self, "text_encoder_hid_proj") and self.text_encoder_hid_proj is not None:
encoder_hidden_states = self.text_encoder_hid_proj(encoder_hidden_states)
image_embeds = added_cond_kwargs.get("image_embeds")
image_embeds = self.encoder_hid_proj(image_embeds)
encoder_hidden_states = (encoder_hidden_states, image_embeds)
return encoder_hidden_states
@property
def model_type(self) -> str:
return "unet"
def forward(
self,
sample: torch.Tensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
cond_emb: Optional[torch.Tensor] = None,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
from_nested: bool = False,
) -> Union[MatryoshkaUNet2DConditionOutput, Tuple]:
r"""
        The [`MatryoshkaUNet2DConditionModel`] forward method.
Args:
sample (`torch.Tensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.Tensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~MatryoshkaUNet2DConditionOutput`] instead of a plain
                tuple.
        Returns:
            [`~MatryoshkaUNet2DConditionOutput`] or `tuple`:
                If `return_dict` is True, a [`~MatryoshkaUNet2DConditionOutput`] is returned,
                otherwise a `tuple` is returned where the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
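        # e.g. a UNet with 3 upsampling layers requires the sample height and width to be divisible by 2**3 = 8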
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if self.config.nesting:
sample, sample_feat = sample
if isinstance(sample, list) and len(sample) == 1:
sample = sample[0]
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
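            # e.g. a key mask [1, 1, 0] becomes the bias [0.0, 0.0, -10000.0] with shape [batch, 1, key_tokens]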
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
t_emb = self.get_time_embed(sample=sample, timestep=timestep)
emb = self.time_embedding(t_emb, timestep_cond)
class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
if class_emb is not None:
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
added_cond_kwargs = added_cond_kwargs or {}
added_cond_kwargs["masked_cross_attention"] = self.config.masked_cross_attention
added_cond_kwargs["micro_conditioning_scale"] = self.config.micro_conditioning_scale
added_cond_kwargs["from_nested"] = from_nested
added_cond_kwargs["conditioning_mask"] = encoder_attention_mask
if not from_nested:
encoder_hidden_states = self.process_encoder_hidden_states(
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
aug_emb, encoder_attention_mask, cond_emb = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
else:
aug_emb, encoder_attention_mask, _ = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample[0][0].dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
if self.config.addition_embed_type == "image_hint":
aug_emb, hint = aug_emb
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb + cond_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
# 2. pre-process
sample = self.conv_in(sample)
if self.config.nesting:
sample = sample + sample_feat
# 2.5 GLIGEN position net
if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
# 3. down
# we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
# to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
if cross_attention_kwargs is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
lora_scale = cross_attention_kwargs.pop("scale", 1.0)
else:
lora_scale = 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
                for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
)
sample_inner = sample
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample_inner)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (sample,)
if self.config.nesting:
return MatryoshkaUNet2DConditionOutput(sample=sample, sample_inner=sample_inner)
return MatryoshkaUNet2DConditionOutput(sample=sample)
@dataclass
class NestedUNet2DConditionOutput(BaseOutput):
"""
Output type for the [`NestedUNet2DConditionModel`] model.
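    Args:
        sample (`list` of `torch.Tensor`):
            Denoised outputs for each nesting level, ordered from the highest (outer) resolution to the lowest
            (inner) resolution.
        sample_inner (`torch.Tensor`):
            The outer-level feature map before post-processing, consumed by the parent UNet's `out_adapter` when
            this model is itself nested inside another.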
"""
sample: list = None
sample_inner: torch.Tensor = None
class NestedUNet2DConditionModel(MatryoshkaUNet2DConditionModel):
"""
Nested UNet model with condition for image denoising.
"""
@register_to_config
def __init__(
self,
in_channels=3,
out_channels=3,
block_out_channels=(64, 128, 256),
cross_attention_dim=2048,
resnet_time_scale_shift="scale_shift",
down_block_types=("DownBlock2D", "DownBlock2D", "DownBlock2D"),
up_block_types=("UpBlock2D", "UpBlock2D", "UpBlock2D"),
mid_block_type=None,
nesting=False,
flip_sin_to_cos=False,
transformer_layers_per_block=[0, 0, 0],
layers_per_block=[2, 2, 1],
masked_cross_attention=True,
micro_conditioning_scale=256,
addition_embed_type="matryoshka",
skip_normalization=True,
time_embedding_dim=1024,
skip_inner_unet_input=False,
temporal_mode=False,
temporal_spatial_ds=False,
initialize_inner_with_pretrained=None,
use_attention_ffn=False,
act_fn="silu",
addition_embed_type_num_heads=64,
addition_time_embed_dim=None,
attention_head_dim=8,
attention_pre_only=False,
attention_type="default",
center_input_sample=False,
class_embed_type=None,
class_embeddings_concat=False,
conv_in_kernel=3,
conv_out_kernel=3,
cross_attention_norm=None,
downsample_padding=1,
dropout=0.0,
dual_cross_attention=False,
encoder_hid_dim=None,
encoder_hid_dim_type=None,
freq_shift=0,
mid_block_only_cross_attention=None,
mid_block_scale_factor=1,
norm_eps=1e-05,
norm_num_groups=32,
norm_type="layer_norm",
num_attention_heads=None,
num_class_embeds=None,
only_cross_attention=False,
projection_class_embeddings_input_dim=None,
resnet_out_scale_factor=1.0,
resnet_skip_time_act=False,
reverse_transformer_layers_per_block=None,
sample_size=None,
skip_cond_emb=False,
time_cond_proj_dim=None,
time_embedding_act_fn=None,
time_embedding_type="positional",
timestep_post_act=None,
upcast_attention=False,
use_linear_projection=False,
is_temporal=None,
inner_config={},
):
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
block_out_channels=block_out_channels,
cross_attention_dim=cross_attention_dim,
resnet_time_scale_shift=resnet_time_scale_shift,
down_block_types=down_block_types,
up_block_types=up_block_types,
mid_block_type=mid_block_type,
nesting=nesting,
flip_sin_to_cos=flip_sin_to_cos,
transformer_layers_per_block=transformer_layers_per_block,
layers_per_block=layers_per_block,
masked_cross_attention=masked_cross_attention,
micro_conditioning_scale=micro_conditioning_scale,
addition_embed_type=addition_embed_type,
time_embedding_dim=time_embedding_dim,
temporal_mode=temporal_mode,
temporal_spatial_ds=temporal_spatial_ds,
use_attention_ffn=use_attention_ffn,
sample_size=sample_size,
)
# self.config.inner_config.conditioning_feature_dim = self.config.conditioning_feature_dim
if "inner_config" not in self.config.inner_config:
self.inner_unet = MatryoshkaUNet2DConditionModel(**self.config.inner_config)
else:
self.inner_unet = NestedUNet2DConditionModel(**self.config.inner_config)
if not self.config.skip_inner_unet_input:
self.in_adapter = nn.Conv2d(
self.config.block_out_channels[-1],
self.config.inner_config["block_out_channels"][0],
kernel_size=3,
padding=1,
)
else:
self.in_adapter = None
self.out_adapter = nn.Conv2d(
self.config.inner_config["block_out_channels"][0],
self.config.block_out_channels[-1],
kernel_size=3,
padding=1,
)
self.is_temporal = [self.config.temporal_mode and (not self.config.temporal_spatial_ds)]
if hasattr(self.inner_unet, "is_temporal"):
self.is_temporal = self.is_temporal + self.inner_unet.is_temporal
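        # `nest_ratio` is the spatial downsampling factor between this (outer) UNet's input resolution and the
        # inner UNet's input resolution, i.e. 2**(number of downsampling stages in the outer UNet).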
nest_ratio = int(2 ** (len(self.config.block_out_channels) - 1))
if self.is_temporal[0]:
nest_ratio = int(np.sqrt(nest_ratio))
if self.inner_unet.config.nesting and self.inner_unet.model_type == "nested_unet":
self.nest_ratio = [nest_ratio * self.inner_unet.nest_ratio[0]] + self.inner_unet.nest_ratio
else:
self.nest_ratio = [nest_ratio]
# self.register_modules(inner_unet=self.inner_unet)
@property
def model_type(self):
return "nested_unet"
def forward(
self,
sample: torch.Tensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
cond_emb: Optional[torch.Tensor] = None,
from_nested: bool = False,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[MatryoshkaUNet2DConditionOutput, Tuple]:
r"""
The [`NestedUNet2DConditionModel`] forward method.
Args:
sample (`torch.Tensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.Tensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~NestedUNet2DConditionOutput`] instead of a plain
tuple.
Returns:
[`~NestedUNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~NestedUNet2DConditionOutput`] is returned,
otherwise a `tuple` is returned where the first element is the sample tensor.
"""
# By default samples have to be AT least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if self.config.nesting:
sample, sample_feat = sample
if isinstance(sample, list) and len(sample) == 1:
sample = sample[0]
# 2. input layer (normalize the input)
bsz = [x.size(0) for x in sample]
bh, bl = bsz[0], bsz[1]
x_t_low, sample = sample[1:], sample[0]
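        # `sample` arrives as a list ordered from the highest to the lowest resolution: keep the highest-resolution
        # noisy input here and hand the remaining lower-resolution inputs (`x_t_low`) to the inner UNet below.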
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
t_emb = self.get_time_embed(sample=sample, timestep=timestep)
emb = self.time_embedding(t_emb, timestep_cond)
class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
if class_emb is not None:
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.inner_unet.model_type == "unet":
added_cond_kwargs = added_cond_kwargs or {}
added_cond_kwargs["masked_cross_attention"] = self.inner_unet.config.masked_cross_attention
added_cond_kwargs["micro_conditioning_scale"] = self.config.micro_conditioning_scale
added_cond_kwargs["conditioning_mask"] = encoder_attention_mask
if not self.config.nesting:
encoder_hidden_states = self.inner_unet.process_encoder_hidden_states(
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
aug_emb_inner_unet, cond_mask, cond_emb = self.inner_unet.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
added_cond_kwargs["masked_cross_attention"] = self.config.masked_cross_attention
aug_emb, __, _ = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
else:
aug_emb, cond_mask, _ = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
elif self.inner_unet.model_type == "nested_unet":
added_cond_kwargs = added_cond_kwargs or {}
added_cond_kwargs["masked_cross_attention"] = self.inner_unet.inner_unet.config.masked_cross_attention
added_cond_kwargs["micro_conditioning_scale"] = self.config.micro_conditioning_scale
added_cond_kwargs["conditioning_mask"] = encoder_attention_mask
encoder_hidden_states = self.inner_unet.inner_unet.process_encoder_hidden_states(
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
aug_emb_inner_unet, cond_mask, cond_emb = self.inner_unet.inner_unet.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
aug_emb, __, _ = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
if self.config.addition_embed_type == "image_hint":
aug_emb, hint = aug_emb
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb + cond_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if not self.config.skip_normalization:
sample = sample / sample.std((1, 2, 3), keepdims=True)
if isinstance(sample, list) and len(sample) == 1:
sample = sample[0]
sample = self.conv_in(sample)
if self.config.nesting:
sample = sample + sample_feat
# we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
# to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
if cross_attention_kwargs is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
lora_scale = cross_attention_kwargs.pop("scale", 1.0)
else:
lora_scale = 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
                for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
# 3. downsample blocks in the outer layers
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb[:bh],
encoder_hidden_states=encoder_hidden_states[:bh],
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=cond_mask[:bh] if cond_mask is not None else cond_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
# 4. run inner unet
x_inner = self.in_adapter(sample) if self.in_adapter is not None else None
x_inner = (
torch.cat([x_inner, x_inner.new_zeros(bl - bh, *x_inner.size()[1:])], 0) if bh < bl else x_inner
) # pad zeros for low-resolutions
inner_unet_output = self.inner_unet(
(x_t_low, x_inner),
timestep,
cond_emb=cond_emb,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=cond_mask,
from_nested=True,
)
x_low, x_inner = inner_unet_output.sample, inner_unet_output.sample_inner
x_inner = self.out_adapter(x_inner)
sample = sample + x_inner[:bh] if bh < bl else sample + x_inner
# 5. upsample blocks in the outer layers
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb[:bh],
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states[:bh],
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=cond_mask[:bh] if cond_mask is not None else cond_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
)
# 6. post-process
if self.conv_norm_out:
sample_out = self.conv_norm_out(sample)
sample_out = self.conv_act(sample_out)
sample_out = self.conv_out(sample_out)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
# 7. output both low and high-res output
if isinstance(x_low, list):
out = [sample_out] + x_low
else:
out = [sample_out, x_low]
if self.config.nesting:
return NestedUNet2DConditionOutput(sample=out, sample_inner=sample)
if not return_dict:
return (out,)
else:
return NestedUNet2DConditionOutput(sample=out)
@dataclass
class MatryoshkaPipelineOutput(BaseOutput):
"""
Output class for Matryoshka pipelines.
Args:
images (`List[PIL.Image.Image]` or `np.ndarray`)
List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
"""
images: Union[List[Image.Image], List[List[Image.Image]], np.ndarray, List[np.ndarray]]
class MatryoshkaPipeline(
DiffusionPipeline,
StableDiffusionMixin,
TextualInversionLoaderMixin,
StableDiffusionLoraLoaderMixin,
IPAdapterMixin,
FromSingleFileMixin,
):
r"""
Pipeline for text-to-image generation using Matryoshka Diffusion Models.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
The pipeline also inherits the following loading methods:
- [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
- [`~loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
- [`~loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
- [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
- [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
Args:
text_encoder ([`~transformers.T5EncoderModel`]):
Frozen text-encoder ([flan-t5-xl](https://huggingface.co/google/flan-t5-xl)).
tokenizer ([`~transformers.T5Tokenizer`]):
A `T5Tokenizer` to tokenize text.
unet ([`MatryoshkaUNet2DConditionModel`]):
A `MatryoshkaUNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`MatryoshkaDDIMScheduler`] and other schedulers with proper modifications, see an example usage in README.md.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from input images; these are used as inputs to the
            `image_encoder`.
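    Examples:
        The snippet below is an illustrative sketch; `custom_pipeline="matryoshka"` and the generation arguments
        are assumptions, while the repository id is the one referenced elsewhere in this file.
        ```py
        import torch
        from diffusers import DiffusionPipeline
        pipe = DiffusionPipeline.from_pretrained(
            "tolgacangoz/matryoshka-diffusion-models",
            custom_pipeline="matryoshka",
            nesting_level=0,
            torch_dtype=torch.float16,
        ).to("cuda")
        image = pipe("a dog wearing a red hat", num_inference_steps=50).images[0]
        image.save("dog.png")
        ```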
"""
model_cpu_offload_seq = "text_encoder->image_encoder->unet"
_optional_components = ["unet", "feature_extractor", "image_encoder"]
_callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
def __init__(
self,
text_encoder: T5EncoderModel,
tokenizer: T5TokenizerFast,
scheduler: MatryoshkaDDIMScheduler,
unet: MatryoshkaUNet2DConditionModel = None,
feature_extractor: CLIPImageProcessor = None,
image_encoder: CLIPVisionModelWithProjection = None,
trust_remote_code: bool = False,
nesting_level: int = 0,
):
super().__init__()
if nesting_level == 0:
unet = MatryoshkaUNet2DConditionModel.from_pretrained(
"tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_0"
)
elif nesting_level == 1:
unet = NestedUNet2DConditionModel.from_pretrained(
"tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_1"
)
elif nesting_level == 2:
unet = NestedUNet2DConditionModel.from_pretrained(
"tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_2"
)
else:
raise ValueError("Currently, nesting levels 0, 1, and 2 are supported.")
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
# if scheduler is not None and getattr(scheduler.config, "clip_sample", False) is True:
# deprecation_message = (
# f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
# " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
# " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
# " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
# " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
# )
# deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
# new_config = dict(scheduler.config)
# new_config["clip_sample"] = False
# scheduler._internal_dict = FrozenDict(new_config)
is_unet_version_less_0_9_0 = (
unet is not None
and hasattr(unet.config, "_diffusers_version")
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
)
is_unet_sample_size_less_64 = (
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
)
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
if hasattr(unet, "nest_ratio"):
scheduler.scales = unet.nest_ratio + [1]
if nesting_level == 2:
scheduler.schedule_shifted_power = 2.0
self.register_modules(
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
)
self.register_to_config(nesting_level=nesting_level)
self.image_processor = VaeImageProcessor(do_resize=False)
def change_nesting_level(self, nesting_level: int):
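        r"""
        Switches the pipeline to a different Matryoshka nesting level (0, 1, or 2) by reloading the corresponding
        UNet weights from the Hub and updating the scheduler's `scales` and, where applicable, its
        `schedule_shifted_power`.
        """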
if nesting_level == 0:
if hasattr(self.unet, "nest_ratio"):
self.scheduler.scales = None
self.unet = MatryoshkaUNet2DConditionModel.from_pretrained(
"tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_0"
).to(self.device)
self.config.nesting_level = 0
elif nesting_level == 1:
self.unet = NestedUNet2DConditionModel.from_pretrained(
"tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_1"
).to(self.device)
self.config.nesting_level = 1
self.scheduler.scales = self.unet.nest_ratio + [1]
self.scheduler.schedule_shifted_power = 1.0
elif nesting_level == 2:
self.unet = NestedUNet2DConditionModel.from_pretrained(
"tolgacangoz/matryoshka-diffusion-models", subfolder="unet/nesting_level_2"
).to(self.device)
self.config.nesting_level = 2
self.scheduler.scales = self.unet.nest_ratio + [1]
self.scheduler.schedule_shifted_power = 2.0
else:
raise ValueError("Currently, nesting levels 0, 1, and 2 are supported.")
gc.collect()
torch.cuda.empty_cache()
def encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
lora_scale: Optional[float] = None,
clip_skip: Optional[int] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
lora_scale (`float`, *optional*):
A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
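        Example (an illustrative sketch; assumes `pipe` is an instantiated `MatryoshkaPipeline`):
        ```py
        prompt_embeds, negative_prompt_embeds, prompt_mask, negative_mask = pipe.encode_prompt(
            "a photo of a cat",
            device=pipe.device,
            num_images_per_prompt=1,
            do_classifier_free_guidance=True,
        )
        ```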
"""
# set lora scale so that monkey patched LoRA
# function of text encoder can correctly access it
if lora_scale is not None and isinstance(self, StableDiffusionLoraLoaderMixin):
self._lora_scale = lora_scale
# dynamically adjust the LoRA scale
if not USE_PEFT_BACKEND:
adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
else:
scale_lora_layers(self.text_encoder, lora_scale)
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because FLAN-T5-XL for this pipeline can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
prompt_attention_mask = text_inputs.attention_mask.to(device)
else:
prompt_attention_mask = None
if self.text_encoder is not None:
prompt_embeds_dtype = self.text_encoder.dtype
elif self.unet is not None:
prompt_embeds_dtype = self.unet.dtype
else:
prompt_embeds_dtype = prompt_embeds.dtype
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif prompt is not None and type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
uncond_input = self.tokenizer(
uncond_tokens,
return_tensors="pt",
)
uncond_input_ids = uncond_input.input_ids
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
negative_prompt_attention_mask = uncond_input.attention_mask.to(device)
else:
negative_prompt_attention_mask = None
if not do_classifier_free_guidance:
if clip_skip is None:
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)
prompt_embeds = prompt_embeds[0]
else:
prompt_embeds = self.text_encoder(
text_input_ids.to(device), attention_mask=prompt_attention_mask, output_hidden_states=True
)
# Access the `hidden_states` first, that contains a tuple of
# all the hidden states from the encoder layers. Then index into
# the tuple to access the hidden states from the desired layer.
prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
# We also need to apply the final LayerNorm here to not mess with the
# representations. The `last_hidden_states` that we typically use for
# obtaining the final prompt representations passes through the LayerNorm
# layer.
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
else:
max_len = max(len(text_input_ids[0]), len(uncond_input_ids[0]))
if len(text_input_ids[0]) < max_len:
text_input_ids = torch.cat(
[text_input_ids, torch.zeros(batch_size, max_len - len(text_input_ids[0]), dtype=torch.long)],
dim=1,
)
prompt_attention_mask = torch.cat(
[
prompt_attention_mask,
torch.zeros(
batch_size, max_len - len(prompt_attention_mask[0]), dtype=torch.long, device=device
),
],
dim=1,
)
elif len(uncond_input_ids[0]) < max_len:
uncond_input_ids = torch.cat(
[uncond_input_ids, torch.zeros(batch_size, max_len - len(uncond_input_ids[0]), dtype=torch.long)],
dim=1,
)
negative_prompt_attention_mask = torch.cat(
[
negative_prompt_attention_mask,
torch.zeros(
batch_size,
max_len - len(negative_prompt_attention_mask[0]),
dtype=torch.long,
device=device,
),
],
dim=1,
)
cfg_input_ids = torch.cat([uncond_input_ids, text_input_ids], dim=0)
cfg_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
prompt_embeds = self.text_encoder(
cfg_input_ids.to(device),
attention_mask=cfg_attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
if self.text_encoder is not None:
if isinstance(self, StableDiffusionLoraLoaderMixin) and USE_PEFT_BACKEND:
# Retrieve the original scale by scaling back the LoRA layers
unscale_lora_layers(self.text_encoder, lora_scale)
if not do_classifier_free_guidance:
return prompt_embeds, None, prompt_attention_mask, None
return prompt_embeds[1], prompt_embeds[0], prompt_attention_mask, negative_prompt_attention_mask
def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
dtype = next(self.image_encoder.parameters()).dtype
if not isinstance(image, torch.Tensor):
image = self.feature_extractor(image, return_tensors="pt").pixel_values
image = image.to(device=device, dtype=dtype)
if output_hidden_states:
image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_enc_hidden_states = self.image_encoder(
torch.zeros_like(image), output_hidden_states=True
).hidden_states[-2]
uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
num_images_per_prompt, dim=0
)
return image_enc_hidden_states, uncond_image_enc_hidden_states
else:
image_embeds = self.image_encoder(image).image_embeds
image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
uncond_image_embeds = torch.zeros_like(image_embeds)
return image_embeds, uncond_image_embeds
def prepare_ip_adapter_image_embeds(
self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
):
image_embeds = []
if do_classifier_free_guidance:
negative_image_embeds = []
if ip_adapter_image_embeds is None:
if not isinstance(ip_adapter_image, list):
ip_adapter_image = [ip_adapter_image]
if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
raise ValueError(
f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
)
for single_ip_adapter_image, image_proj_layer in zip(
ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
):
output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
single_image_embeds, single_negative_image_embeds = self.encode_image(
single_ip_adapter_image, device, 1, output_hidden_state
)
image_embeds.append(single_image_embeds[None, :])
if do_classifier_free_guidance:
negative_image_embeds.append(single_negative_image_embeds[None, :])
else:
for single_image_embeds in ip_adapter_image_embeds:
if do_classifier_free_guidance:
single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
negative_image_embeds.append(single_negative_image_embeds)
image_embeds.append(single_image_embeds)
ip_adapter_image_embeds = []
for i, single_image_embeds in enumerate(image_embeds):
single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
if do_classifier_free_guidance:
single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)
single_image_embeds = single_image_embeds.to(device=device)
ip_adapter_image_embeds.append(single_image_embeds)
return ip_adapter_image_embeds
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
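# Illustrative note (not part of the original file): the signature check above means, for example, that a
# DDIMScheduler, whose `step()` accepts both `eta` and `generator`, would receive
# {"eta": eta, "generator": generator}, while a scheduler whose `step()` accepts neither would receive an
# empty dict. A minimal sketch of the same idea:
#
#   accepted = set(inspect.signature(self.scheduler.step).parameters)
#   extra_step_kwargs = {k: v for k, v in {"eta": eta, "generator": generator}.items() if k in accepted}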
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
ip_adapter_image=None,
ip_adapter_image_embeds=None,
callback_on_step_end_tensor_inputs=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if callback_on_step_end_tensor_inputs is not None and not all(
k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
):
raise ValueError(
f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
raise ValueError(
"Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
)
if ip_adapter_image_embeds is not None:
if not isinstance(ip_adapter_image_embeds, list):
raise ValueError(
f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
)
elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
raise ValueError(
f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
)
def prepare_latents(
self, batch_size, num_channels_latents, height, width, dtype, device, generator, scales, latents=None
):
shape = (
batch_size,
num_channels_latents,
int(height),
int(width),
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
if scales is not None:
out = [latents]
for s in scales[1:]:
ratio = scales[0] // s
sample_low = F.avg_pool2d(latents, ratio) * ratio
sample_low = sample_low.normal_(generator=generator)
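# Note: `normal_()` refills the pooled tensor in place with N(0, 1) noise, so the averaged values themselves
# are discarded; the pooling appears to be used only to obtain a tensor with the correct low-resolution
# shape, dtype and device for this scale.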
out += [sample_low]
latents = out
else:
if scales is not None:
latents = [latent.to(device=device) for latent in latents]
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
if scales is not None:
latents = [latent * self.scheduler.init_noise_sigma for latent in latents]
else:
latents = latents * self.scheduler.init_noise_sigma
return latents
# Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
def get_guidance_scale_embedding(
self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
) -> torch.Tensor:
"""
See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
Args:
w (`torch.Tensor`):
Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
embedding_dim (`int`, *optional*, defaults to 512):
Dimension of the embeddings to generate.
dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
Data type of the generated embeddings.
Returns:
`torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
"""
assert len(w.shape) == 1
w = w * 1000.0
half_dim = embedding_dim // 2
emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
emb = w.to(dtype)[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1))
assert emb.shape == (w.shape[0], embedding_dim)
return emb
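# A small worked example of the embedding above (illustrative only): for w = torch.tensor([7.5]) and
# embedding_dim = 8, half_dim = 4, the frequencies are exp(-log(10000) * [0, 1, 2, 3] / 3), the scaled
# guidance value 7500.0 is multiplied by each frequency, and [sin(...), cos(...)] are concatenated into a
# tensor of shape (1, 8). In general the returned shape is (len(w), embedding_dim).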
@property
def guidance_scale(self):
return self._guidance_scale
@property
def guidance_rescale(self):
return self._guidance_rescale
@property
def clip_skip(self):
return self._clip_skip
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
@property
def do_classifier_free_guidance(self):
return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
@property
def cross_attention_kwargs(self):
return self._cross_attention_kwargs
@property
def num_timesteps(self):
return self._num_timesteps
@property
def interrupt(self):
return self._interrupt
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
timesteps: List[int] = None,
sigmas: List[float] = None,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
ip_adapter_image: Optional[PipelineImageInput] = None,
ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
guidance_rescale: float = 0.0,
clip_skip: Optional[int] = None,
callback_on_step_end: Optional[
Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
] = None,
callback_on_step_end_tensor_inputs: List[str] = ["latents"],
**kwargs,
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
height (`int`, *optional*, defaults to `self.unet.config.sample_size`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
sigmas (`List[float]`, *optional*):
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
will be used.
guidance_scale (`float`, *optional*, defaults to 7.5):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in image generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number of
IP-Adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
provided, embeddings are computed from the `ip_adapter_image` input argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
guidance_rescale (`float`, *optional*, defaults to 0.0):
Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
using zero terminal SNR.
clip_skip (`int`, *optional*):
Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
the output of the pre-final layer will be used for computing the prompt embeddings.
callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
each denoising step during inference, with the following arguments: `callback_on_step_end(self:
DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
callback_on_step_end_tensor_inputs (`List`, *optional*):
The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
`._callback_tensor_inputs` attribute of your pipeline class.
Examples:
Returns:
[`~MatryoshkaPipelineOutput`] or `tuple`:
If `return_dict` is `True`, a [`~MatryoshkaPipelineOutput`] is returned; otherwise a `tuple` is
returned whose only element is a list with the generated images.
"""
callback = kwargs.pop("callback", None)
callback_steps = kwargs.pop("callback_steps", None)
if callback is not None:
deprecate(
"callback",
"1.0.0",
"Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if callback_steps is not None:
deprecate(
"callback_steps",
"1.0.0",
"Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
)
if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
# 0. Default height and width to unet
height = height or self.unet.config.sample_size
width = width or self.unet.config.sample_size
# to deal with lora scaling and other possible forward hooks
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
ip_adapter_image,
ip_adapter_image_embeds,
callback_on_step_end_tensor_inputs,
)
self._guidance_scale = guidance_scale
self._guidance_rescale = guidance_rescale
self._clip_skip = clip_skip
self._cross_attention_kwargs = cross_attention_kwargs
self._interrupt = False
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# 3. Encode input prompt
lora_scale = (
self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
)
(
prompt_embeds,
negative_prompt_embeds,
prompt_attention_mask,
negative_prompt_attention_mask,
) = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=lora_scale,
clip_skip=self.clip_skip,
)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
if self.do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds.unsqueeze(0), prompt_embeds.unsqueeze(0)])
attention_masks = torch.cat([negative_prompt_attention_mask, prompt_attention_mask])
else:
attention_masks = prompt_attention_mask
prompt_embeds = prompt_embeds * attention_masks.unsqueeze(-1)
if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
image_embeds = self.prepare_ip_adapter_image_embeds(
ip_adapter_image,
ip_adapter_image_embeds,
device,
batch_size * num_images_per_prompt,
self.do_classifier_free_guidance,
)
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, timesteps, sigmas
)
timesteps = timesteps[:-1]
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
self.scheduler.scales,
latents,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
extra_step_kwargs |= {"use_clipped_model_output": True}
# 6.1 Add image embeds for IP-Adapter
added_cond_kwargs = (
{"image_embeds": image_embeds}
if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
else None
)
# 6.2 Optionally get Guidance Scale Embedding
timestep_cond = None
if self.unet.config.time_cond_proj_dim is not None:
guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
timestep_cond = self.get_guidance_scale_embedding(
guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
).to(device=device, dtype=latents.dtype)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
self._num_timesteps = len(timesteps)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
if self.interrupt:
continue
# expand the latents if we are doing classifier free guidance
if self.do_classifier_free_guidance and isinstance(latents, list):
latent_model_input = [latent.repeat(2, 1, 1, 1) for latent in latents]
elif self.do_classifier_free_guidance:
latent_model_input = latents.repeat(2, 1, 1, 1)
else:
latent_model_input = latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t - 1,
encoder_hidden_states=prompt_embeds,
timestep_cond=timestep_cond,
cross_attention_kwargs=self.cross_attention_kwargs,
added_cond_kwargs=added_cond_kwargs,
encoder_attention_mask=attention_masks,
return_dict=False,
)[0]
# perform guidance
if isinstance(noise_pred, list) and self.do_classifier_free_guidance:
# use a separate index so the outer denoising-step index `i` is not shadowed
for level_idx, (noise_pred_uncond, noise_pred_text) in enumerate(noise_pred):
noise_pred[level_idx] = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
elif self.do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
# Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
if callback_on_step_end is not None:
callback_kwargs = {}
for k in callback_on_step_end_tensor_inputs:
callback_kwargs[k] = locals()[k]
callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
latents = callback_outputs.pop("latents", latents)
prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
image = latents
if self.scheduler.scales is not None:
for i, img in enumerate(image):
image[i] = self.image_processor.postprocess(img, output_type=output_type)[0]
else:
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (image,)
return MatryoshkaPipelineOutput(images=image)
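# A hedged usage sketch (not from the original file): community pipelines in this folder are typically loaded
# via `custom_pipeline`; the checkpoint id below is an illustrative placeholder, not taken from this file.
#
# from diffusers import DiffusionPipeline
#
# pipe = DiffusionPipeline.from_pretrained(
#     "some/matryoshka-checkpoint",  # hypothetical model id
#     custom_pipeline="matryoshka",
# )
# images = pipe("a photo of an astronaut riding a horse", num_inference_steps=50).images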
| diffusers/examples/community/matryoshka.py/0 | {
"file_path": "diffusers/examples/community/matryoshka.py",
"repo_id": "diffusers",
"token_count": 100492
} |
import inspect
import os
import numpy as np
import torch
import torch.nn.functional as nnf
from PIL import Image
from torch.optim.adam import Adam
from tqdm import tqdm
from diffusers import StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
def retrieve_timesteps(
scheduler,
num_inference_steps=None,
device=None,
timesteps=None,
**kwargs,
):
"""
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
Args:
scheduler (`SchedulerMixin`):
The scheduler to get timesteps from.
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model. If used,
`timesteps` must be `None`.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
timesteps (`List[int]`, *optional*):
Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
must be `None`.
Returns:
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
second element is the number of inference steps.
"""
if timesteps is not None:
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accepts_timesteps:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" timestep schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
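# A minimal sketch of how `retrieve_timesteps` is typically used (illustrative; the scheduler class and step
# count are assumptions, not taken from this file):
#
# from diffusers import DDIMScheduler
#
# scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")
# timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=50, device="cpu")
# # or, with an explicit schedule when the scheduler's `set_timesteps` supports a `timesteps=` argument:
# # timesteps, num_inference_steps = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249], device="cpu")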
class NullTextPipeline(StableDiffusionPipeline):
def get_noise_pred(self, latents, t, context):
latents_input = torch.cat([latents] * 2)
guidance_scale = 7.5
noise_pred = self.unet(latents_input, t, encoder_hidden_states=context)["sample"]
noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
latents = self.prev_step(noise_pred, t, latents)
return latents
def get_noise_pred_single(self, latents, t, context):
noise_pred = self.unet(latents, t, encoder_hidden_states=context)["sample"]
return noise_pred
@torch.no_grad()
def image2latent(self, image_path):
image = Image.open(image_path).convert("RGB")
image = np.array(image)
image = torch.from_numpy(image).float() / 127.5 - 1
image = image.permute(2, 0, 1).unsqueeze(0).to(self.device)
latents = self.vae.encode(image)["latent_dist"].mean
latents = latents * 0.18215
return latents
@torch.no_grad()
def latent2image(self, latents):
latents = 1 / 0.18215 * latents.detach()
image = self.vae.decode(latents)["sample"].detach()
image = self.processor.postprocess(image, output_type="pil")[0]
return image
def prev_step(self, model_output, timestep, sample):
prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
alpha_prod_t_prev = (
self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod
)
beta_prod_t = 1 - alpha_prod_t
pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction
return prev_sample
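# The update above is the deterministic (eta = 0) DDIM step:
#   x0_pred  = (x_t - sqrt(1 - alpha_t) * eps) / sqrt(alpha_t)
#   x_{t-1}  = sqrt(alpha_{t-1}) * x0_pred + sqrt(1 - alpha_{t-1}) * eps
# where eps is `model_output` and alpha_t denotes the cumulative product of alphas at timestep t.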
def next_step(self, model_output, timestep, sample):
timestep, next_timestep = (
min(timestep - self.scheduler.config.num_train_timesteps // self.num_inference_steps, 999),
timestep,
)
alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod
alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]
beta_prod_t = 1 - alpha_prod_t
next_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
next_sample = alpha_prod_t_next**0.5 * next_original_sample + next_sample_direction
return next_sample
def null_optimization(self, latents, context, num_inner_steps, epsilon):
uncond_embeddings, cond_embeddings = context.chunk(2)
uncond_embeddings_list = []
latent_cur = latents[-1]
bar = tqdm(total=num_inner_steps * self.num_inference_steps)
for i in range(self.num_inference_steps):
uncond_embeddings = uncond_embeddings.clone().detach()
uncond_embeddings.requires_grad = True
optimizer = Adam([uncond_embeddings], lr=1e-2 * (1.0 - i / 100.0))
latent_prev = latents[len(latents) - i - 2]
t = self.scheduler.timesteps[i]
with torch.no_grad():
noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings)
for j in range(num_inner_steps):
noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings)
noise_pred = noise_pred_uncond + 7.5 * (noise_pred_cond - noise_pred_uncond)
latents_prev_rec = self.prev_step(noise_pred, t, latent_cur)
loss = nnf.mse_loss(latents_prev_rec, latent_prev)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_item = loss.item()
bar.update()
if loss_item < epsilon + i * 2e-5:
break
for j in range(j + 1, num_inner_steps):
bar.update()
uncond_embeddings_list.append(uncond_embeddings[:1].detach())
with torch.no_grad():
context = torch.cat([uncond_embeddings, cond_embeddings])
latent_cur = self.get_noise_pred(latent_cur, t, context)
bar.close()
return uncond_embeddings_list
@torch.no_grad()
def ddim_inversion_loop(self, latent, context):
self.scheduler.set_timesteps(self.num_inference_steps)
_, cond_embeddings = context.chunk(2)
all_latent = [latent]
latent = latent.clone().detach()
with torch.no_grad():
for i in range(0, self.num_inference_steps):
t = self.scheduler.timesteps[len(self.scheduler.timesteps) - i - 1]
noise_pred = self.unet(latent, t, encoder_hidden_states=cond_embeddings)["sample"]
latent = self.next_step(noise_pred, t, latent)
all_latent.append(latent)
return all_latent
def get_context(self, prompt):
uncond_input = self.tokenizer(
[""], padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
)
uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
text_input = self.tokenizer(
[prompt],
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
context = torch.cat([uncond_embeddings, text_embeddings])
return context
def invert(
self, image_path: str, prompt: str, num_inner_steps=10, early_stop_epsilon=1e-6, num_inference_steps=50
):
self.num_inference_steps = num_inference_steps
context = self.get_context(prompt)
latent = self.image2latent(image_path)
ddim_latents = self.ddim_inversion_loop(latent, context)
if os.path.exists(image_path + ".pt"):
uncond_embeddings = torch.load(image_path + ".pt")
else:
uncond_embeddings = self.null_optimization(ddim_latents, context, num_inner_steps, early_stop_epsilon)
uncond_embeddings = torch.stack(uncond_embeddings, 0)
torch.save(uncond_embeddings, image_path + ".pt")
return ddim_latents[-1], uncond_embeddings
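# The caching above means repeated calls on the same image reuse the saved "<image_path>.pt" embeddings
# instead of re-running null-text optimization; the method returns the final DDIM-inverted latent together
# with one optimized null-text embedding per denoising step.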
@torch.no_grad()
def __call__(
self,
prompt,
uncond_embeddings,
inverted_latent,
num_inference_steps: int = 50,
timesteps=None,
guidance_scale=7.5,
negative_prompt=None,
num_images_per_prompt=1,
generator=None,
latents=None,
prompt_embeds=None,
negative_prompt_embeds=None,
output_type="pil",
):
self._guidance_scale = guidance_scale
# 0. Default height and width to unet
height = self.unet.config.sample_size * self.vae_scale_factor
width = self.unet.config.sample_size * self.vae_scale_factor
# to deal with lora scaling and other possible forward hook
callback_steps = None
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameter
device = self._execution_device
# 3. Encode input prompt
prompt_embeds, _ = self.encode_prompt(
prompt,
device,
num_images_per_prompt,
self.do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
latents = inverted_latent
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
noise_pred_uncond = self.unet(latents, t, encoder_hidden_states=uncond_embeddings[i])["sample"]
noise_pred = self.unet(latents, t, encoder_hidden_states=prompt_embeds)["sample"]
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
progress_bar.update()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
0
]
else:
image = latents
image = self.image_processor.postprocess(
image, output_type=output_type, do_denormalize=[True] * image.shape[0]
)
# Offload all models
self.maybe_free_model_hooks()
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=False)
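# A hedged end-to-end sketch (the model id, scheduler settings and file names are illustrative assumptions,
# not part of the original file):
#
# from diffusers import DDIMScheduler
#
# scheduler = DDIMScheduler(num_train_timesteps=1000, beta_start=0.00085, beta_end=0.0120, set_alpha_to_one=False)
# pipeline = NullTextPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5", scheduler=scheduler, torch_dtype=torch.float16
# ).to("cuda")
# inverted_latent, uncond = pipeline.invert("input.png", "a photo of a cat", num_inference_steps=50)
# image = pipeline("a photo of a cat", uncond, inverted_latent, guidance_scale=7.5, num_inference_steps=50).images[0]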
| diffusers/examples/community/pipeline_null_text_inversion.py/0 | {
"file_path": "diffusers/examples/community/pipeline_null_text_inversion.py",
"repo_id": "diffusers",
"token_count": 5423
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import torchvision.transforms as T
from gmflow.gmflow import GMFlow
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers.image_processor import VaeImageProcessor
from diffusers.models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from diffusers.models.attention_processor import Attention, AttnProcessor
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.controlnet.pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import BaseOutput, deprecate, is_torch_xla_available, logging
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def coords_grid(b, h, w, homogeneous=False, device=None):
y, x = torch.meshgrid(torch.arange(h), torch.arange(w)) # [H, W]
stacks = [x, y]
if homogeneous:
ones = torch.ones_like(x) # [H, W]
stacks.append(ones)
grid = torch.stack(stacks, dim=0).float() # [2, H, W] or [3, H, W]
grid = grid[None].repeat(b, 1, 1, 1) # [B, 2, H, W] or [B, 3, H, W]
if device is not None:
grid = grid.to(device)
return grid
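# Example (illustrative): coords_grid(1, 2, 3) returns a (1, 2, 2, 3) tensor where grid[0, 0] holds the x
# coordinates [[0, 1, 2], [0, 1, 2]] and grid[0, 1] holds the y coordinates [[0, 0, 0], [1, 1, 1]].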
def bilinear_sample(img, sample_coords, mode="bilinear", padding_mode="zeros", return_mask=False):
# img: [B, C, H, W]
# sample_coords: [B, 2, H, W] in image scale
if sample_coords.size(1) != 2: # [B, H, W, 2]
sample_coords = sample_coords.permute(0, 3, 1, 2)
b, _, h, w = sample_coords.shape
# Normalize to [-1, 1]
x_grid = 2 * sample_coords[:, 0] / (w - 1) - 1
y_grid = 2 * sample_coords[:, 1] / (h - 1) - 1
grid = torch.stack([x_grid, y_grid], dim=-1) # [B, H, W, 2]
img = F.grid_sample(img, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
if return_mask:
mask = (x_grid >= -1) & (y_grid >= -1) & (x_grid <= 1) & (y_grid <= 1) # [B, H, W]
return img, mask
return img
def flow_warp(feature, flow, mask=False, mode="bilinear", padding_mode="zeros"):
b, c, h, w = feature.size()
assert flow.size(1) == 2
grid = coords_grid(b, h, w).to(flow.device) + flow # [B, 2, H, W]
grid = grid.to(feature.dtype)
return bilinear_sample(feature, grid, mode=mode, padding_mode=padding_mode, return_mask=mask)
def forward_backward_consistency_check(fwd_flow, bwd_flow, alpha=0.01, beta=0.5):
# fwd_flow, bwd_flow: [B, 2, H, W]
# alpha and beta values are following UnFlow
# (https://arxiv.org/abs/1711.07837)
assert fwd_flow.dim() == 4 and bwd_flow.dim() == 4
assert fwd_flow.size(1) == 2 and bwd_flow.size(1) == 2
flow_mag = torch.norm(fwd_flow, dim=1) + torch.norm(bwd_flow, dim=1) # [B, H, W]
warped_bwd_flow = flow_warp(bwd_flow, fwd_flow) # [B, 2, H, W]
warped_fwd_flow = flow_warp(fwd_flow, bwd_flow) # [B, 2, H, W]
diff_fwd = torch.norm(fwd_flow + warped_bwd_flow, dim=1) # [B, H, W]
diff_bwd = torch.norm(bwd_flow + warped_fwd_flow, dim=1)
threshold = alpha * flow_mag + beta
fwd_occ = (diff_fwd > threshold).float() # [B, H, W]
bwd_occ = (diff_bwd > threshold).float()
return fwd_occ, bwd_occ
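# In equation form, with F_f = fwd_flow and F_b = bwd_flow, a forward pixel is marked occluded when
#   || F_f + warp(F_b, F_f) || > alpha * (||F_f|| + ||F_b||) + beta
# and symmetrically for the backward direction, following the UnFlow consistency criterion.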
@torch.no_grad()
def get_warped_and_mask(flow_model, image1, image2, image3=None, pixel_consistency=False, device=None):
if image3 is None:
image3 = image1
padder = InputPadder(image1.shape, padding_factor=8)
image1, image2 = padder.pad(image1[None].to(device), image2[None].to(device))
results_dict = flow_model(
image1, image2, attn_splits_list=[2], corr_radius_list=[-1], prop_radius_list=[-1], pred_bidir_flow=True
)
flow_pr = results_dict["flow_preds"][-1] # [B, 2, H, W]
fwd_flow = padder.unpad(flow_pr[0]).unsqueeze(0) # [1, 2, H, W]
bwd_flow = padder.unpad(flow_pr[1]).unsqueeze(0) # [1, 2, H, W]
fwd_occ, bwd_occ = forward_backward_consistency_check(fwd_flow, bwd_flow) # [1, H, W] float
if pixel_consistency:
warped_image1 = flow_warp(image1, bwd_flow)
bwd_occ = torch.clamp(
bwd_occ + (abs(image2 - warped_image1).mean(dim=1) > 255 * 0.25).float(), 0, 1
).unsqueeze(0)
warped_results = flow_warp(image3, bwd_flow)
return warped_results, bwd_occ, bwd_flow
blur = T.GaussianBlur(kernel_size=(9, 9), sigma=(18, 18))
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
"""
Output class for text-to-video pipelines.
Args:
frames (`List[np.ndarray]` or `torch.Tensor`):
List of denoised frames (essentially images) as NumPy arrays of shape `(height, width, num_channels)` or as
a `torch` tensor. The length of the list denotes the video length (the number of frames).
"""
frames: Union[List[np.ndarray], torch.Tensor]
@torch.no_grad()
def find_flat_region(mask):
device = mask.device
kernel_x = torch.Tensor([[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]]).unsqueeze(0).unsqueeze(0).to(device)
kernel_y = torch.Tensor([[-1, -1, -1], [0, 0, 0], [1, 1, 1]]).unsqueeze(0).unsqueeze(0).to(device)
mask_ = F.pad(mask.unsqueeze(0), (1, 1, 1, 1), mode="replicate")
grad_x = torch.nn.functional.conv2d(mask_, kernel_x)
grad_y = torch.nn.functional.conv2d(mask_, kernel_y)
return ((abs(grad_x) + abs(grad_y)) == 0).float()[0]
class AttnState:
STORE = 0
LOAD = 1
LOAD_AND_STORE_PREV = 2
def __init__(self):
self.reset()
@property
def state(self):
return self.__state
@property
def timestep(self):
return self.__timestep
def set_timestep(self, t):
self.__timestep = t
def reset(self):
self.__state = AttnState.STORE
self.__timestep = 0
def to_load(self):
self.__state = AttnState.LOAD
def to_load_and_store_prev(self):
self.__state = AttnState.LOAD_AND_STORE_PREV
class CrossFrameAttnProcessor(AttnProcessor):
"""
Cross-frame attention processor. Each frame attends to the first frame and to the previous frame.
Args:
attn_state: Shared `AttnState` object tracking whether the processor should store the first-frame maps,
load them, or load them and also update the cached previous-frame maps.
"""
def __init__(self, attn_state: AttnState):
super().__init__()
self.attn_state = attn_state
self.first_maps = {}
self.prev_maps = {}
def __call__(self, attn: Attention, hidden_states, encoder_hidden_states=None, attention_mask=None, temb=None):
# Is self attention
if encoder_hidden_states is None:
t = self.attn_state.timestep
if self.attn_state.state == AttnState.STORE:
self.first_maps[t] = hidden_states.detach()
self.prev_maps[t] = hidden_states.detach()
res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
else:
if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
tmp = hidden_states.detach()
cross_map = torch.cat((self.first_maps[t], self.prev_maps[t]), dim=1)
res = super().__call__(attn, hidden_states, cross_map, attention_mask, temb)
if self.attn_state.state == AttnState.LOAD_AND_STORE_PREV:
self.prev_maps[t] = tmp
else:
res = super().__call__(attn, hidden_states, encoder_hidden_states, attention_mask, temb)
return res
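# Summary of the state machine above (descriptive comment, not part of the original file): in STORE mode the
# self-attention maps of the first frame are cached per timestep; in LOAD / LOAD_AND_STORE_PREV modes the
# current frame attends to the concatenation of the first-frame map and the previous-frame map, and
# LOAD_AND_STORE_PREV additionally replaces the cached previous-frame map with the current frame's hidden
# states for the next frame to use.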
def prepare_image(image):
if isinstance(image, torch.Tensor):
# Batch single image
if image.ndim == 3:
image = image.unsqueeze(0)
image = image.to(dtype=torch.float32)
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
return image
class RerenderAVideoPipeline(StableDiffusionControlNetImg2ImgPipeline):
r"""
Pipeline for video-to-video translation using Stable Diffusion with Rerender Algorithm.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets
as a list, the outputs from each ControlNet are added together to create one combined additional
conditioning.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
image_encoder=None,
requires_safety_checker: bool = True,
device=None,
):
super().__init__(
vae,
text_encoder,
tokenizer,
unet,
controlnet,
scheduler,
safety_checker,
feature_extractor,
image_encoder,
requires_safety_checker,
)
self.to(device)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
if isinstance(controlnet, (list, tuple)):
controlnet = MultiControlNetModel(controlnet)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
controlnet=controlnet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
self.control_image_processor = VaeImageProcessor(
vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
)
self.register_to_config(requires_safety_checker=requires_safety_checker)
self.attn_state = AttnState()
attn_processor_dict = {}
for k in unet.attn_processors.keys():
if k.startswith("up"):
attn_processor_dict[k] = CrossFrameAttnProcessor(self.attn_state)
else:
attn_processor_dict[k] = AttnProcessor()
self.unet.set_attn_processor(attn_processor_dict)
flow_model = GMFlow(
feature_channels=128,
num_scales=1,
upsample_factor=8,
num_head=1,
attention_type="swin",
ffn_dim_expansion=4,
num_transformer_layers=6,
).to(self.device)
checkpoint = torch.utils.model_zoo.load_url(
"https://huggingface.co/Anonymous-sub/Rerender/resolve/main/models/gmflow_sintel-0c07dcb3.pth",
map_location=lambda storage, loc: storage,
)
weights = checkpoint["model"] if "model" in checkpoint else checkpoint
flow_model.load_state_dict(weights, strict=False)
flow_model.eval()
self.flow_model = flow_model
# Modified from src/diffusers/pipelines/controlnet/pipeline_controlnet.StableDiffusionControlNetImg2ImgPipeline.check_inputs
def check_inputs(
self,
prompt,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
controlnet_conditioning_scale=1.0,
control_guidance_start=0.0,
control_guidance_end=1.0,
):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
# `prompt` needs more sophisticated handling when there are multiple
# conditionings.
if isinstance(self.controlnet, MultiControlNetModel):
if isinstance(prompt, list):
logger.warning(
f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
" prompts. The conditionings will be fixed across the prompts."
)
is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
self.controlnet, torch._dynamo.eval_frame.OptimizedModule
)
# Check `controlnet_conditioning_scale`
if (
isinstance(self.controlnet, ControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, ControlNetModel)
):
if not isinstance(controlnet_conditioning_scale, float):
raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
elif (
isinstance(self.controlnet, MultiControlNetModel)
or is_compiled
and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
):
if isinstance(controlnet_conditioning_scale, list):
if any(isinstance(i, list) for i in controlnet_conditioning_scale):
raise ValueError("A single batch of multiple conditionings are supported at the moment.")
elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
self.controlnet.nets
):
raise ValueError(
"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
" the same length as the number of controlnets"
)
else:
assert False
if len(control_guidance_start) != len(control_guidance_end):
raise ValueError(
f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
)
if isinstance(self.controlnet, MultiControlNetModel):
if len(control_guidance_start) != len(self.controlnet.nets):
raise ValueError(
f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
)
for start, end in zip(control_guidance_start, control_guidance_end):
if start >= end:
raise ValueError(
f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
)
if start < 0.0:
raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
if end > 1.0:
raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
# Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
def prepare_control_image(
self,
image,
width,
height,
batch_size,
num_images_per_prompt,
device,
dtype,
do_classifier_free_guidance=False,
guess_mode=False,
):
image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
image_batch_size = image.shape[0]
if image_batch_size == 1:
repeat_by = batch_size
else:
# image batch size is the same as prompt batch size
repeat_by = num_images_per_prompt
image = image.repeat_interleave(repeat_by, dim=0)
image = image.to(device=device, dtype=dtype)
if do_classifier_free_guidance and not guess_mode:
image = torch.cat([image] * 2)
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
def get_timesteps(self, num_inference_steps, strength, device):
# get the original timestep using init_timestep
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
return timesteps, num_inference_steps - t_start
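# Worked example: with num_inference_steps=50, strength=0.8 and a first-order scheduler,
# init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 50 - 40 = 10, so the last 40 entries of
# `self.scheduler.timesteps` are used and 40 denoising steps are reported back.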
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.prepare_latents
def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
)
image = image.to(device=device, dtype=dtype)
batch_size = batch_size * num_images_per_prompt
if image.shape[1] == 4:
init_latents = image
else:
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
elif isinstance(generator, list):
init_latents = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
]
init_latents = torch.cat(init_latents, dim=0)
else:
init_latents = self.vae.encode(image).latent_dist.sample(generator)
init_latents = self.vae.config.scaling_factor * init_latents
if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
# expand init_latents for batch_size
deprecation_message = (
f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
" images (`image`). Initial images are now duplicating to match the number of text prompts. Note"
" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update"
" your script to pass as many initial images as text prompts to suppress this warning."
)
deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
additional_image_per_prompt = batch_size // init_latents.shape[0]
init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
raise ValueError(
f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
)
else:
init_latents = torch.cat([init_latents], dim=0)
shape = init_latents.shape
noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
# get latents
init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
latents = init_latents
return latents
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
frames: Union[List[np.ndarray], torch.Tensor] = None,
control_frames: Union[List[np.ndarray], torch.Tensor] = None,
strength: float = 0.8,
num_inference_steps: int = 50,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
guess_mode: bool = False,
control_guidance_start: Union[float, List[float]] = 0.0,
control_guidance_end: Union[float, List[float]] = 1.0,
warp_start: Union[float, List[float]] = 0.0,
warp_end: Union[float, List[float]] = 0.3,
mask_start: Union[float, List[float]] = 0.5,
mask_end: Union[float, List[float]] = 0.8,
smooth_boundary: bool = True,
mask_strength: Union[float, List[float]] = 0.5,
inner_strength: Union[float, List[float]] = 0.9,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
frames (`List[np.ndarray]` or `torch.Tensor`): The input images to be used as the starting point for the image generation process.
control_frames (`List[np.ndarray]` or `torch.Tensor` or `Callable`): The ControlNet input images condition to provide guidance to the `unet` for generation, or any callable object that converts a frame into a control frame.
strength (`float`): SDEdit strength.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 0.8):
The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
to the residual in the original unet. If multiple ControlNets are specified in init, you can set the
corresponding scale as a list. Note that by default, we use a smaller conditioning scale for inpainting
than for [`~StableDiffusionControlNetPipeline.__call__`].
guess_mode (`bool`, *optional*, defaults to `False`):
In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
The percentage of total steps at which the controlnet starts applying.
control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
The percentage of total steps at which the controlnet stops applying.
warp_start (`float`): Shape-aware fusion start timestep.
warp_end (`float`): Shape-aware fusion end timestep.
mask_start (`float`): Pixel-aware fusion start timestep.
mask_end (`float`): Pixel-aware fusion end timestep.
smooth_boundary (`bool`): Smooth fusion boundary. Set `True` to prevent artifacts at boundary.
mask_strength (`float`): Pixel-aware fusion strength.
inner_strength (`float`): Pixel-aware fusion detail level.
Examples:
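Example (illustrative sketch; it assumes `pipe` is an already-constructed instance of this pipeline
with its ControlNet, VAE and optical-flow model loaded, and that `frames` and `control_frames` are
equally long lists of `np.ndarray` video frames and matching edge maps prepared by the caller):
```py
>>> result = pipe(
...     prompt="a watercolor painting of a man walking in the park",
...     frames=frames,
...     control_frames=control_frames,
...     strength=0.75,
...     num_inference_steps=20,
...     controlnet_conditioning_scale=0.7,
... )
>>> output_frames = result.frames  # one processed frame per input frame
```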
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
# align format for control guidance
if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
control_guidance_start = len(control_guidance_end) * [control_guidance_start]
elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
control_guidance_end = len(control_guidance_start) * [control_guidance_end]
elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
control_guidance_start, control_guidance_end = (
mult * [control_guidance_start],
mult * [control_guidance_end],
)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
controlnet_conditioning_scale,
control_guidance_start,
control_guidance_end,
)
# 2. Define call parameters
# Currently we only support 1 prompt
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
assert False
else:
assert False
num_images_per_prompt = 1
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
global_pool_conditions = (
controlnet.config.global_pool_conditions
if isinstance(controlnet, ControlNetModel)
else controlnet.nets[0].config.global_pool_conditions
)
guess_mode = guess_mode or global_pool_conditions
# 3. Encode input prompt
text_encoder_lora_scale = (
cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
)
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
lora_scale=text_encoder_lora_scale,
)
# 4. Process the first frame
height, width = None, None
output_frames = []
self.attn_state.reset()
# 4.1 prepare frames
image = self.image_processor.preprocess(frames[0]).to(dtype=self.dtype)
first_image = image[0] # C, H, W
# 4.2 Prepare controlnet_conditioning_image
# Currently we only support single control
if isinstance(controlnet, ControlNetModel):
control_image = self.prepare_control_image(
image=control_frames(frames[0]) if callable(control_frames) else control_frames[0],
width=width,
height=height,
batch_size=batch_size,
num_images_per_prompt=1,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
else:
assert False
# 4.3 Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
latent_timestep = timesteps[:1].repeat(batch_size)
# 4.4 Prepare latent variables
latents = self.prepare_latents(
image,
latent_timestep,
batch_size,
num_images_per_prompt,
prompt_embeds.dtype,
device,
generator,
)
# 4.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 4.6 Create tensor stating which controlnets to keep
controlnet_keep = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
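# controlnet_keep[i] is 1.0 while denoising step i falls inside the
# [control_guidance_start, control_guidance_end] window and 0.0 outside it, so ControlNet
# guidance can be restricted to a sub-range of the schedule.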
first_x0_list = []
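# first_x0_list collects the predicted x0 of this anchor frame at every denoising step; the
# per-frame loop below warps these predictions into later frames for shape-aware cross-frame fusion.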
# 4.7 Denoising loop
num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
self.attn_state.set_timestep(t.item())
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
# Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
alpha_prod_t = self.scheduler.alphas_cumprod[t]
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
first_x0 = pred_x0.detach()
first_x0_list.append(first_x0)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
first_result = image
prev_result = image
do_denormalize = [True] * image.shape[0]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
output_frames.append(image[0])
# 5. Process each frame
for idx in range(1, len(frames)):
image = frames[idx]
prev_image = frames[idx - 1]
control_image = control_frames(image) if callable(control_frames) else control_frames[idx]
# 5.1 prepare frames
image = self.image_processor.preprocess(image).to(dtype=self.dtype)
prev_image = self.image_processor.preprocess(prev_image).to(dtype=self.dtype)
warped_0, bwd_occ_0, bwd_flow_0 = get_warped_and_mask(
self.flow_model, first_image, image[0], first_result, False, self.device
)
blend_mask_0 = blur(F.max_pool2d(bwd_occ_0, kernel_size=9, stride=1, padding=4))
blend_mask_0 = torch.clamp(blend_mask_0 + bwd_occ_0, 0, 1)
warped_pre, bwd_occ_pre, bwd_flow_pre = get_warped_and_mask(
self.flow_model, prev_image[0], image[0], prev_result, False, self.device
)
blend_mask_pre = blur(F.max_pool2d(bwd_occ_pre, kernel_size=9, stride=1, padding=4))
blend_mask_pre = torch.clamp(blend_mask_pre + bwd_occ_pre, 0, 1)
warp_mask = 1 - F.max_pool2d(blend_mask_0, kernel_size=8)
warp_flow = F.interpolate(bwd_flow_0 / 8.0, scale_factor=1.0 / 8, mode="bilinear")
# 5.2 Prepare controlnet_conditioning_image
# Currently we only support single control
if isinstance(controlnet, ControlNetModel):
control_image = self.prepare_control_image(
image=control_image,
width=width,
height=height,
batch_size=batch_size,
num_images_per_prompt=1,
device=device,
dtype=controlnet.dtype,
do_classifier_free_guidance=do_classifier_free_guidance,
guess_mode=guess_mode,
)
else:
assert False
# 5.3 Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
latent_timestep = timesteps[:1].repeat(batch_size)
skip_t = int(num_inference_steps * (1 - strength))
warp_start_t = int(warp_start * num_inference_steps)
warp_end_t = int(warp_end * num_inference_steps)
mask_start_t = int(mask_start * num_inference_steps)
mask_end_t = int(mask_end * num_inference_steps)
# 5.4 Prepare latent variables
init_latents = self.prepare_latents(
image,
latent_timestep,
batch_size,
num_images_per_prompt,
prompt_embeds.dtype,
device,
generator,
)
# 5.5 Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 5.6 Create tensor stating which controlnets to keep
controlnet_keep = []
for i in range(len(timesteps)):
keeps = [
1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
for s, e in zip(control_guidance_start, control_guidance_end)
]
controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
# 5.7 Denoising loop
num_warmup_steps = len(timesteps) - cur_num_inference_steps * self.scheduler.order
def denoising_loop(latents, mask=None, xtrg=None, noise_rescale=None):
dir_xt = 0
latents_dtype = latents.dtype
with self.progress_bar(total=cur_num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
self.attn_state.set_timestep(t.item())
if i + skip_t >= mask_start_t and i + skip_t <= mask_end_t and xtrg is not None:
rescale = torch.maximum(1.0 - mask, (1 - mask**2) ** 0.5 * inner_strength)
if noise_rescale is not None:
rescale = (1.0 - mask) * (1 - noise_rescale) + rescale * noise_rescale
noise = randn_tensor(xtrg.shape, generator=generator, device=device, dtype=xtrg.dtype)
latents_ref = self.scheduler.add_noise(xtrg, noise, t)
latents = latents_ref * mask + (1.0 - mask) * (latents - dir_xt) + rescale * dir_xt
latents = latents.to(latents_dtype)
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# controlnet(s) inference
if guess_mode and do_classifier_free_guidance:
# Infer ControlNet only for the conditional batch.
control_model_input = latents
control_model_input = self.scheduler.scale_model_input(control_model_input, t)
controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
else:
control_model_input = latent_model_input
controlnet_prompt_embeds = prompt_embeds
if isinstance(controlnet_keep[i], list):
cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
else:
controlnet_cond_scale = controlnet_conditioning_scale
if isinstance(controlnet_cond_scale, list):
controlnet_cond_scale = controlnet_cond_scale[0]
cond_scale = controlnet_cond_scale * controlnet_keep[i]
down_block_res_samples, mid_block_res_sample = self.controlnet(
control_model_input,
t,
encoder_hidden_states=controlnet_prompt_embeds,
controlnet_cond=control_image,
conditioning_scale=cond_scale,
guess_mode=guess_mode,
return_dict=False,
)
if guess_mode and do_classifier_free_guidance:
# Inferred ControlNet only for the conditional batch.
# To apply the output of ControlNet to both the unconditional and conditional batches,
# add 0 to the unconditional batch to keep it unchanged.
down_block_res_samples = [
torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples
]
mid_block_res_sample = torch.cat(
[torch.zeros_like(mid_block_res_sample), mid_block_res_sample]
)
# predict the noise residual
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
down_block_additional_residuals=down_block_res_samples,
mid_block_additional_residual=mid_block_res_sample,
return_dict=False,
)[0]
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# Get pred_x0 from scheduler
alpha_prod_t = self.scheduler.alphas_cumprod[t]
beta_prod_t = 1 - alpha_prod_t
pred_x0 = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
if i + skip_t >= warp_start_t and i + skip_t <= warp_end_t:
# warp x_0
pred_x0 = (
flow_warp(first_x0_list[i], warp_flow, mode="nearest") * warp_mask
+ (1 - warp_mask) * pred_x0
)
# get x_t from x_0
latents = self.scheduler.add_noise(pred_x0, noise_pred, t).to(latents_dtype)
prev_t = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
if i == len(timesteps) - 1:
alpha_t_prev = 1.0
else:
alpha_t_prev = self.scheduler.alphas_cumprod[prev_t]
dir_xt = (1.0 - alpha_t_prev) ** 0.5 * noise_pred
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[
0
]
# call the callback, if provided
if i == len(timesteps) - 1 or (
(i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0
):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
return latents
if mask_start_t <= mask_end_t:
self.attn_state.to_load()
else:
self.attn_state.to_load_and_store_prev()
latents = denoising_loop(init_latents)
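# Pixel-aware fusion: blend the directly generated frame with flow-warped versions of the first
# and previous results (weighted by occlusion masks), re-encode the blend into latent space as
# `xtrg`, then run a second denoising pass constrained toward that blended target.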
if mask_start_t <= mask_end_t:
direct_result = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
blend_results = (1 - blend_mask_pre) * warped_pre + blend_mask_pre * direct_result
blend_results = (1 - blend_mask_0) * warped_0 + blend_mask_0 * blend_results
bwd_occ = 1 - torch.clamp(1 - bwd_occ_pre + 1 - bwd_occ_0, 0, 1)
blend_mask = blur(F.max_pool2d(bwd_occ, kernel_size=9, stride=1, padding=4))
blend_mask = 1 - torch.clamp(blend_mask + bwd_occ, 0, 1)
blend_results = blend_results.to(latents.dtype)
xtrg = self.vae.encode(blend_results).latent_dist.sample(generator)
xtrg = self.vae.config.scaling_factor * xtrg
blend_results_rec = self.vae.decode(xtrg / self.vae.config.scaling_factor, return_dict=False)[0]
xtrg_rec = self.vae.encode(blend_results_rec).latent_dist.sample(generator)
xtrg_rec = self.vae.config.scaling_factor * xtrg_rec
xtrg_ = xtrg + (xtrg - xtrg_rec)
blend_results_rec_new = self.vae.decode(xtrg_ / self.vae.config.scaling_factor, return_dict=False)[0]
tmp = (abs(blend_results_rec_new - blend_results).mean(dim=1, keepdims=True) > 0.25).float()
mask_x = F.max_pool2d(
(F.interpolate(tmp, scale_factor=1 / 8.0, mode="bilinear") > 0).float(),
kernel_size=3,
stride=1,
padding=1,
)
mask = 1 - F.max_pool2d(1 - blend_mask, kernel_size=8) # * (1-mask_x)
if smooth_boundary:
noise_rescale = find_flat_region(mask)
else:
noise_rescale = torch.ones_like(mask)
xtrg = (xtrg + (1 - mask_x) * (xtrg - xtrg_rec)) * mask
xtrg = xtrg.to(latents.dtype)
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps, cur_num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
self.attn_state.to_load_and_store_prev()
latents = denoising_loop(init_latents, mask * mask_strength, xtrg, noise_rescale)
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
else:
image = latents
prev_result = image
do_denormalize = [True] * image.shape[0]
image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
output_frames.append(image[0])
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return output_frames
return TextToVideoSDPipelineOutput(frames=output_frames)
class InputPadder:
"""Pads images such that dimensions are divisible by 8"""
def __init__(self, dims, mode="sintel", padding_factor=8):
self.ht, self.wd = dims[-2:]
pad_ht = (((self.ht // padding_factor) + 1) * padding_factor - self.ht) % padding_factor
pad_wd = (((self.wd // padding_factor) + 1) * padding_factor - self.wd) % padding_factor
if mode == "sintel":
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, pad_ht // 2, pad_ht - pad_ht // 2]
else:
self._pad = [pad_wd // 2, pad_wd - pad_wd // 2, 0, pad_ht]
def pad(self, *inputs):
return [F.pad(x, self._pad, mode="replicate") for x in inputs]
def unpad(self, x):
ht, wd = x.shape[-2:]
c = [self._pad[2], ht - self._pad[3], self._pad[0], wd - self._pad[1]]
return x[..., c[0] : c[1], c[2] : c[3]]
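# Minimal usage sketch for InputPadder, assuming a 4D torch tensor input; the values shown are
# illustrative only:
#
#   x = torch.randn(1, 3, 250, 498)
#   padder = InputPadder(x.shape, padding_factor=8)
#   (x_pad,) = padder.pad(x)        # spatial dims padded up to multiples of 8 -> (256, 504)
#   x_back = padder.unpad(x_pad)    # cropped back to the original (250, 498)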
| diffusers/examples/community/rerender_a_video.py/0 | {
"file_path": "diffusers/examples/community/rerender_a_video.py",
"repo_id": "diffusers",
"token_count": 27315
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Callable, List, Optional, Union
import numpy as np
import PIL.Image
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DiffusionPipeline, UNet2DConditionModel
from diffusers.configuration_utils import FrozenDict, deprecate
from diffusers.loaders import StableDiffusionLoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.pipelines.pipeline_utils import StableDiffusionMixin
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import (
StableDiffusionSafetyChecker,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
logging,
)
from diffusers.utils.torch_utils import randn_tensor
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
def prepare_mask_and_masked_image(image, mask):
"""
Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
``image`` and ``1`` for the ``mask``.
The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
Args:
image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
mask (_type_): The mask to apply to the image, i.e. regions to inpaint.
It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
Raises:
ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
(or the other way around).
Returns:
tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
dimensions: ``batch x channels x height x width``.
"""
if isinstance(image, torch.Tensor):
if not isinstance(mask, torch.Tensor):
raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
# Batch single image
if image.ndim == 3:
assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
image = image.unsqueeze(0)
# Batch and add channel dim for single mask
if mask.ndim == 2:
mask = mask.unsqueeze(0).unsqueeze(0)
# Batch single mask or add channel dim
if mask.ndim == 3:
# Single batched mask, no channel dim or single mask not batched but channel dim
if mask.shape[0] == 1:
mask = mask.unsqueeze(0)
# Batched masks no channel dim
else:
mask = mask.unsqueeze(1)
assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
# Check image is in [-1, 1]
if image.min() < -1 or image.max() > 1:
raise ValueError("Image should be in [-1, 1] range")
# Check mask is in [0, 1]
if mask.min() < 0 or mask.max() > 1:
raise ValueError("Mask should be in [0, 1] range")
# Binarize mask
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
# Image as float32
image = image.to(dtype=torch.float32)
elif isinstance(mask, torch.Tensor):
raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
else:
# preprocess image
if isinstance(image, (PIL.Image.Image, np.ndarray)):
image = [image]
if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
image = [np.array(i.convert("RGB"))[None, :] for i in image]
image = np.concatenate(image, axis=0)
elif isinstance(image, list) and isinstance(image[0], np.ndarray):
image = np.concatenate([i[None, :] for i in image], axis=0)
image = image.transpose(0, 3, 1, 2)
image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
# preprocess mask
if isinstance(mask, (PIL.Image.Image, np.ndarray)):
mask = [mask]
if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
mask = mask.astype(np.float32) / 255.0
elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
# masked_image = image * (mask >= 0.5)
masked_image = image
return mask, masked_image
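# Usage sketch for prepare_mask_and_masked_image, assuming `init_image` and `mask_image` are PIL
# images of the same size:
#
#   mask, masked_image = prepare_mask_and_masked_image(init_image, mask_image)
#   # mask: float32 tensor of shape (B, 1, H, W), binarized to {0, 1}
#   # masked_image: float32 tensor of shape (B, 3, H, W) in [-1, 1]; unlike classic inpainting,
#   # this RePaint variant keeps the full image (see the commented-out masking line above).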
class StableDiffusionRepaintPipeline(
DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, StableDiffusionLoraLoaderMixin
):
r"""
Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
In addition the pipeline inherits the following loading methods:
- *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]
- *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.load_lora_weights`]
as well as the following saving methods:
- *LoRA*: [`loaders.StableDiffusionLoraLoaderMixin.save_lora_weights`]
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
text_encoder ([`CLIPTextModel`]):
Frozen text-encoder. Stable Diffusion uses the text portion of
[CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
tokenizer (`CLIPTokenizer`):
Tokenizer of class
[CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
safety_checker ([`StableDiffusionSafetyChecker`]):
Classification module that estimates whether generated images could be considered offensive or harmful.
Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.
feature_extractor ([`CLIPImageProcessor`]):
Model that extracts features from generated images to be used as inputs for the `safety_checker`.
"""
_optional_components = ["safety_checker", "feature_extractor"]
def __init__(
self,
vae: AutoencoderKL,
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
safety_checker: StableDiffusionSafetyChecker,
feature_extractor: CLIPImageProcessor,
requires_safety_checker: bool = True,
):
super().__init__()
if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(scheduler.config)
new_config["steps_offset"] = 1
scheduler._internal_dict = FrozenDict(new_config)
if scheduler is not None and getattr(scheduler.config, "skip_prk_steps", True) is False:
deprecation_message = (
f"The configuration file of this scheduler: {scheduler} has not set the configuration"
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate(
"skip_prk_steps not set",
"1.0.0",
deprecation_message,
standard_warn=False,
)
new_config = dict(scheduler.config)
new_config["skip_prk_steps"] = True
scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None and requires_safety_checker:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
)
if safety_checker is not None and feature_extractor is None:
raise ValueError(
"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
)
is_unet_version_less_0_9_0 = (
unet is not None
and hasattr(unet.config, "_diffusers_version")
and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
)
is_unet_sample_size_less_64 = (
unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
)
if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
deprecation_message = (
"The configuration file of the unet has set the default `sample_size` to smaller than"
" 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
" following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
" CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
" \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
" configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
" in the config might lead to incorrect results in future versions. If you have downloaded this"
" checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
" the `unet/config.json` file"
)
deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
new_config = dict(unet.config)
new_config["sample_size"] = 64
unet._internal_dict = FrozenDict(new_config)
# Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
if unet is not None and unet.config.in_channels != 4:
logger.warning(
f"You have loaded a UNet with {unet.config.in_channels} input channels, whereas by default,"
f" {self.__class__} assumes that `pipeline.unet` has 4 input channels: 4 for `num_channels_latents`,"
". If you did not intend to modify"
" this behavior, please check whether you have loaded the right checkpoint."
)
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.register_to_config(requires_safety_checker=requires_safety_checker)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
def _encode_prompt(
self,
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device: (`torch.device`):
torch device
num_images_per_prompt (`int`):
number of images that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
"""
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = text_inputs.attention_mask.to(device)
else:
attention_mask = None
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask,
)
prompt_embeds = prompt_embeds[0]
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
# textual inversion: process multi-vector tokens if necessary
if isinstance(self, TextualInversionLoaderMixin):
uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
attention_mask = uncond_input.attention_mask.to(device)
else:
attention_mask = None
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device),
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
def run_safety_checker(self, image, device, dtype):
if self.safety_checker is not None:
safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
image, has_nsfw_concept = self.safety_checker(
images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
)
else:
has_nsfw_concept = None
return image, has_nsfw_concept
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
def decode_latents(self, latents):
latents = 1 / self.vae.config.scaling_factor * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
return image
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.check_inputs
def check_inputs(
self,
prompt,
height,
width,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(
self,
batch_size,
num_channels_latents,
height,
width,
dtype,
device,
generator,
latents=None,
):
shape = (
batch_size,
num_channels_latents,
height // self.vae_scale_factor,
width // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
def prepare_mask_latents(
self,
mask,
masked_image,
batch_size,
height,
width,
dtype,
device,
generator,
do_classifier_free_guidance,
):
# resize the mask to latents shape as we concatenate the mask to the latents
# we do that before converting to dtype to avoid breaking in case we're using cpu_offload
# and half precision
mask = torch.nn.functional.interpolate(
mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
)
mask = mask.to(device=device, dtype=dtype)
masked_image = masked_image.to(device=device, dtype=dtype)
# encode the mask image into latents space so we can concatenate it to the latents
if isinstance(generator, list):
masked_image_latents = [
self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i])
for i in range(batch_size)
]
masked_image_latents = torch.cat(masked_image_latents, dim=0)
else:
masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator)
masked_image_latents = self.vae.config.scaling_factor * masked_image_latents
# duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
if mask.shape[0] < batch_size:
if not batch_size % mask.shape[0] == 0:
raise ValueError(
"The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
" of masks that you pass is divisible by the total requested batch size."
)
mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
if masked_image_latents.shape[0] < batch_size:
if not batch_size % masked_image_latents.shape[0] == 0:
raise ValueError(
"The passed images and the required batch size don't match. Images are supposed to be duplicated"
f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
" Make sure the number of images that you pass is divisible by the total requested batch size."
)
masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
masked_image_latents = (
torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
)
# aligning device to prevent device errors when concating it with the latent model input
masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
return mask, masked_image_latents
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]] = None,
image: Union[torch.Tensor, PIL.Image.Image] = None,
mask_image: Union[torch.Tensor, PIL.Image.Image] = None,
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: int = 50,
jump_length: Optional[int] = 10,
jump_n_sample: Optional[int] = 10,
guidance_scale: float = 7.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_images_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
):
r"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will
be masked out with `mask_image` and repainted according to `prompt`.
mask_image (`PIL.Image.Image`):
`Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be
repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted
to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L)
instead of 3, so the expected shape would be `(B, H, W, 1)`.
height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
jump_length (`int`, *optional*, defaults to 10):
The number of steps taken forward in time before going backward in time for a single jump ("j" in
RePaint paper). Take a look at Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
jump_n_sample (`int`, *optional*, defaults to 10):
The number of times we will make forward time jump for a given chosen time sample. Take a look at
Figure 9 and 10 in https://arxiv.org/pdf/2201.09865.pdf.
guidance_scale (`float`, *optional*, defaults to 7.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
is less than `1`).
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
Examples:
```py
>>> import PIL
>>> import requests
>>> import torch
>>> from io import BytesIO
>>> from diffusers import DiffusionPipeline, RePaintScheduler
>>> def download_image(url):
... response = requests.get(url)
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
>>> base_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/"
>>> img_url = base_url + "overture-creations-5sI6fQgYIuo.png"
>>> mask_url = base_url + "overture-creations-5sI6fQgYIuo_mask.png"
>>> init_image = download_image(img_url).resize((512, 512))
>>> mask_image = download_image(mask_url).resize((512, 512))
>>> pipe = DiffusionPipeline.from_pretrained(
... "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, custom_pipeline="stable_diffusion_repaint",
... )
>>> pipe.scheduler = RePaintScheduler.from_config(pipe.scheduler.config)
>>> pipe = pipe.to("cuda")
>>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
>>> image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```
Returns:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
[`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
When returning a tuple, the first element is a list with the generated images, and the second element is a
list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
(nsfw) content, according to the `safety_checker`.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
# 1. Check inputs
self.check_inputs(
prompt,
height,
width,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
if image is None:
raise ValueError("`image` input cannot be undefined.")
if mask_image is None:
raise ValueError("`mask_image` input cannot be undefined.")
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
device,
num_images_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 4. Preprocess mask and image
mask, masked_image = prepare_mask_and_masked_image(image, mask_image)
# 5. set timesteps
self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, device)
self.scheduler.eta = eta
timesteps = self.scheduler.timesteps
# latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
# 6. Prepare latent variables
num_channels_latents = self.vae.config.latent_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
num_channels_latents,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 7. Prepare mask latent variables
mask, masked_image_latents = self.prepare_mask_latents(
mask,
masked_image,
batch_size * num_images_per_prompt,
height,
width,
prompt_embeds.dtype,
device,
generator,
do_classifier_free_guidance=False, # We do not need duplicate mask and image
)
# 8. Check that sizes of mask, masked image and latents match
# num_channels_mask = mask.shape[1]
# num_channels_masked_image = masked_image_latents.shape[1]
if num_channels_latents != self.unet.config.in_channels:
raise ValueError(
f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} "
f" = Please verify the config of"
" `pipeline.unet` or your `mask_image` or `image` input."
)
# 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
t_last = timesteps[0] + 1
# 10. Denoising loop
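# RePaint-style resampling: the precomputed timestep schedule jumps back and forth in time.
# Whenever the schedule moves forward again (t >= t_last), the latents are re-noised with
# `scheduler.undo_step` instead of being denoised, which lets the known and inpainted
# regions be harmonized over several passes before denoising continues.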
with self.progress_bar(total=len(timesteps)) as progress_bar:
for i, t in enumerate(timesteps):
if t >= t_last:
# compute the reverse: x_t-1 -> x_t
latents = self.scheduler.undo_step(latents, t_last, generator)
progress_bar.update()
t_last = t
continue
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
# concat latents, mask, masked_image_latents in the channel dimension
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=prompt_embeds).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(
noise_pred,
t,
latents,
masked_image_latents,
mask,
**extra_step_kwargs,
).prev_sample
# call the callback, if provided
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
t_last = t
# 11. Post-processing
image = self.decode_latents(latents)
# 12. Run safety checker
image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
# 13. Convert to PIL
if output_type == "pil":
image = self.numpy_to_pil(image)
# Offload last model to CPU
if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
| diffusers/examples/community/stable_diffusion_repaint.py/0 | {
"file_path": "diffusers/examples/community/stable_diffusion_repaint.py",
"repo_id": "diffusers",
"token_count": 19673
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import sys
import tempfile
import safetensors
sys.path.append("..")
from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class TextToImageLCM(ExamplesTestsAccelerate):
def test_text_to_image_lcm_lora_sdxl(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
# save_pretrained smoke test
self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
# make sure the state_dict has the correct naming in the parameters.
lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
is_lora = all("lora" in k for k in lora_state_dict.keys())
self.assertTrue(is_lora)
def test_text_to_image_lcm_lora_sdxl_checkpointing(self):
with tempfile.TemporaryDirectory() as tmpdir:
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 7
--checkpointing_steps 2
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4", "checkpoint-6"},
)
test_args = f"""
examples/consistency_distillation/train_lcm_distill_lora_sdxl.py
--pretrained_teacher_model hf-internal-testing/tiny-stable-diffusion-xl-pipe
--dataset_name hf-internal-testing/dummy_image_text_data
--resolution 64
--lora_rank 4
--train_batch_size 1
--gradient_accumulation_steps 1
--max_train_steps 9
--checkpointing_steps 2
--resume_from_checkpoint latest
--learning_rate 5.0e-04
--scale_lr
--lr_scheduler constant
--lr_warmup_steps 0
--output_dir {tmpdir}
""".split()
run_command(self._launch_args + test_args)
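# Resuming from checkpoint-6 and training up to step 9 with checkpointing_steps=2 should
# add exactly one new checkpoint (checkpoint-8) on top of the existing ones.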
self.assertEqual(
{x for x in os.listdir(tmpdir) if "checkpoint" in x},
{"checkpoint-2", "checkpoint-4", "checkpoint-6", "checkpoint-8"},
)
| diffusers/examples/consistency_distillation/test_lcm_lora.py/0 | {
"file_path": "diffusers/examples/consistency_distillation/test_lcm_lora.py",
"repo_id": "diffusers",
"token_count": 2105
} |
# Research projects
This folder contains various research projects using 🧨 Diffusers.
They are not really maintained by the core maintainers of this library and often require a specific version of Diffusers that is indicated in the requirements file of each folder.
Updating them to the most recent version of the library will require some work.
To use any of them, just run the command
```sh
pip install -r requirements.txt
```
inside the folder of your choice.
If you need help with any of those, please open an issue where you directly ping the author(s), as indicated at the top of the README of each folder.
| diffusers/examples/research_projects/README.md/0 | {
"file_path": "diffusers/examples/research_projects/README.md",
"repo_id": "diffusers",
"token_count": 144
} |
import os
import random
import torch
import torchvision.transforms as transforms
from PIL import Image
def recalculate_box_and_verify_if_valid(x, y, w, h, image_size, original_image_size, min_box_size):
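# Rescale a COCO box from the original image coordinates to the resized and center-cropped
# square of side `image_size`, clamp it to the crop, and reject it when its area fraction
# of the crop falls below `min_box_size`.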
scale = image_size / min(original_image_size)
crop_y = (original_image_size[1] * scale - image_size) // 2
crop_x = (original_image_size[0] * scale - image_size) // 2
x0 = max(x * scale - crop_x, 0)
y0 = max(y * scale - crop_y, 0)
x1 = min((x + w) * scale - crop_x, image_size)
y1 = min((y + h) * scale - crop_y, image_size)
if (x1 - x0) * (y1 - y0) / (image_size * image_size) < min_box_size:
return False, (None, None, None, None)
return True, (x0, y0, x1, y1)
class COCODataset(torch.utils.data.Dataset):
def __init__(
self,
data_path,
image_path,
image_size=512,
min_box_size=0.01,
max_boxes_per_data=8,
tokenizer=None,
):
super().__init__()
self.min_box_size = min_box_size
self.max_boxes_per_data = max_boxes_per_data
self.image_size = image_size
self.image_path = image_path
self.tokenizer = tokenizer
self.transforms = transforms.Compose(
[
transforms.Resize(image_size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
self.data_list = torch.load(data_path, map_location="cpu")
def __getitem__(self, index):
if self.max_boxes_per_data > 99:
assert False, "Are you sure you want to set such a large number of boxes per image?"
out = {}
data = self.data_list[index]
image = Image.open(os.path.join(self.image_path, data["file_path"])).convert("RGB")
original_image_size = image.size
out["pixel_values"] = self.transforms(image)
annos = data["annos"]
areas, valid_annos = [], []
for anno in annos:
# x, y, w, h = anno['bbox']
x0, y0, x1, y1 = anno["bbox"]
x, y, w, h = x0, y0, x1 - x0, y1 - y0
valid, (x0, y0, x1, y1) = recalculate_box_and_verify_if_valid(
x, y, w, h, self.image_size, original_image_size, self.min_box_size
)
if valid:
anno["bbox"] = [x0, y0, x1, y1]
areas.append((x1 - x0) * (y1 - y0))
valid_annos.append(anno)
# Sort according to area and choose the largest N objects
wanted_idxs = torch.tensor(areas).sort(descending=True)[1]
wanted_idxs = wanted_idxs[: self.max_boxes_per_data]
valid_annos = [valid_annos[i] for i in wanted_idxs]
out["boxes"] = torch.zeros(self.max_boxes_per_data, 4)
out["masks"] = torch.zeros(self.max_boxes_per_data)
out["text_embeddings_before_projection"] = torch.zeros(self.max_boxes_per_data, 768)
for i, anno in enumerate(valid_annos):
out["boxes"][i] = torch.tensor(anno["bbox"]) / self.image_size
out["masks"][i] = 1
out["text_embeddings_before_projection"][i] = anno["text_embeddings_before_projection"]
prob_drop_boxes = 0.1
if random.random() < prob_drop_boxes:
out["masks"][:] = 0
caption = random.choice(data["captions"])
prob_drop_captions = 0.5
if random.random() < prob_drop_captions:
caption = ""
caption = self.tokenizer(
caption,
max_length=self.tokenizer.model_max_length,
padding="max_length",
truncation=True,
return_tensors="pt",
)
out["caption"] = caption
return out
def __len__(self):
return len(self.data_list)
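# Minimal usage sketch (illustrative paths only; assumes the annotations were preprocessed
# into a torch-saved list and that a CLIP tokenizer is available):
#
#   from transformers import CLIPTokenizer
#   tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#   dataset = COCODataset(data_path="coco_annos.pth", image_path="coco/train2017", tokenizer=tokenizer)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=4, shuffle=True)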
| diffusers/examples/research_projects/gligen/dataset.py/0 | {
"file_path": "diffusers/examples/research_projects/gligen/dataset.py",
"repo_id": "diffusers",
"token_count": 1897
} |
import argparse
import itertools
import math
import os
import random
from pathlib import Path
from typing import Iterable
import numpy as np
import PIL
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration, set_seed
from huggingface_hub import create_repo, upload_folder
from neural_compressor.utils import logger
from packaging import version
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.utils import make_image_grid
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
def save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path):
logger.info("Saving embeddings")
learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_id]
learned_embeds_dict = {args.placeholder_token: learned_embeds.detach().cpu()}
torch.save(learned_embeds_dict, save_path)
def parse_args():
parser = argparse.ArgumentParser(description="Example of distillation for quantization on Textual Inversion.")
parser.add_argument(
"--save_steps",
type=int,
default=500,
help="Save learned_embeds.bin every X updates steps.",
)
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
)
parser.add_argument(
"--placeholder_token",
type=str,
default=None,
required=True,
help="A token to use as a placeholder for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--do_quantization", action="store_true", help="Whether or not to do quantization.")
parser.add_argument("--do_distillation", action="store_true", help="Whether or not to do distillation.")
parser.add_argument(
"--verify_loading", action="store_true", help="Whether or not to verify the loading of the quantized model."
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.train_data_dir is None:
raise ValueError("You must specify a train data directory.")
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
# Adapted from torch-ema https://github.com/fadel/pytorch_ema/blob/master/torch_ema/ema.py#L14
class EMAModel:
"""
Exponential Moving Average of models weights
"""
def __init__(self, parameters: Iterable[torch.nn.Parameter], decay=0.9999):
parameters = list(parameters)
self.shadow_params = [p.clone().detach() for p in parameters]
self.decay = decay
self.optimization_step = 0
def get_decay(self, optimization_step):
"""
Compute the decay factor for the exponential moving average.
"""
value = (1 + optimization_step) / (10 + optimization_step)
return 1 - min(self.decay, value)
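# Note: the value returned here is an interpolation factor rather than the decay itself;
# `step` stores it back into `self.decay` and moves each shadow parameter that fraction of
# the way towards the live parameter (s <- s - f * (s - p)).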
@torch.no_grad()
def step(self, parameters):
parameters = list(parameters)
self.optimization_step += 1
self.decay = self.get_decay(self.optimization_step)
for s_param, param in zip(self.shadow_params, parameters):
if param.requires_grad:
tmp = self.decay * (s_param - param)
s_param.sub_(tmp)
else:
s_param.copy_(param)
torch.cuda.empty_cache()
def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
"""
Copy current averaged parameters into given collection of parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored moving averages. If `None`, the
parameters with which this `ExponentialMovingAverage` was
initialized will be used.
"""
parameters = list(parameters)
for s_param, param in zip(self.shadow_params, parameters):
param.data.copy_(s_param.data)
def to(self, device=None, dtype=None) -> None:
r"""Move internal buffers of the ExponentialMovingAverage to `device`.
Args:
device: like `device` argument to `torch.Tensor.to`
"""
# .to() on the tensors handles None correctly
self.shadow_params = [
p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
for p in self.shadow_params
]
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
(
h,
w,
) = (
img.shape[0],
img.shape[1],
)
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
def freeze_params(params):
for param in params:
param.requires_grad = False
def generate_images(pipeline, prompt="", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
generator = torch.Generator(pipeline.device).manual_seed(seed)
images = pipeline(
prompt,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
generator=generator,
num_images_per_prompt=num_images_per_prompt,
).images
_rows = int(math.sqrt(num_images_per_prompt))
grid = make_image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
return grid
def main():
args = parse_args()
logging_dir = os.path.join(args.output_dir, args.logging_dir)
accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
log_with="tensorboard",
project_config=accelerator_project_config,
)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Load the tokenizer and add the placeholder token as an additional special token
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Load models and create wrapper for stable diffusion
noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
text_encoder = CLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="text_encoder",
revision=args.revision,
)
vae = AutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="vae",
revision=args.revision,
)
unet = UNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path,
subfolder="unet",
revision=args.revision,
)
train_unet = False
# Freeze vae and unet
freeze_params(vae.parameters())
if not args.do_quantization and not args.do_distillation:
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder.resize_token_embeddings(len(tokenizer))
# Initialise the newly added placeholder token with the embeddings of the initializer token
token_embeds = text_encoder.get_input_embeddings().weight.data
token_embeds[placeholder_token_id] = token_embeds[initializer_token_id]
freeze_params(unet.parameters())
# Freeze all parameters except for the token embeddings in text encoder
params_to_freeze = itertools.chain(
text_encoder.text_model.encoder.parameters(),
text_encoder.text_model.final_layer_norm.parameters(),
text_encoder.text_model.embeddings.position_embedding.parameters(),
)
freeze_params(params_to_freeze)
else:
train_unet = True
freeze_params(text_encoder.parameters())
if args.scale_lr:
args.learning_rate = (
args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
)
# Initialize the optimizer
optimizer = torch.optim.AdamW(
# only optimize the unet or embeddings of text_encoder
unet.parameters() if train_unet else text_encoder.get_input_embeddings().parameters(),
lr=args.learning_rate,
betas=(args.adam_beta1, args.adam_beta2),
weight_decay=args.adam_weight_decay,
eps=args.adam_epsilon,
)
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=args.placeholder_token,
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.train_batch_size, shuffle=True)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
num_training_steps=args.max_train_steps * accelerator.num_processes,
)
if not train_unet:
text_encoder = accelerator.prepare(text_encoder)
unet.to(accelerator.device)
unet.eval()
else:
unet = accelerator.prepare(unet)
text_encoder.to(accelerator.device)
text_encoder.eval()
optimizer, train_dataloader, lr_scheduler = accelerator.prepare(optimizer, train_dataloader, lr_scheduler)
# Move vae to device
vae.to(accelerator.device)
# Keep vae in eval mode as we don't train it
vae.eval()
compression_manager = None
def train_func(model):
if train_unet:
unet_ = model
text_encoder_ = text_encoder
else:
unet_ = unet
text_encoder_ = model
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
accelerator.init_trackers("textual_inversion", config=vars(args))
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
global_step = 0
if train_unet and args.use_ema:
ema_unet = EMAModel(unet_.parameters())
for epoch in range(args.num_train_epochs):
model.train()
train_loss = 0.0
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(model):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"]).latent_dist.sample().detach()
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn(latents.shape).to(latents.device)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device
).long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder_(batch["input_ids"])[0]
# Predict the noise residual
model_pred = unet_(noisy_latents, timesteps, encoder_hidden_states).sample
loss = F.mse_loss(model_pred, noise, reduction="none").mean([1, 2, 3]).mean()
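# When quantization or distillation is enabled, the Intel Neural Compressor callbacks
# augment the plain diffusion MSE loss (e.g. with the intermediate-layer distillation
# terms configured in `main`).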
if train_unet and compression_manager:
unet_inputs = {
"sample": noisy_latents,
"timestep": timesteps,
"encoder_hidden_states": encoder_hidden_states,
}
loss = compression_manager.callbacks.on_after_compute_loss(unet_inputs, model_pred, loss)
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
train_loss += avg_loss.item() / args.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if train_unet:
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(unet_.parameters(), args.max_grad_norm)
else:
# Zero out the gradients for all token embeddings except the newly added
# embeddings for the concept, as we only want to optimize the concept embeddings
if accelerator.num_processes > 1:
grads = text_encoder_.module.get_input_embeddings().weight.grad
else:
grads = text_encoder_.get_input_embeddings().weight.grad
# Get the index for tokens that we want to zero the grads for
index_grads_to_zero = torch.arange(len(tokenizer)) != placeholder_token_id
grads.data[index_grads_to_zero, :] = grads.data[index_grads_to_zero, :].fill_(0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
if train_unet and args.use_ema:
ema_unet.step(unet_.parameters())
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if not train_unet and global_step % args.save_steps == 0:
save_path = os.path.join(args.output_dir, f"learned_embeds-steps-{global_step}.bin")
save_progress(text_encoder_, placeholder_token_id, accelerator, args, save_path)
logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
progress_bar.set_postfix(**logs)
accelerator.log(logs, step=global_step)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
if train_unet and args.use_ema:
ema_unet.copy_to(unet_.parameters())
if not train_unet:
return text_encoder_
if not train_unet:
text_encoder = train_func(text_encoder)
else:
import copy
model = copy.deepcopy(unet)
confs = []
if args.do_quantization:
from neural_compressor import QuantizationAwareTrainingConfig
q_conf = QuantizationAwareTrainingConfig()
confs.append(q_conf)
if args.do_distillation:
teacher_model = copy.deepcopy(model)
def attention_fetcher(x):
return x.sample
layer_mappings = [
[
[
"conv_in",
]
],
[
[
"time_embedding",
]
],
[["down_blocks.0.attentions.0", attention_fetcher]],
[["down_blocks.0.attentions.1", attention_fetcher]],
[
[
"down_blocks.0.resnets.0",
]
],
[
[
"down_blocks.0.resnets.1",
]
],
[
[
"down_blocks.0.downsamplers.0",
]
],
[["down_blocks.1.attentions.0", attention_fetcher]],
[["down_blocks.1.attentions.1", attention_fetcher]],
[
[
"down_blocks.1.resnets.0",
]
],
[
[
"down_blocks.1.resnets.1",
]
],
[
[
"down_blocks.1.downsamplers.0",
]
],
[["down_blocks.2.attentions.0", attention_fetcher]],
[["down_blocks.2.attentions.1", attention_fetcher]],
[
[
"down_blocks.2.resnets.0",
]
],
[
[
"down_blocks.2.resnets.1",
]
],
[
[
"down_blocks.2.downsamplers.0",
]
],
[
[
"down_blocks.3.resnets.0",
]
],
[
[
"down_blocks.3.resnets.1",
]
],
[
[
"up_blocks.0.resnets.0",
]
],
[
[
"up_blocks.0.resnets.1",
]
],
[
[
"up_blocks.0.resnets.2",
]
],
[
[
"up_blocks.0.upsamplers.0",
]
],
[["up_blocks.1.attentions.0", attention_fetcher]],
[["up_blocks.1.attentions.1", attention_fetcher]],
[["up_blocks.1.attentions.2", attention_fetcher]],
[
[
"up_blocks.1.resnets.0",
]
],
[
[
"up_blocks.1.resnets.1",
]
],
[
[
"up_blocks.1.resnets.2",
]
],
[
[
"up_blocks.1.upsamplers.0",
]
],
[["up_blocks.2.attentions.0", attention_fetcher]],
[["up_blocks.2.attentions.1", attention_fetcher]],
[["up_blocks.2.attentions.2", attention_fetcher]],
[
[
"up_blocks.2.resnets.0",
]
],
[
[
"up_blocks.2.resnets.1",
]
],
[
[
"up_blocks.2.resnets.2",
]
],
[
[
"up_blocks.2.upsamplers.0",
]
],
[["up_blocks.3.attentions.0", attention_fetcher]],
[["up_blocks.3.attentions.1", attention_fetcher]],
[["up_blocks.3.attentions.2", attention_fetcher]],
[
[
"up_blocks.3.resnets.0",
]
],
[
[
"up_blocks.3.resnets.1",
]
],
[
[
"up_blocks.3.resnets.2",
]
],
[["mid_block.attentions.0", attention_fetcher]],
[
[
"mid_block.resnets.0",
]
],
[
[
"mid_block.resnets.1",
]
],
[
[
"conv_out",
]
],
]
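# Each entry names a UNet submodule whose output is matched (via an MSE loss) between the
# student and the frozen teacher during distillation; attention blocks go through
# `attention_fetcher` to extract the `.sample` tensor from their output objects.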
layer_names = [layer_mapping[0][0] for layer_mapping in layer_mappings]
if not set(layer_names).issubset([n[0] for n in model.named_modules()]):
raise ValueError(
"Provided model is not compatible with the default layer_mappings, "
'please use the model fine-tuned from "CompVis/stable-diffusion-v1-4", '
"or modify the layer_mappings variable to fit your model."
f"\nDefault layer_mappings are as such:\n{layer_mappings}"
)
from neural_compressor.config import DistillationConfig, IntermediateLayersKnowledgeDistillationLossConfig
distillation_criterion = IntermediateLayersKnowledgeDistillationLossConfig(
layer_mappings=layer_mappings,
loss_types=["MSE"] * len(layer_mappings),
loss_weights=[1.0 / len(layer_mappings)] * len(layer_mappings),
add_origin_loss=True,
)
d_conf = DistillationConfig(teacher_model=teacher_model, criterion=distillation_criterion)
confs.append(d_conf)
from neural_compressor.training import prepare_compression
compression_manager = prepare_compression(model, confs)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model
train_func(model)
compression_manager.callbacks.on_train_end()
# Save the resulting model and its corresponding configuration in the given directory
model.save(args.output_dir)
logger.info(f"Optimized model saved to: {args.output_dir}.")
# change to framework model for further use
model = model.model
# Create the pipeline using the trained modules and save it.
templates = imagenet_style_templates_small if args.learnable_property == "style" else imagenet_templates_small
prompt = templates[0].format(args.placeholder_token)
if accelerator.is_main_process:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
text_encoder=accelerator.unwrap_model(text_encoder),
vae=vae,
unet=accelerator.unwrap_model(unet),
tokenizer=tokenizer,
)
pipeline.save_pretrained(args.output_dir)
pipeline = pipeline.to(unet.device)
baseline_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
baseline_model_images.save(
os.path.join(args.output_dir, "{}_baseline_model.png".format("_".join(prompt.split())))
)
if not train_unet:
# Also save the newly trained embeddings
save_path = os.path.join(args.output_dir, "learned_embeds.bin")
save_progress(text_encoder, placeholder_token_id, accelerator, args, save_path)
else:
setattr(pipeline, "unet", accelerator.unwrap_model(model))
if args.do_quantization:
pipeline = pipeline.to(torch.device("cpu"))
optimized_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
optimized_model_images.save(
os.path.join(args.output_dir, "{}_optimized_model.png".format("_".join(prompt.split())))
)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
accelerator.end_training()
if args.do_quantization and args.verify_loading:
# Load the model obtained after Intel Neural Compressor quantization
from neural_compressor.utils.pytorch import load
loaded_model = load(args.output_dir, model=unet)
loaded_model.eval()
setattr(pipeline, "unet", loaded_model)
if args.do_quantization:
pipeline = pipeline.to(torch.device("cpu"))
loaded_model_images = generate_images(pipeline, prompt=prompt, seed=args.seed)
# PIL images do not support value comparison with `!=`; compare pixel data instead.
if not np.array_equal(np.array(loaded_model_images), np.array(optimized_model_images)):
    logger.info("The quantized model was not successfully loaded.")
else:
    logger.info("The quantized model was successfully loaded.")
if __name__ == "__main__":
main()
| diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py/0 | {
"file_path": "diffusers/examples/research_projects/intel_opts/textual_inversion_dfq/textual_inversion.py",
"repo_id": "diffusers",
"token_count": 18301
} |
## [Deprecated] Multi Token Textual Inversion
**IMPORTANT: This research project is deprecated. Multi Token Textual Inversion is now supported natively in [the official textual inversion example](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion#running-locally-with-pytorch).**
The author of this project is [Isamu Isozaki](https://github.com/isamu-isozaki) - please make sure to tag the author for issue and PRs as well as @patrickvonplaten.
We add multi-token support to textual inversion. The following options were added:
1. num_vec_per_token for the number of vectors used to reference that token
2. progressive_tokens for progressively training the token, starting from 1 vector and growing to 2, 3, and so on
3. progressive_tokens_max_steps for the max number of steps until we start full training
4. vector_shuffle to shuffle vectors
Feel free to add these options to your training! In practice, num_vec_per_token around 10 combined with vector_shuffle works great! A rough sketch of the idea is shown below.
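To make the idea concrete, here is a minimal standalone sketch (not the script's actual implementation; the sub-token names, embedding width, and helper function are made up for illustration) of how a single placeholder can be backed by several learnable vectors, with `progressive_tokens` limiting how many are active and `vector_shuffle` permuting them:
```python
from typing import Optional

import torch

num_vec_per_token = 4
# Hypothetical sub-tokens standing in for one placeholder like "<cat-toy>".
placeholder_tokens = [f"<cat-toy_{i}>" for i in range(num_vec_per_token)]

# One learnable embedding row per sub-token (768 is just an illustrative width).
embeddings = torch.nn.Parameter(torch.randn(num_vec_per_token, 768))


def get_placeholder_embeddings(vector_shuffle: bool = True, progressive_limit: Optional[int] = None):
    rows = embeddings
    if progressive_limit is not None:
        # progressive_tokens: start training with 1 vector and grow over time.
        rows = rows[:progressive_limit]
    if vector_shuffle:
        # vector_shuffle: randomize the order of the active vectors each step.
        rows = rows[torch.randperm(rows.shape[0])]
    return rows


print(get_placeholder_embeddings(progressive_limit=2).shape)  # torch.Size([2, 768])
```
In the real script the vectors live in the text encoder's token-embedding table and are the only parameters being optimized, as in standard textual inversion.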
## Textual Inversion fine-tuning example
[Textual inversion](https://arxiv.org/abs/2208.01618) is a method to personalize text2image models like stable diffusion on your own images using just 3-5 examples.
The `textual_inversion.py` script shows how to implement the training procedure and adapt it for stable diffusion.
## Running on Colab
Colab for training
[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
Colab for inference
[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb)
## Running locally with PyTorch
### Installing the dependencies
Before running the scripts, make sure to install the library's training dependencies:
**Important**
To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```
Then cd in the example folder and run
```bash
pip install -r requirements.txt
```
And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
```bash
accelerate config
```
### Cat toy example
You need to accept the model license before downloading or using the weights. In this example we'll use model version `v1-5`, so you'll need to visit [its card](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5), read the license and tick the checkbox if you agree.
You have to be a registered user in 🤗 Hugging Face Hub, and you'll also need to use an access token for the code to work. For more information on access tokens, please refer to [this section of the documentation](https://huggingface.co/docs/hub/security-tokens).
Run the following command to authenticate your token
```bash
huggingface-cli login
```
If you have already cloned the repo, then you won't need to go through these steps.
<br>
Now let's get our dataset. Download 3-4 images from [here](https://drive.google.com/drive/folders/1fmJMs25nxS_rSNqS5hTcRdLem_YQXbq5) and save them in a directory. This will be our training data.
And launch the training using
**___Note: Change the `resolution` to 768 if you are using the [stable-diffusion-2](https://huggingface.co/stabilityai/stable-diffusion-2) 768x768 model.___**
```bash
export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
export DATA_DIR="path-to-dir-containing-images"
accelerate launch textual_inversion.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$DATA_DIR \
--learnable_property="object" \
--placeholder_token="<cat-toy>" --initializer_token="toy" \
--resolution=512 \
--train_batch_size=1 \
--gradient_accumulation_steps=4 \
--max_train_steps=3000 \
--learning_rate=5.0e-04 --scale_lr \
--lr_scheduler="constant" \
--lr_warmup_steps=0 \
--output_dir="textual_inversion_cat"
```
A full training run takes ~1 hour on one V100 GPU.
### Inference
Once you have trained a model using above command, the inference can be done simply using the `StableDiffusionPipeline`. Make sure to include the `placeholder_token` in your prompt.
```python
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A <cat-toy> backpack"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack.png")
```
## Training with Flax/JAX
For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.
Before running the scripts, make sure to install the library's training dependencies:
```bash
pip install -U -r requirements_flax.txt
```
```bash
export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
export DATA_DIR="path-to-dir-containing-images"
python textual_inversion_flax.py \
--pretrained_model_name_or_path=$MODEL_NAME \
--train_data_dir=$DATA_DIR \
--learnable_property="object" \
--placeholder_token="<cat-toy>" --initializer_token="toy" \
--resolution=512 \
--train_batch_size=1 \
--max_train_steps=3000 \
--learning_rate=5.0e-04 --scale_lr \
--output_dir="textual_inversion_cat"
```
It should be at least 70% faster than the PyTorch script with the same configuration.
### Training with xformers:
You can enable memory efficient attention by [installing xFormers](https://github.com/facebookresearch/xformers#installing-xformers) and passing the `--enable_xformers_memory_efficient_attention` argument to the script. This is not available with the Flax/JAX implementation.
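For inference, memory-efficient attention can also be turned on for an already-loaded pipeline (a minimal sketch, assuming xFormers is installed, a CUDA GPU is available, and `model_id` points at your trained model):
```python
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.enable_xformers_memory_efficient_attention()  # requires xformers to be installed

image = pipe("A <cat-toy> backpack", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("cat-backpack-xformers.png")
```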
| diffusers/examples/research_projects/multi_token_textual_inversion/README.md/0 | {
"file_path": "diffusers/examples/research_projects/multi_token_textual_inversion/README.md",
"repo_id": "diffusers",
"token_count": 1854
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import hashlib
import pandas as pd
import torch
from transformers import T5EncoderModel
from diffusers import StableDiffusion3Pipeline
PROMPT = "a photo of sks dog"
MAX_SEQ_LENGTH = 77
LOCAL_DATA_DIR = "dog"
OUTPUT_PATH = "sample_embeddings.parquet"
def bytes_to_giga_bytes(bytes):
return bytes / 1024 / 1024 / 1024
def generate_image_hash(image_path):
with open(image_path, "rb") as f:
img_data = f.read()
return hashlib.sha256(img_data).hexdigest()
def load_sd3_pipeline():
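# Only the text encoders are needed to compute prompt embeddings, so the heavy T5 encoder is
# loaded in 8-bit and the transformer / VAE are skipped entirely, keeping memory usage low.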
id = "stabilityai/stable-diffusion-3-medium-diffusers"
text_encoder = T5EncoderModel.from_pretrained(id, subfolder="text_encoder_3", load_in_8bit=True, device_map="auto")
pipeline = StableDiffusion3Pipeline.from_pretrained(
id, text_encoder_3=text_encoder, transformer=None, vae=None, device_map="balanced"
)
return pipeline
@torch.no_grad()
def compute_embeddings(pipeline, prompt, max_sequence_length):
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = pipeline.encode_prompt(prompt=prompt, prompt_2=None, prompt_3=None, max_sequence_length=max_sequence_length)
print(
f"{prompt_embeds.shape=}, {negative_prompt_embeds.shape=}, {pooled_prompt_embeds.shape=}, {negative_pooled_prompt_embeds.shape}"
)
max_memory = bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
print(f"Max memory allocated: {max_memory:.3f} GB")
return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
def run(args):
pipeline = load_sd3_pipeline()
prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds = compute_embeddings(
pipeline, args.prompt, args.max_sequence_length
)
# Assumes that the images within `args.local_image_dir` have a JPEG extension. Change
# as needed.
image_paths = glob.glob(f"{args.local_data_dir}/*.jpeg")
data = []
for image_path in image_paths:
img_hash = generate_image_hash(image_path)
data.append(
(img_hash, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds)
)
# Create a DataFrame
embedding_cols = [
"prompt_embeds",
"negative_prompt_embeds",
"pooled_prompt_embeds",
"negative_pooled_prompt_embeds",
]
df = pd.DataFrame(
data,
columns=["image_hash"] + embedding_cols,
)
# Convert embedding lists to arrays (for proper storage in parquet)
for col in embedding_cols:
df[col] = df[col].apply(lambda x: x.cpu().numpy().flatten().tolist())
# Save the dataframe to a parquet file
df.to_parquet(args.output_path)
print(f"Data successfully serialized to {args.output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--prompt", type=str, default=PROMPT, help="The instance prompt.")
parser.add_argument(
"--max_sequence_length",
type=int,
default=MAX_SEQ_LENGTH,
help="Maximum sequence length to use for computing the embeddings. The more the higher computational costs.",
)
parser.add_argument(
"--local_data_dir", type=str, default=LOCAL_DATA_DIR, help="Path to the directory containing instance images."
)
parser.add_argument("--output_path", type=str, default=OUTPUT_PATH, help="Path to serialize the parquet file.")
args = parser.parse_args()
run(args)
| diffusers/examples/research_projects/sd3_lora_colab/compute_embeddings.py/0 | {
"file_path": "diffusers/examples/research_projects/sd3_lora_colab/compute_embeddings.py",
"repo_id": "diffusers",
"token_count": 1605
} |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import math
import os
import random
from pathlib import Path
import jax
import jax.numpy as jnp
import numpy as np
import optax
import torch
import torch.utils.checkpoint
import transformers
from datasets import load_dataset
from flax import jax_utils
from flax.training import train_state
from flax.training.common_utils import shard
from huggingface_hub import create_repo, upload_folder
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPImageProcessor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
from diffusers import (
FlaxAutoencoderKL,
FlaxDDPMScheduler,
FlaxPNDMScheduler,
FlaxStableDiffusionPipeline,
FlaxUNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
from diffusers.utils import check_min_version
# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.33.0.dev0")
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--revision",
type=str,
default=None,
required=False,
help="Revision of pretrained model identifier from huggingface.co/models.",
)
parser.add_argument(
"--variant",
type=str,
default=None,
help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
)
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help=(
"The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
" dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
" or to a folder containing files that 🤗 Datasets can understand."
),
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The config of the Dataset, leave as None if there's only one config.",
)
parser.add_argument(
"--train_data_dir",
type=str,
default=None,
help=(
"A folder containing the training data. Folder contents must follow the structure described in"
" https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file"
" must exist to provide the captions for the images. Ignored if `dataset_name` is specified."
),
)
parser.add_argument(
"--image_column", type=str, default="image", help="The column of the dataset containing an image."
)
parser.add_argument(
"--caption_column",
type=str,
default="text",
help="The column of the dataset containing a caption or a list of captions.",
)
parser.add_argument(
"--max_train_samples",
type=int,
default=None,
help=(
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
),
)
parser.add_argument(
"--output_dir",
type=str,
default="sd-model-finetuned",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument(
"--cache_dir",
type=str,
default=None,
help="The directory where the downloaded models and datasets will be stored.",
)
parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop",
default=False,
action="store_true",
help=(
"Whether to center crop the input images to the resolution. If not set, the images will be randomly"
" cropped. The images will be resized to the resolution first before cropping."
),
)
parser.add_argument(
"--random_flip",
action="store_true",
help="whether to randomly flip images horizontally",
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=False,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
),
)
parser.add_argument(
"--mixed_precision",
type=str,
default="no",
choices=["no", "fp16", "bf16"],
help=(
"Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument(
"--from_pt",
action="store_true",
default=False,
help="Flag to indicate whether to convert models from PyTorch.",
)
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
# Sanity checks
if args.dataset_name is None and args.train_data_dir is None:
raise ValueError("Need either a dataset name or a training folder.")
return args
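# A minimal launch sketch (values are illustrative; all flags are defined by this script's parser):
#   python train_text_to_image_flax.py \
#       --pretrained_model_name_or_path=<flax-stable-diffusion-checkpoint> \
#       --dataset_name="lambdalabs/naruto-blip-captions" \
#       --resolution=512 --train_batch_size=1 --max_train_steps=100 \
#       --output_dir="sd-model-finetuned"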
dataset_name_mapping = {
"lambdalabs/naruto-blip-captions": ("image", "text"),
}
def get_params_to_save(params):
return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
def main():
args = parse_args()
if args.report_to == "wandb" and args.hub_token is not None:
raise ValueError(
"You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
" Please use `huggingface-cli login` to authenticate with the Hub."
)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
transformers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if jax.process_index() == 0:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
if args.push_to_hub:
repo_id = create_repo(
repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
).repo_id
# Get the datasets: you can either provide your own training and evaluation files (see below)
# or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, data_dir=args.train_data_dir
)
else:
data_files = {}
if args.train_data_dir is not None:
data_files["train"] = os.path.join(args.train_data_dir, "**")
dataset = load_dataset(
"imagefolder",
data_files=data_files,
cache_dir=args.cache_dir,
)
# See more about loading custom images at
# https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder
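    # Illustrative `imagefolder` layout with captions (the actual column names are resolved below):
    #   train_data_dir/
    #       metadata.jsonl   # one JSON object per line, e.g. {"file_name": "0001.png", "text": "a caption"}
    #       0001.png
    #       0002.png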
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
column_names = dataset["train"].column_names
# 6. Get the column names for input/target.
dataset_columns = dataset_name_mapping.get(args.dataset_name, None)
if args.image_column is None:
image_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
image_column = args.image_column
if image_column not in column_names:
raise ValueError(
f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}"
)
if args.caption_column is None:
caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
caption_column = args.caption_column
if caption_column not in column_names:
raise ValueError(
f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}"
)
# Preprocessing the datasets.
# We need to tokenize input captions and transform the images.
def tokenize_captions(examples, is_train=True):
captions = []
for caption in examples[caption_column]:
if isinstance(caption, str):
captions.append(caption)
elif isinstance(caption, (list, np.ndarray)):
# take a random caption if there are multiple
captions.append(random.choice(caption) if is_train else caption[0])
else:
raise ValueError(
f"Caption column `{caption_column}` should contain either strings or lists of strings."
)
inputs = tokenizer(captions, max_length=tokenizer.model_max_length, padding="do_not_pad", truncation=True)
input_ids = inputs.input_ids
return input_ids
train_transforms = transforms.Compose(
[
transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution),
transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess_train(examples):
images = [image.convert("RGB") for image in examples[image_column]]
examples["pixel_values"] = [train_transforms(image) for image in images]
examples["input_ids"] = tokenize_captions(examples)
return examples
if args.max_train_samples is not None:
dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples))
# Set the training transforms
train_dataset = dataset["train"].with_transform(preprocess_train)
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
input_ids = [example["input_ids"] for example in examples]
padded_tokens = tokenizer.pad(
{"input_ids": input_ids}, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt"
)
batch = {
"pixel_values": pixel_values,
"input_ids": padded_tokens.input_ids,
}
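        # JAX consumes NumPy arrays (see `shard` in the training loop), so convert the collated torch tensors.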
batch = {k: v.numpy() for k, v in batch.items()}
return batch
total_train_batch_size = args.train_batch_size * jax.local_device_count()
train_dataloader = torch.utils.data.DataLoader(
train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=total_train_batch_size, drop_last=True
)
weight_dtype = jnp.float32
if args.mixed_precision == "fp16":
weight_dtype = jnp.float16
elif args.mixed_precision == "bf16":
weight_dtype = jnp.bfloat16
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(
args.pretrained_model_name_or_path,
from_pt=args.from_pt,
revision=args.revision,
subfolder="tokenizer",
)
text_encoder = FlaxCLIPTextModel.from_pretrained(
args.pretrained_model_name_or_path,
from_pt=args.from_pt,
revision=args.revision,
subfolder="text_encoder",
dtype=weight_dtype,
)
vae, vae_params = FlaxAutoencoderKL.from_pretrained(
args.pretrained_model_name_or_path,
from_pt=args.from_pt,
revision=args.revision,
subfolder="vae",
dtype=weight_dtype,
)
unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(
args.pretrained_model_name_or_path,
from_pt=args.from_pt,
revision=args.revision,
subfolder="unet",
dtype=weight_dtype,
)
# Optimization
if args.scale_lr:
args.learning_rate = args.learning_rate * total_train_batch_size
constant_scheduler = optax.constant_schedule(args.learning_rate)
adamw = optax.adamw(
learning_rate=constant_scheduler,
b1=args.adam_beta1,
b2=args.adam_beta2,
eps=args.adam_epsilon,
weight_decay=args.adam_weight_decay,
)
optimizer = optax.chain(
optax.clip_by_global_norm(args.max_grad_norm),
adamw,
)
state = train_state.TrainState.create(apply_fn=unet.__call__, params=unet_params, tx=optimizer)
noise_scheduler = FlaxDDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
noise_scheduler_state = noise_scheduler.create_state()
# Initialize our training
rng = jax.random.PRNGKey(args.seed)
train_rngs = jax.random.split(rng, jax.local_device_count())
def train_step(state, text_encoder_params, vae_params, batch, train_rng):
dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
def compute_loss(params):
# Convert images to latent space
vae_outputs = vae.apply(
{"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
)
latents = vae_outputs.latent_dist.sample(sample_rng)
# (NHWC) -> (NCHW)
latents = jnp.transpose(latents, (0, 3, 1, 2))
latents = latents * vae.config.scaling_factor
# Sample noise that we'll add to the latents
noise_rng, timestep_rng = jax.random.split(sample_rng)
noise = jax.random.normal(noise_rng, latents.shape)
# Sample a random timestep for each image
bsz = latents.shape[0]
timesteps = jax.random.randint(
timestep_rng,
(bsz,),
0,
noise_scheduler.config.num_train_timesteps,
)
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
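            # Closed form: noisy_latents = sqrt(alphas_cumprod[t]) * latents + sqrt(1 - alphas_cumprod[t]) * noise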
noisy_latents = noise_scheduler.add_noise(noise_scheduler_state, latents, noise, timesteps)
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(
batch["input_ids"],
params=text_encoder_params,
train=False,
)[0]
# Predict the noise residual and compute loss
model_pred = unet.apply(
{"params": params}, noisy_latents, timesteps, encoder_hidden_states, train=True
).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(noise_scheduler_state, latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
loss = (target - model_pred) ** 2
loss = loss.mean()
return loss
grad_fn = jax.value_and_grad(compute_loss)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
metrics = {"loss": loss}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return new_state, metrics, new_train_rng
# Create parallel version of the train step
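    # donate_argnums=(0,) donates the old TrainState's device buffers so XLA can reuse them for the updated state.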
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
# Replicate the train state on each device
state = jax_utils.replicate(state)
text_encoder_params = jax_utils.replicate(text_encoder.params)
vae_params = jax_utils.replicate(vae_params)
# Train!
num_update_steps_per_epoch = math.ceil(len(train_dataloader))
# Scheduler and math around the number of training steps.
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
epochs = tqdm(range(args.num_train_epochs), desc="Epoch ... ", position=0)
for epoch in epochs:
# ======================== Training ================================
train_metrics = []
steps_per_epoch = len(train_dataset) // total_train_batch_size
train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
# train
for batch in train_dataloader:
batch = shard(batch)
state, train_metric, train_rngs = p_train_step(state, text_encoder_params, vae_params, batch, train_rngs)
train_metrics.append(train_metric)
train_step_progress_bar.update(1)
global_step += 1
if global_step >= args.max_train_steps:
break
train_metric = jax_utils.unreplicate(train_metric)
train_step_progress_bar.close()
epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
# Create the pipeline using using the trained modules and save it.
if jax.process_index() == 0:
scheduler = FlaxPNDMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
)
safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker", from_pt=True
)
pipeline = FlaxStableDiffusionPipeline(
text_encoder=text_encoder,
vae=vae,
unet=unet,
tokenizer=tokenizer,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32"),
)
pipeline.save_pretrained(
args.output_dir,
params={
"text_encoder": get_params_to_save(text_encoder_params),
"vae": get_params_to_save(vae_params),
"unet": get_params_to_save(state.params),
"safety_checker": safety_checker.params,
},
)
if args.push_to_hub:
upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
ignore_patterns=["step_*", "epoch_*"],
)
if __name__ == "__main__":
main()
| diffusers/examples/text_to_image/train_text_to_image_flax.py/0 | {
"file_path": "diffusers/examples/text_to_image/train_text_to_image_flax.py",
"repo_id": "diffusers",
"token_count": 10030
} |
"""
This script requires you to build `LAVIS` from source, since the pip version doesn't have BLIP Diffusion. Follow instructions here: https://github.com/salesforce/LAVIS/tree/main.
"""
import argparse
import os
import tempfile
import torch
from lavis.models import load_model_and_preprocess
from transformers import CLIPTokenizer
from transformers.models.blip_2.configuration_blip_2 import Blip2Config
from diffusers import (
AutoencoderKL,
PNDMScheduler,
UNet2DConditionModel,
)
from diffusers.pipelines import BlipDiffusionPipeline
from diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor
from diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel
from diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel
BLIP2_CONFIG = {
"vision_config": {
"hidden_size": 1024,
"num_hidden_layers": 23,
"num_attention_heads": 16,
"image_size": 224,
"patch_size": 14,
"intermediate_size": 4096,
"hidden_act": "quick_gelu",
},
"qformer_config": {
"cross_attention_frequency": 1,
"encoder_hidden_size": 1024,
"vocab_size": 30523,
},
"num_query_tokens": 16,
}
blip2config = Blip2Config(**BLIP2_CONFIG)
def qformer_model_from_original_config():
qformer = Blip2QFormerModel(blip2config)
return qformer
def embeddings_from_original_checkpoint(model, diffuser_embeddings_prefix, original_embeddings_prefix):
embeddings = {}
embeddings.update(
{
f"{diffuser_embeddings_prefix}.word_embeddings.weight": model[
f"{original_embeddings_prefix}.word_embeddings.weight"
]
}
)
embeddings.update(
{
f"{diffuser_embeddings_prefix}.position_embeddings.weight": model[
f"{original_embeddings_prefix}.position_embeddings.weight"
]
}
)
embeddings.update(
{f"{diffuser_embeddings_prefix}.LayerNorm.weight": model[f"{original_embeddings_prefix}.LayerNorm.weight"]}
)
embeddings.update(
{f"{diffuser_embeddings_prefix}.LayerNorm.bias": model[f"{original_embeddings_prefix}.LayerNorm.bias"]}
)
return embeddings
def proj_layer_from_original_checkpoint(model, diffuser_proj_prefix, original_proj_prefix):
proj_layer = {}
proj_layer.update({f"{diffuser_proj_prefix}.dense1.weight": model[f"{original_proj_prefix}.dense1.weight"]})
proj_layer.update({f"{diffuser_proj_prefix}.dense1.bias": model[f"{original_proj_prefix}.dense1.bias"]})
proj_layer.update({f"{diffuser_proj_prefix}.dense2.weight": model[f"{original_proj_prefix}.dense2.weight"]})
proj_layer.update({f"{diffuser_proj_prefix}.dense2.bias": model[f"{original_proj_prefix}.dense2.bias"]})
proj_layer.update({f"{diffuser_proj_prefix}.LayerNorm.weight": model[f"{original_proj_prefix}.LayerNorm.weight"]})
proj_layer.update({f"{diffuser_proj_prefix}.LayerNorm.bias": model[f"{original_proj_prefix}.LayerNorm.bias"]})
return proj_layer
def attention_from_original_checkpoint(model, diffuser_attention_prefix, original_attention_prefix):
attention = {}
attention.update(
{
f"{diffuser_attention_prefix}.attention.query.weight": model[
f"{original_attention_prefix}.self.query.weight"
]
}
)
attention.update(
{f"{diffuser_attention_prefix}.attention.query.bias": model[f"{original_attention_prefix}.self.query.bias"]}
)
attention.update(
{f"{diffuser_attention_prefix}.attention.key.weight": model[f"{original_attention_prefix}.self.key.weight"]}
)
attention.update(
{f"{diffuser_attention_prefix}.attention.key.bias": model[f"{original_attention_prefix}.self.key.bias"]}
)
attention.update(
{
f"{diffuser_attention_prefix}.attention.value.weight": model[
f"{original_attention_prefix}.self.value.weight"
]
}
)
attention.update(
{f"{diffuser_attention_prefix}.attention.value.bias": model[f"{original_attention_prefix}.self.value.bias"]}
)
attention.update(
{f"{diffuser_attention_prefix}.output.dense.weight": model[f"{original_attention_prefix}.output.dense.weight"]}
)
attention.update(
{f"{diffuser_attention_prefix}.output.dense.bias": model[f"{original_attention_prefix}.output.dense.bias"]}
)
attention.update(
{
f"{diffuser_attention_prefix}.output.LayerNorm.weight": model[
f"{original_attention_prefix}.output.LayerNorm.weight"
]
}
)
attention.update(
{
f"{diffuser_attention_prefix}.output.LayerNorm.bias": model[
f"{original_attention_prefix}.output.LayerNorm.bias"
]
}
)
return attention
def output_layers_from_original_checkpoint(model, diffuser_output_prefix, original_output_prefix):
output_layers = {}
output_layers.update({f"{diffuser_output_prefix}.dense.weight": model[f"{original_output_prefix}.dense.weight"]})
output_layers.update({f"{diffuser_output_prefix}.dense.bias": model[f"{original_output_prefix}.dense.bias"]})
output_layers.update(
{f"{diffuser_output_prefix}.LayerNorm.weight": model[f"{original_output_prefix}.LayerNorm.weight"]}
)
output_layers.update(
{f"{diffuser_output_prefix}.LayerNorm.bias": model[f"{original_output_prefix}.LayerNorm.bias"]}
)
return output_layers
def encoder_from_original_checkpoint(model, diffuser_encoder_prefix, original_encoder_prefix):
encoder = {}
for i in range(blip2config.qformer_config.num_hidden_layers):
encoder.update(
attention_from_original_checkpoint(
model, f"{diffuser_encoder_prefix}.{i}.attention", f"{original_encoder_prefix}.{i}.attention"
)
)
encoder.update(
attention_from_original_checkpoint(
model, f"{diffuser_encoder_prefix}.{i}.crossattention", f"{original_encoder_prefix}.{i}.crossattention"
)
)
encoder.update(
{
f"{diffuser_encoder_prefix}.{i}.intermediate.dense.weight": model[
f"{original_encoder_prefix}.{i}.intermediate.dense.weight"
]
}
)
encoder.update(
{
f"{diffuser_encoder_prefix}.{i}.intermediate.dense.bias": model[
f"{original_encoder_prefix}.{i}.intermediate.dense.bias"
]
}
)
encoder.update(
{
f"{diffuser_encoder_prefix}.{i}.intermediate_query.dense.weight": model[
f"{original_encoder_prefix}.{i}.intermediate_query.dense.weight"
]
}
)
encoder.update(
{
f"{diffuser_encoder_prefix}.{i}.intermediate_query.dense.bias": model[
f"{original_encoder_prefix}.{i}.intermediate_query.dense.bias"
]
}
)
encoder.update(
output_layers_from_original_checkpoint(
model, f"{diffuser_encoder_prefix}.{i}.output", f"{original_encoder_prefix}.{i}.output"
)
)
encoder.update(
output_layers_from_original_checkpoint(
model, f"{diffuser_encoder_prefix}.{i}.output_query", f"{original_encoder_prefix}.{i}.output_query"
)
)
return encoder
def visual_encoder_layer_from_original_checkpoint(model, diffuser_prefix, original_prefix):
visual_encoder_layer = {}
visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm1.weight": model[f"{original_prefix}.ln_1.weight"]})
visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm1.bias": model[f"{original_prefix}.ln_1.bias"]})
visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm2.weight": model[f"{original_prefix}.ln_2.weight"]})
visual_encoder_layer.update({f"{diffuser_prefix}.layer_norm2.bias": model[f"{original_prefix}.ln_2.bias"]})
visual_encoder_layer.update(
{f"{diffuser_prefix}.self_attn.qkv.weight": model[f"{original_prefix}.attn.in_proj_weight"]}
)
visual_encoder_layer.update(
{f"{diffuser_prefix}.self_attn.qkv.bias": model[f"{original_prefix}.attn.in_proj_bias"]}
)
visual_encoder_layer.update(
{f"{diffuser_prefix}.self_attn.projection.weight": model[f"{original_prefix}.attn.out_proj.weight"]}
)
visual_encoder_layer.update(
{f"{diffuser_prefix}.self_attn.projection.bias": model[f"{original_prefix}.attn.out_proj.bias"]}
)
visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc1.weight": model[f"{original_prefix}.mlp.c_fc.weight"]})
visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc1.bias": model[f"{original_prefix}.mlp.c_fc.bias"]})
visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc2.weight": model[f"{original_prefix}.mlp.c_proj.weight"]})
visual_encoder_layer.update({f"{diffuser_prefix}.mlp.fc2.bias": model[f"{original_prefix}.mlp.c_proj.bias"]})
return visual_encoder_layer
def visual_encoder_from_original_checkpoint(model, diffuser_prefix, original_prefix):
visual_encoder = {}
visual_encoder.update(
{
f"{diffuser_prefix}.embeddings.class_embedding": model[f"{original_prefix}.class_embedding"]
.unsqueeze(0)
.unsqueeze(0)
}
)
visual_encoder.update(
{
f"{diffuser_prefix}.embeddings.position_embedding": model[
f"{original_prefix}.positional_embedding"
].unsqueeze(0)
}
)
visual_encoder.update(
{f"{diffuser_prefix}.embeddings.patch_embedding.weight": model[f"{original_prefix}.conv1.weight"]}
)
visual_encoder.update({f"{diffuser_prefix}.pre_layernorm.weight": model[f"{original_prefix}.ln_pre.weight"]})
visual_encoder.update({f"{diffuser_prefix}.pre_layernorm.bias": model[f"{original_prefix}.ln_pre.bias"]})
for i in range(blip2config.vision_config.num_hidden_layers):
visual_encoder.update(
visual_encoder_layer_from_original_checkpoint(
model, f"{diffuser_prefix}.encoder.layers.{i}", f"{original_prefix}.transformer.resblocks.{i}"
)
)
visual_encoder.update({f"{diffuser_prefix}.post_layernorm.weight": model["blip.ln_vision.weight"]})
visual_encoder.update({f"{diffuser_prefix}.post_layernorm.bias": model["blip.ln_vision.bias"]})
return visual_encoder
def qformer_original_checkpoint_to_diffusers_checkpoint(model):
qformer_checkpoint = {}
qformer_checkpoint.update(embeddings_from_original_checkpoint(model, "embeddings", "blip.Qformer.bert.embeddings"))
qformer_checkpoint.update({"query_tokens": model["blip.query_tokens"]})
qformer_checkpoint.update(proj_layer_from_original_checkpoint(model, "proj_layer", "proj_layer"))
qformer_checkpoint.update(
encoder_from_original_checkpoint(model, "encoder.layer", "blip.Qformer.bert.encoder.layer")
)
qformer_checkpoint.update(visual_encoder_from_original_checkpoint(model, "visual_encoder", "blip.visual_encoder"))
return qformer_checkpoint
def get_qformer(model):
print("loading qformer")
qformer = qformer_model_from_original_config()
qformer_diffusers_checkpoint = qformer_original_checkpoint_to_diffusers_checkpoint(model)
load_checkpoint_to_model(qformer_diffusers_checkpoint, qformer)
print("done loading qformer")
return qformer
def load_checkpoint_to_model(checkpoint, model):
with tempfile.NamedTemporaryFile(delete=False) as file:
torch.save(checkpoint, file.name)
del checkpoint
model.load_state_dict(torch.load(file.name), strict=False)
os.remove(file.name)
def save_blip_diffusion_model(model, args):
qformer = get_qformer(model)
qformer.eval()
text_encoder = ContextCLIPTextModel.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="text_encoder"
)
vae = AutoencoderKL.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="vae")
unet = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet")
vae.eval()
text_encoder.eval()
scheduler = PNDMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
set_alpha_to_one=False,
skip_prk_steps=True,
)
tokenizer = CLIPTokenizer.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="tokenizer")
image_processor = BlipImageProcessor()
blip_diffusion = BlipDiffusionPipeline(
tokenizer=tokenizer,
text_encoder=text_encoder,
vae=vae,
unet=unet,
scheduler=scheduler,
qformer=qformer,
image_processor=image_processor,
)
blip_diffusion.save_pretrained(args.checkpoint_path)
def main(args):
model, _, _ = load_model_and_preprocess("blip_diffusion", "base", device="cpu", is_eval=True)
save_blip_diffusion_model(model.state_dict(), args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
args = parser.parse_args()
main(args)
| diffusers/scripts/convert_blipdiffusion_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_blipdiffusion_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 5958
} |
import argparse
import torch
from diffusers import HunyuanDiT2DControlNetModel
def main(args):
state_dict = torch.load(args.pt_checkpoint_path, map_location="cpu")
if args.load_key != "none":
try:
state_dict = state_dict[args.load_key]
except KeyError:
raise KeyError(
f"{args.load_key} not found in the checkpoint."
"Please load from the following keys:{state_dict.keys()}"
)
device = "cuda"
model_config = HunyuanDiT2DControlNetModel.load_config(
"Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer"
)
model_config[
"use_style_cond_and_image_meta_size"
] = args.use_style_cond_and_image_meta_size ### version <= v1.1: True; version >= v1.2: False
print(model_config)
for key in state_dict:
print("local:", key)
model = HunyuanDiT2DControlNetModel.from_config(model_config).to(device)
for key in model.state_dict():
print("diffusers:", key)
num_layers = 19
for i in range(num_layers):
# attn1
        # Wqkv -> to_q, to_k, to_v
q, k, v = torch.chunk(state_dict[f"blocks.{i}.attn1.Wqkv.weight"], 3, dim=0)
q_bias, k_bias, v_bias = torch.chunk(state_dict[f"blocks.{i}.attn1.Wqkv.bias"], 3, dim=0)
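        # The fused projection stacks Q, K and V along the output dimension, so chunking dim=0 into three
        # equal parts recovers the separate to_q / to_k / to_v weights (and likewise for the biases).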
state_dict[f"blocks.{i}.attn1.to_q.weight"] = q
state_dict[f"blocks.{i}.attn1.to_q.bias"] = q_bias
state_dict[f"blocks.{i}.attn1.to_k.weight"] = k
state_dict[f"blocks.{i}.attn1.to_k.bias"] = k_bias
state_dict[f"blocks.{i}.attn1.to_v.weight"] = v
state_dict[f"blocks.{i}.attn1.to_v.bias"] = v_bias
state_dict.pop(f"blocks.{i}.attn1.Wqkv.weight")
state_dict.pop(f"blocks.{i}.attn1.Wqkv.bias")
# q_norm, k_norm -> norm_q, norm_k
state_dict[f"blocks.{i}.attn1.norm_q.weight"] = state_dict[f"blocks.{i}.attn1.q_norm.weight"]
state_dict[f"blocks.{i}.attn1.norm_q.bias"] = state_dict[f"blocks.{i}.attn1.q_norm.bias"]
state_dict[f"blocks.{i}.attn1.norm_k.weight"] = state_dict[f"blocks.{i}.attn1.k_norm.weight"]
state_dict[f"blocks.{i}.attn1.norm_k.bias"] = state_dict[f"blocks.{i}.attn1.k_norm.bias"]
state_dict.pop(f"blocks.{i}.attn1.q_norm.weight")
state_dict.pop(f"blocks.{i}.attn1.q_norm.bias")
state_dict.pop(f"blocks.{i}.attn1.k_norm.weight")
state_dict.pop(f"blocks.{i}.attn1.k_norm.bias")
# out_proj -> to_out
state_dict[f"blocks.{i}.attn1.to_out.0.weight"] = state_dict[f"blocks.{i}.attn1.out_proj.weight"]
state_dict[f"blocks.{i}.attn1.to_out.0.bias"] = state_dict[f"blocks.{i}.attn1.out_proj.bias"]
state_dict.pop(f"blocks.{i}.attn1.out_proj.weight")
state_dict.pop(f"blocks.{i}.attn1.out_proj.bias")
# attn2
        # kv_proj -> to_k, to_v
k, v = torch.chunk(state_dict[f"blocks.{i}.attn2.kv_proj.weight"], 2, dim=0)
k_bias, v_bias = torch.chunk(state_dict[f"blocks.{i}.attn2.kv_proj.bias"], 2, dim=0)
state_dict[f"blocks.{i}.attn2.to_k.weight"] = k
state_dict[f"blocks.{i}.attn2.to_k.bias"] = k_bias
state_dict[f"blocks.{i}.attn2.to_v.weight"] = v
state_dict[f"blocks.{i}.attn2.to_v.bias"] = v_bias
state_dict.pop(f"blocks.{i}.attn2.kv_proj.weight")
state_dict.pop(f"blocks.{i}.attn2.kv_proj.bias")
# q_proj -> to_q
state_dict[f"blocks.{i}.attn2.to_q.weight"] = state_dict[f"blocks.{i}.attn2.q_proj.weight"]
state_dict[f"blocks.{i}.attn2.to_q.bias"] = state_dict[f"blocks.{i}.attn2.q_proj.bias"]
state_dict.pop(f"blocks.{i}.attn2.q_proj.weight")
state_dict.pop(f"blocks.{i}.attn2.q_proj.bias")
# q_norm, k_norm -> norm_q, norm_k
state_dict[f"blocks.{i}.attn2.norm_q.weight"] = state_dict[f"blocks.{i}.attn2.q_norm.weight"]
state_dict[f"blocks.{i}.attn2.norm_q.bias"] = state_dict[f"blocks.{i}.attn2.q_norm.bias"]
state_dict[f"blocks.{i}.attn2.norm_k.weight"] = state_dict[f"blocks.{i}.attn2.k_norm.weight"]
state_dict[f"blocks.{i}.attn2.norm_k.bias"] = state_dict[f"blocks.{i}.attn2.k_norm.bias"]
state_dict.pop(f"blocks.{i}.attn2.q_norm.weight")
state_dict.pop(f"blocks.{i}.attn2.q_norm.bias")
state_dict.pop(f"blocks.{i}.attn2.k_norm.weight")
state_dict.pop(f"blocks.{i}.attn2.k_norm.bias")
# out_proj -> to_out
state_dict[f"blocks.{i}.attn2.to_out.0.weight"] = state_dict[f"blocks.{i}.attn2.out_proj.weight"]
state_dict[f"blocks.{i}.attn2.to_out.0.bias"] = state_dict[f"blocks.{i}.attn2.out_proj.bias"]
state_dict.pop(f"blocks.{i}.attn2.out_proj.weight")
state_dict.pop(f"blocks.{i}.attn2.out_proj.bias")
# switch norm 2 and norm 3
norm2_weight = state_dict[f"blocks.{i}.norm2.weight"]
norm2_bias = state_dict[f"blocks.{i}.norm2.bias"]
state_dict[f"blocks.{i}.norm2.weight"] = state_dict[f"blocks.{i}.norm3.weight"]
state_dict[f"blocks.{i}.norm2.bias"] = state_dict[f"blocks.{i}.norm3.bias"]
state_dict[f"blocks.{i}.norm3.weight"] = norm2_weight
state_dict[f"blocks.{i}.norm3.bias"] = norm2_bias
# norm1 -> norm1.norm
# default_modulation.1 -> norm1.linear
state_dict[f"blocks.{i}.norm1.norm.weight"] = state_dict[f"blocks.{i}.norm1.weight"]
state_dict[f"blocks.{i}.norm1.norm.bias"] = state_dict[f"blocks.{i}.norm1.bias"]
state_dict[f"blocks.{i}.norm1.linear.weight"] = state_dict[f"blocks.{i}.default_modulation.1.weight"]
state_dict[f"blocks.{i}.norm1.linear.bias"] = state_dict[f"blocks.{i}.default_modulation.1.bias"]
state_dict.pop(f"blocks.{i}.norm1.weight")
state_dict.pop(f"blocks.{i}.norm1.bias")
state_dict.pop(f"blocks.{i}.default_modulation.1.weight")
state_dict.pop(f"blocks.{i}.default_modulation.1.bias")
# mlp.fc1 -> ff.net.0, mlp.fc2 -> ff.net.2
state_dict[f"blocks.{i}.ff.net.0.proj.weight"] = state_dict[f"blocks.{i}.mlp.fc1.weight"]
state_dict[f"blocks.{i}.ff.net.0.proj.bias"] = state_dict[f"blocks.{i}.mlp.fc1.bias"]
state_dict[f"blocks.{i}.ff.net.2.weight"] = state_dict[f"blocks.{i}.mlp.fc2.weight"]
state_dict[f"blocks.{i}.ff.net.2.bias"] = state_dict[f"blocks.{i}.mlp.fc2.bias"]
state_dict.pop(f"blocks.{i}.mlp.fc1.weight")
state_dict.pop(f"blocks.{i}.mlp.fc1.bias")
state_dict.pop(f"blocks.{i}.mlp.fc2.weight")
state_dict.pop(f"blocks.{i}.mlp.fc2.bias")
# after_proj_list -> controlnet_blocks
state_dict[f"controlnet_blocks.{i}.weight"] = state_dict[f"after_proj_list.{i}.weight"]
state_dict[f"controlnet_blocks.{i}.bias"] = state_dict[f"after_proj_list.{i}.bias"]
state_dict.pop(f"after_proj_list.{i}.weight")
state_dict.pop(f"after_proj_list.{i}.bias")
# before_proj -> input_block
state_dict["input_block.weight"] = state_dict["before_proj.weight"]
state_dict["input_block.bias"] = state_dict["before_proj.bias"]
state_dict.pop("before_proj.weight")
state_dict.pop("before_proj.bias")
# pooler -> time_extra_emb
state_dict["time_extra_emb.pooler.positional_embedding"] = state_dict["pooler.positional_embedding"]
state_dict["time_extra_emb.pooler.k_proj.weight"] = state_dict["pooler.k_proj.weight"]
state_dict["time_extra_emb.pooler.k_proj.bias"] = state_dict["pooler.k_proj.bias"]
state_dict["time_extra_emb.pooler.q_proj.weight"] = state_dict["pooler.q_proj.weight"]
state_dict["time_extra_emb.pooler.q_proj.bias"] = state_dict["pooler.q_proj.bias"]
state_dict["time_extra_emb.pooler.v_proj.weight"] = state_dict["pooler.v_proj.weight"]
state_dict["time_extra_emb.pooler.v_proj.bias"] = state_dict["pooler.v_proj.bias"]
state_dict["time_extra_emb.pooler.c_proj.weight"] = state_dict["pooler.c_proj.weight"]
state_dict["time_extra_emb.pooler.c_proj.bias"] = state_dict["pooler.c_proj.bias"]
state_dict.pop("pooler.k_proj.weight")
state_dict.pop("pooler.k_proj.bias")
state_dict.pop("pooler.q_proj.weight")
state_dict.pop("pooler.q_proj.bias")
state_dict.pop("pooler.v_proj.weight")
state_dict.pop("pooler.v_proj.bias")
state_dict.pop("pooler.c_proj.weight")
state_dict.pop("pooler.c_proj.bias")
state_dict.pop("pooler.positional_embedding")
    # t_embedder -> time_extra_emb.timestep_embedder (`TimestepEmbedding`)
state_dict["time_extra_emb.timestep_embedder.linear_1.bias"] = state_dict["t_embedder.mlp.0.bias"]
state_dict["time_extra_emb.timestep_embedder.linear_1.weight"] = state_dict["t_embedder.mlp.0.weight"]
state_dict["time_extra_emb.timestep_embedder.linear_2.bias"] = state_dict["t_embedder.mlp.2.bias"]
state_dict["time_extra_emb.timestep_embedder.linear_2.weight"] = state_dict["t_embedder.mlp.2.weight"]
state_dict.pop("t_embedder.mlp.0.bias")
state_dict.pop("t_embedder.mlp.0.weight")
state_dict.pop("t_embedder.mlp.2.bias")
state_dict.pop("t_embedder.mlp.2.weight")
    # x_embedder -> pos_embed (`PatchEmbed`)
state_dict["pos_embed.proj.weight"] = state_dict["x_embedder.proj.weight"]
state_dict["pos_embed.proj.bias"] = state_dict["x_embedder.proj.bias"]
state_dict.pop("x_embedder.proj.weight")
state_dict.pop("x_embedder.proj.bias")
# mlp_t5 -> text_embedder
state_dict["text_embedder.linear_1.bias"] = state_dict["mlp_t5.0.bias"]
state_dict["text_embedder.linear_1.weight"] = state_dict["mlp_t5.0.weight"]
state_dict["text_embedder.linear_2.bias"] = state_dict["mlp_t5.2.bias"]
state_dict["text_embedder.linear_2.weight"] = state_dict["mlp_t5.2.weight"]
state_dict.pop("mlp_t5.0.bias")
state_dict.pop("mlp_t5.0.weight")
state_dict.pop("mlp_t5.2.bias")
state_dict.pop("mlp_t5.2.weight")
    # extra_embedder -> time_extra_emb.extra_embedder
state_dict["time_extra_emb.extra_embedder.linear_1.bias"] = state_dict["extra_embedder.0.bias"]
state_dict["time_extra_emb.extra_embedder.linear_1.weight"] = state_dict["extra_embedder.0.weight"]
state_dict["time_extra_emb.extra_embedder.linear_2.bias"] = state_dict["extra_embedder.2.bias"]
state_dict["time_extra_emb.extra_embedder.linear_2.weight"] = state_dict["extra_embedder.2.weight"]
state_dict.pop("extra_embedder.0.bias")
state_dict.pop("extra_embedder.0.weight")
state_dict.pop("extra_embedder.2.bias")
state_dict.pop("extra_embedder.2.weight")
# style_embedder
if model_config["use_style_cond_and_image_meta_size"]:
print(state_dict["style_embedder.weight"])
print(state_dict["style_embedder.weight"].shape)
state_dict["time_extra_emb.style_embedder.weight"] = state_dict["style_embedder.weight"][0:1]
state_dict.pop("style_embedder.weight")
model.load_state_dict(state_dict)
if args.save:
model.save_pretrained(args.output_checkpoint_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted pipeline or not."
)
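    # NOTE: argparse's `type=bool` converts any non-empty string (including "False") to True,
    # so passing `--save False` on the command line still saves the converted pipeline.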
parser.add_argument(
"--pt_checkpoint_path", default=None, type=str, required=True, help="Path to the .pt pretrained model."
)
parser.add_argument(
"--output_checkpoint_path",
default=None,
type=str,
required=False,
help="Path to the output converted diffusers pipeline.",
)
parser.add_argument(
"--load_key", default="none", type=str, required=False, help="The key to load from the pretrained .pt file"
)
parser.add_argument(
"--use_style_cond_and_image_meta_size",
type=bool,
default=False,
help="version <= v1.1: True; version >= v1.2: False",
)
args = parser.parse_args()
main(args)
| diffusers/scripts/convert_hunyuandit_controlnet_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_hunyuandit_controlnet_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 5703
} |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conversion script for the NCSNPP checkpoints."""
import argparse
import json
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
def convert_ncsnpp_checkpoint(checkpoint, config):
"""
    Takes an original NCSNPP checkpoint state dict and a UNet2DModel config, and returns the state dict
    converted to the diffusers layout.
"""
new_model_architecture = UNet2DModel(**config)
new_model_architecture.time_proj.W.data = checkpoint["all_modules.0.W"].data
new_model_architecture.time_proj.weight.data = checkpoint["all_modules.0.W"].data
new_model_architecture.time_embedding.linear_1.weight.data = checkpoint["all_modules.1.weight"].data
new_model_architecture.time_embedding.linear_1.bias.data = checkpoint["all_modules.1.bias"].data
new_model_architecture.time_embedding.linear_2.weight.data = checkpoint["all_modules.2.weight"].data
new_model_architecture.time_embedding.linear_2.bias.data = checkpoint["all_modules.2.bias"].data
new_model_architecture.conv_in.weight.data = checkpoint["all_modules.3.weight"].data
new_model_architecture.conv_in.bias.data = checkpoint["all_modules.3.bias"].data
new_model_architecture.conv_norm_out.weight.data = checkpoint[list(checkpoint.keys())[-4]].data
new_model_architecture.conv_norm_out.bias.data = checkpoint[list(checkpoint.keys())[-3]].data
new_model_architecture.conv_out.weight.data = checkpoint[list(checkpoint.keys())[-2]].data
new_model_architecture.conv_out.bias.data = checkpoint[list(checkpoint.keys())[-1]].data
module_index = 4
def set_attention_weights(new_layer, old_checkpoint, index):
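        # The original NIN layers store their weight matrices transposed relative to `torch.nn.Linear`, hence the `.T`.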
new_layer.query.weight.data = old_checkpoint[f"all_modules.{index}.NIN_0.W"].data.T
new_layer.key.weight.data = old_checkpoint[f"all_modules.{index}.NIN_1.W"].data.T
new_layer.value.weight.data = old_checkpoint[f"all_modules.{index}.NIN_2.W"].data.T
new_layer.query.bias.data = old_checkpoint[f"all_modules.{index}.NIN_0.b"].data
new_layer.key.bias.data = old_checkpoint[f"all_modules.{index}.NIN_1.b"].data
new_layer.value.bias.data = old_checkpoint[f"all_modules.{index}.NIN_2.b"].data
new_layer.proj_attn.weight.data = old_checkpoint[f"all_modules.{index}.NIN_3.W"].data.T
new_layer.proj_attn.bias.data = old_checkpoint[f"all_modules.{index}.NIN_3.b"].data
new_layer.group_norm.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.weight"].data
new_layer.group_norm.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.bias"].data
def set_resnet_weights(new_layer, old_checkpoint, index):
new_layer.conv1.weight.data = old_checkpoint[f"all_modules.{index}.Conv_0.weight"].data
new_layer.conv1.bias.data = old_checkpoint[f"all_modules.{index}.Conv_0.bias"].data
new_layer.norm1.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.weight"].data
new_layer.norm1.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_0.bias"].data
new_layer.conv2.weight.data = old_checkpoint[f"all_modules.{index}.Conv_1.weight"].data
new_layer.conv2.bias.data = old_checkpoint[f"all_modules.{index}.Conv_1.bias"].data
new_layer.norm2.weight.data = old_checkpoint[f"all_modules.{index}.GroupNorm_1.weight"].data
new_layer.norm2.bias.data = old_checkpoint[f"all_modules.{index}.GroupNorm_1.bias"].data
new_layer.time_emb_proj.weight.data = old_checkpoint[f"all_modules.{index}.Dense_0.weight"].data
new_layer.time_emb_proj.bias.data = old_checkpoint[f"all_modules.{index}.Dense_0.bias"].data
if new_layer.in_channels != new_layer.out_channels or new_layer.up or new_layer.down:
new_layer.conv_shortcut.weight.data = old_checkpoint[f"all_modules.{index}.Conv_2.weight"].data
new_layer.conv_shortcut.bias.data = old_checkpoint[f"all_modules.{index}.Conv_2.bias"].data
for i, block in enumerate(new_model_architecture.downsample_blocks):
has_attentions = hasattr(block, "attentions")
for j in range(len(block.resnets)):
set_resnet_weights(block.resnets[j], checkpoint, module_index)
module_index += 1
if has_attentions:
set_attention_weights(block.attentions[j], checkpoint, module_index)
module_index += 1
if hasattr(block, "downsamplers") and block.downsamplers is not None:
set_resnet_weights(block.resnet_down, checkpoint, module_index)
module_index += 1
block.skip_conv.weight.data = checkpoint[f"all_modules.{module_index}.Conv_0.weight"].data
block.skip_conv.bias.data = checkpoint[f"all_modules.{module_index}.Conv_0.bias"].data
module_index += 1
set_resnet_weights(new_model_architecture.mid_block.resnets[0], checkpoint, module_index)
module_index += 1
set_attention_weights(new_model_architecture.mid_block.attentions[0], checkpoint, module_index)
module_index += 1
set_resnet_weights(new_model_architecture.mid_block.resnets[1], checkpoint, module_index)
module_index += 1
for i, block in enumerate(new_model_architecture.up_blocks):
has_attentions = hasattr(block, "attentions")
for j in range(len(block.resnets)):
set_resnet_weights(block.resnets[j], checkpoint, module_index)
module_index += 1
if has_attentions:
set_attention_weights(
block.attentions[0], checkpoint, module_index
) # why can there only be a single attention layer for up?
module_index += 1
if hasattr(block, "resnet_up") and block.resnet_up is not None:
block.skip_norm.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
block.skip_norm.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
module_index += 1
block.skip_conv.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
block.skip_conv.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
module_index += 1
set_resnet_weights(block.resnet_up, checkpoint, module_index)
module_index += 1
new_model_architecture.conv_norm_out.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
new_model_architecture.conv_norm_out.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
module_index += 1
new_model_architecture.conv_out.weight.data = checkpoint[f"all_modules.{module_index}.weight"].data
new_model_architecture.conv_out.bias.data = checkpoint[f"all_modules.{module_index}.bias"].data
return new_model_architecture.state_dict()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default="/Users/arthurzucker/Work/diffusers/ArthurZ/diffusion_pytorch_model.bin",
type=str,
required=False,
help="Path to the checkpoint to convert.",
)
parser.add_argument(
"--config_file",
default="/Users/arthurzucker/Work/diffusers/ArthurZ/config.json",
type=str,
required=False,
help="The config json file corresponding to the architecture.",
)
parser.add_argument(
"--dump_path",
default="/Users/arthurzucker/Work/diffusers/ArthurZ/diffusion_model_new.pt",
type=str,
required=False,
help="Path to the output model.",
)
args = parser.parse_args()
checkpoint = torch.load(args.checkpoint_path, map_location="cpu")
with open(args.config_file) as f:
config = json.loads(f.read())
converted_checkpoint = convert_ncsnpp_checkpoint(
checkpoint,
config,
)
if "sde" in config:
del config["sde"]
model = UNet2DModel(**config)
model.load_state_dict(converted_checkpoint)
try:
scheduler = ScoreSdeVeScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
pipe = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| diffusers/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py/0 | {
"file_path": "diffusers/scripts/convert_ncsnpp_original_checkpoint_to_diffusers.py",
"repo_id": "diffusers",
"token_count": 3608
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
model,
model_args: tuple,
output_path: Path,
ordered_input_names,
output_names,
dynamic_axes,
opset,
use_external_data_format=False,
):
output_path.parent.mkdir(parents=True, exist_ok=True)
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
model,
model_args,
f=output_path.as_posix(),
input_names=ordered_input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
use_external_data_format=use_external_data_format,
enable_onnx_checker=True,
opset_version=opset,
)
else:
export(
model,
model_args,
f=output_path.as_posix(),
input_names=ordered_input_names,
output_names=output_names,
dynamic_axes=dynamic_axes,
do_constant_folding=True,
opset_version=opset,
)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
dtype = torch.float16 if fp16 else torch.float32
if fp16 and torch.cuda.is_available():
device = "cuda"
elif fp16 and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA")
else:
device = "cpu"
pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
output_path = Path(output_path)
# TEXT ENCODER
num_tokens = pipeline.text_encoder.config.max_position_embeddings
text_hidden_size = pipeline.text_encoder.config.hidden_size
text_input = pipeline.tokenizer(
"A sample prompt",
padding="max_length",
max_length=pipeline.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
onnx_export(
pipeline.text_encoder,
# casting to torch.int32 until the CLIP fix is released: https://github.com/huggingface/transformers/pull/18515/files
model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
output_path=output_path / "text_encoder" / "model.onnx",
ordered_input_names=["input_ids"],
output_names=["last_hidden_state", "pooler_output"],
dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
},
opset=opset,
)
del pipeline.text_encoder
# UNET
unet_in_channels = pipeline.unet.config.in_channels
unet_sample_size = pipeline.unet.config.sample_size
unet_path = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet,
model_args=(
torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
torch.randn(2).to(device=device, dtype=dtype),
torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
False,
),
output_path=unet_path,
ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
output_names=["out_sample"], # has to be different from "sample" for correct tracing
dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
},
opset=opset,
use_external_data_format=True, # UNet is > 2GB, so the weights need to be split
)
unet_model_path = str(unet_path.absolute().as_posix())
unet_dir = os.path.dirname(unet_model_path)
unet = onnx.load(unet_model_path)
# clean up existing tensor files
shutil.rmtree(unet_dir)
os.mkdir(unet_dir)
# collate external tensor files into one
onnx.save_model(
unet,
unet_model_path,
save_as_external_data=True,
all_tensors_to_one_file=True,
location="weights.pb",
convert_attribute=False,
)
del pipeline.unet
# VAE ENCODER
vae_encoder = pipeline.vae
vae_in_channels = vae_encoder.config.in_channels
vae_sample_size = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
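    # (torch.onnx.export traces `forward`, so point it at `encode` and sample the returned latent
    #  distribution so the traced graph exposes a plain tensor output)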
vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
onnx_export(
vae_encoder,
model_args=(
torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
False,
),
output_path=output_path / "vae_encoder" / "model.onnx",
ordered_input_names=["sample", "return_dict"],
output_names=["latent_sample"],
dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
},
opset=opset,
)
# VAE DECODER
vae_decoder = pipeline.vae
vae_latent_channels = vae_decoder.config.latent_channels
vae_out_channels = vae_decoder.config.out_channels
# forward only through the decoder part
vae_decoder.forward = vae_encoder.decode
onnx_export(
vae_decoder,
model_args=(
torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
False,
),
output_path=output_path / "vae_decoder" / "model.onnx",
ordered_input_names=["latent_sample", "return_dict"],
output_names=["sample"],
dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
},
opset=opset,
)
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
safety_checker = pipeline.safety_checker
clip_num_channels = safety_checker.config.vision_config.num_channels
clip_image_size = safety_checker.config.vision_config.image_size
safety_checker.forward = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker,
model_args=(
torch.randn(
1,
clip_num_channels,
clip_image_size,
clip_image_size,
).to(device=device, dtype=dtype),
torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
),
output_path=output_path / "safety_checker" / "model.onnx",
ordered_input_names=["clip_input", "images"],
output_names=["out_images", "has_nsfw_concepts"],
dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
},
opset=opset,
)
del pipeline.safety_checker
safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
feature_extractor = pipeline.feature_extractor
else:
safety_checker = None
feature_extractor = None
onnx_pipeline = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
tokenizer=pipeline.tokenizer,
unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
scheduler=pipeline.scheduler,
safety_checker=safety_checker,
feature_extractor=feature_extractor,
requires_safety_checker=safety_checker is not None,
)
onnx_pipeline.save_pretrained(output_path)
print("ONNX pipeline saved to", output_path)
del pipeline
del onnx_pipeline
_ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
print("ONNX pipeline is loadable")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
args = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py/0 | {
"file_path": "diffusers/scripts/convert_stable_diffusion_checkpoint_to_onnx.py",
"repo_id": "diffusers",
"token_count": 4384
} |
__version__ = "0.33.0.dev0"
from typing import TYPE_CHECKING
from .utils import (
DIFFUSERS_SLOW_IMPORT,
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_k_diffusion_available,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_sentencepiece_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
)
# Lazy Import based on
# https://github.com/huggingface/transformers/blob/main/src/transformers/__init__.py
# When adding a new object to this init, please add it to `_import_structure`. `_import_structure` is a dictionary mapping each submodule to its list of object names,
# and is used to defer the actual importing for when the objects are requested.
# This way `import diffusers` provides the names in the namespace without actually importing anything (and especially none of the backends).
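# For example, `from diffusers import UNet2DModel` resolves the name through `_LazyModule.__getattr__`,
# which only then imports the submodule that actually defines `UNet2DModel`.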
_import_structure = {
"configuration_utils": ["ConfigMixin"],
"hooks": [],
"loaders": ["FromOriginalModelMixin"],
"models": [],
"pipelines": [],
"quantizers.quantization_config": ["BitsAndBytesConfig", "GGUFQuantizationConfig", "TorchAoConfig"],
"schedulers": [],
"utils": [
"OptionalDependencyNotAvailable",
"is_flax_available",
"is_inflect_available",
"is_invisible_watermark_available",
"is_k_diffusion_available",
"is_k_diffusion_version",
"is_librosa_available",
"is_note_seq_available",
"is_onnx_available",
"is_scipy_available",
"is_torch_available",
"is_torchsde_available",
"is_transformers_available",
"is_transformers_version",
"is_unidecode_available",
"logging",
],
}
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_onnx_objects # noqa F403
_import_structure["utils.dummy_onnx_objects"] = [
name for name in dir(dummy_onnx_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["OnnxRuntimeModel"])
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_pt_objects # noqa F403
_import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")]
else:
_import_structure["hooks"].extend(
[
"HookRegistry",
"PyramidAttentionBroadcastConfig",
"apply_pyramid_attention_broadcast",
]
)
_import_structure["models"].extend(
[
"AllegroTransformer3DModel",
"AsymmetricAutoencoderKL",
"AuraFlowTransformer2DModel",
"AutoencoderDC",
"AutoencoderKL",
"AutoencoderKLAllegro",
"AutoencoderKLCogVideoX",
"AutoencoderKLHunyuanVideo",
"AutoencoderKLLTXVideo",
"AutoencoderKLMochi",
"AutoencoderKLTemporalDecoder",
"AutoencoderOobleck",
"AutoencoderTiny",
"CacheMixin",
"CogVideoXTransformer3DModel",
"CogView3PlusTransformer2DModel",
"ConsisIDTransformer3DModel",
"ConsistencyDecoderVAE",
"ControlNetModel",
"ControlNetUnionModel",
"ControlNetXSAdapter",
"DiTTransformer2DModel",
"FluxControlNetModel",
"FluxMultiControlNetModel",
"FluxTransformer2DModel",
"HunyuanDiT2DControlNetModel",
"HunyuanDiT2DModel",
"HunyuanDiT2DMultiControlNetModel",
"HunyuanVideoTransformer3DModel",
"I2VGenXLUNet",
"Kandinsky3UNet",
"LatteTransformer3DModel",
"LTXVideoTransformer3DModel",
"LuminaNextDiT2DModel",
"MochiTransformer3DModel",
"ModelMixin",
"MotionAdapter",
"MultiAdapter",
"MultiControlNetModel",
"PixArtTransformer2DModel",
"PriorTransformer",
"SanaTransformer2DModel",
"SD3ControlNetModel",
"SD3MultiControlNetModel",
"SD3Transformer2DModel",
"SparseControlNetModel",
"StableAudioDiTModel",
"StableCascadeUNet",
"T2IAdapter",
"T5FilmDecoder",
"Transformer2DModel",
"UNet1DModel",
"UNet2DConditionModel",
"UNet2DModel",
"UNet3DConditionModel",
"UNetControlNetXSModel",
"UNetMotionModel",
"UNetSpatioTemporalConditionModel",
"UVit2DModel",
"VQModel",
]
)
_import_structure["optimization"] = [
"get_constant_schedule",
"get_constant_schedule_with_warmup",
"get_cosine_schedule_with_warmup",
"get_cosine_with_hard_restarts_schedule_with_warmup",
"get_linear_schedule_with_warmup",
"get_polynomial_decay_schedule_with_warmup",
"get_scheduler",
]
_import_structure["pipelines"].extend(
[
"AudioPipelineOutput",
"AutoPipelineForImage2Image",
"AutoPipelineForInpainting",
"AutoPipelineForText2Image",
"ConsistencyModelPipeline",
"DanceDiffusionPipeline",
"DDIMPipeline",
"DDPMPipeline",
"DiffusionPipeline",
"DiTPipeline",
"ImagePipelineOutput",
"KarrasVePipeline",
"LDMPipeline",
"LDMSuperResolutionPipeline",
"PNDMPipeline",
"RePaintPipeline",
"ScoreSdeVePipeline",
"StableDiffusionMixin",
]
)
_import_structure["quantizers"] = ["DiffusersQuantizer"]
_import_structure["schedulers"].extend(
[
"AmusedScheduler",
"CMStochasticIterativeScheduler",
"CogVideoXDDIMScheduler",
"CogVideoXDPMScheduler",
"DDIMInverseScheduler",
"DDIMParallelScheduler",
"DDIMScheduler",
"DDPMParallelScheduler",
"DDPMScheduler",
"DDPMWuerstchenScheduler",
"DEISMultistepScheduler",
"DPMSolverMultistepInverseScheduler",
"DPMSolverMultistepScheduler",
"DPMSolverSinglestepScheduler",
"EDMDPMSolverMultistepScheduler",
"EDMEulerScheduler",
"EulerAncestralDiscreteScheduler",
"EulerDiscreteScheduler",
"FlowMatchEulerDiscreteScheduler",
"FlowMatchHeunDiscreteScheduler",
"HeunDiscreteScheduler",
"IPNDMScheduler",
"KarrasVeScheduler",
"KDPM2AncestralDiscreteScheduler",
"KDPM2DiscreteScheduler",
"LCMScheduler",
"PNDMScheduler",
"RePaintScheduler",
"SASolverScheduler",
"SchedulerMixin",
"ScoreSdeVeScheduler",
"TCDScheduler",
"UnCLIPScheduler",
"UniPCMultistepScheduler",
"VQDiffusionScheduler",
]
)
_import_structure["training_utils"] = ["EMAModel"]
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_scipy_objects # noqa F403
_import_structure["utils.dummy_torch_and_scipy_objects"] = [
name for name in dir(dummy_torch_and_scipy_objects) if not name.startswith("_")
]
else:
_import_structure["schedulers"].extend(["LMSDiscreteScheduler"])
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_torchsde_objects # noqa F403
_import_structure["utils.dummy_torch_and_torchsde_objects"] = [
name for name in dir(dummy_torch_and_torchsde_objects) if not name.startswith("_")
]
else:
_import_structure["schedulers"].extend(["CosineDPMSolverMultistepScheduler", "DPMSolverSDEScheduler"])
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_transformers_objects # noqa F403
_import_structure["utils.dummy_torch_and_transformers_objects"] = [
name for name in dir(dummy_torch_and_transformers_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(
[
"AllegroPipeline",
"AltDiffusionImg2ImgPipeline",
"AltDiffusionPipeline",
"AmusedImg2ImgPipeline",
"AmusedInpaintPipeline",
"AmusedPipeline",
"AnimateDiffControlNetPipeline",
"AnimateDiffPAGPipeline",
"AnimateDiffPipeline",
"AnimateDiffSDXLPipeline",
"AnimateDiffSparseControlNetPipeline",
"AnimateDiffVideoToVideoControlNetPipeline",
"AnimateDiffVideoToVideoPipeline",
"AudioLDM2Pipeline",
"AudioLDM2ProjectionModel",
"AudioLDM2UNet2DConditionModel",
"AudioLDMPipeline",
"AuraFlowPipeline",
"BlipDiffusionControlNetPipeline",
"BlipDiffusionPipeline",
"CLIPImageProjection",
"CogVideoXFunControlPipeline",
"CogVideoXImageToVideoPipeline",
"CogVideoXPipeline",
"CogVideoXVideoToVideoPipeline",
"CogView3PlusPipeline",
"ConsisIDPipeline",
"CycleDiffusionPipeline",
"FluxControlImg2ImgPipeline",
"FluxControlInpaintPipeline",
"FluxControlNetImg2ImgPipeline",
"FluxControlNetInpaintPipeline",
"FluxControlNetPipeline",
"FluxControlPipeline",
"FluxFillPipeline",
"FluxImg2ImgPipeline",
"FluxInpaintPipeline",
"FluxPipeline",
"FluxPriorReduxPipeline",
"HunyuanDiTControlNetPipeline",
"HunyuanDiTPAGPipeline",
"HunyuanDiTPipeline",
"HunyuanVideoPipeline",
"I2VGenXLPipeline",
"IFImg2ImgPipeline",
"IFImg2ImgSuperResolutionPipeline",
"IFInpaintingPipeline",
"IFInpaintingSuperResolutionPipeline",
"IFPipeline",
"IFSuperResolutionPipeline",
"ImageTextPipelineOutput",
"Kandinsky3Img2ImgPipeline",
"Kandinsky3Pipeline",
"KandinskyCombinedPipeline",
"KandinskyImg2ImgCombinedPipeline",
"KandinskyImg2ImgPipeline",
"KandinskyInpaintCombinedPipeline",
"KandinskyInpaintPipeline",
"KandinskyPipeline",
"KandinskyPriorPipeline",
"KandinskyV22CombinedPipeline",
"KandinskyV22ControlnetImg2ImgPipeline",
"KandinskyV22ControlnetPipeline",
"KandinskyV22Img2ImgCombinedPipeline",
"KandinskyV22Img2ImgPipeline",
"KandinskyV22InpaintCombinedPipeline",
"KandinskyV22InpaintPipeline",
"KandinskyV22Pipeline",
"KandinskyV22PriorEmb2EmbPipeline",
"KandinskyV22PriorPipeline",
"LatentConsistencyModelImg2ImgPipeline",
"LatentConsistencyModelPipeline",
"LattePipeline",
"LDMTextToImagePipeline",
"LEditsPPPipelineStableDiffusion",
"LEditsPPPipelineStableDiffusionXL",
"LTXImageToVideoPipeline",
"LTXPipeline",
"LuminaText2ImgPipeline",
"MarigoldDepthPipeline",
"MarigoldNormalsPipeline",
"MochiPipeline",
"MusicLDMPipeline",
"PaintByExamplePipeline",
"PIAPipeline",
"PixArtAlphaPipeline",
"PixArtSigmaPAGPipeline",
"PixArtSigmaPipeline",
"ReduxImageEncoder",
"SanaPAGPipeline",
"SanaPipeline",
"SemanticStableDiffusionPipeline",
"ShapEImg2ImgPipeline",
"ShapEPipeline",
"StableAudioPipeline",
"StableAudioProjectionModel",
"StableCascadeCombinedPipeline",
"StableCascadeDecoderPipeline",
"StableCascadePriorPipeline",
"StableDiffusion3ControlNetInpaintingPipeline",
"StableDiffusion3ControlNetPipeline",
"StableDiffusion3Img2ImgPipeline",
"StableDiffusion3InpaintPipeline",
"StableDiffusion3PAGImg2ImgPipeline",
"StableDiffusion3PAGImg2ImgPipeline",
"StableDiffusion3PAGPipeline",
"StableDiffusion3Pipeline",
"StableDiffusionAdapterPipeline",
"StableDiffusionAttendAndExcitePipeline",
"StableDiffusionControlNetImg2ImgPipeline",
"StableDiffusionControlNetInpaintPipeline",
"StableDiffusionControlNetPAGInpaintPipeline",
"StableDiffusionControlNetPAGPipeline",
"StableDiffusionControlNetPipeline",
"StableDiffusionControlNetXSPipeline",
"StableDiffusionDepth2ImgPipeline",
"StableDiffusionDiffEditPipeline",
"StableDiffusionGLIGENPipeline",
"StableDiffusionGLIGENTextImagePipeline",
"StableDiffusionImageVariationPipeline",
"StableDiffusionImg2ImgPipeline",
"StableDiffusionInpaintPipeline",
"StableDiffusionInpaintPipelineLegacy",
"StableDiffusionInstructPix2PixPipeline",
"StableDiffusionLatentUpscalePipeline",
"StableDiffusionLDM3DPipeline",
"StableDiffusionModelEditingPipeline",
"StableDiffusionPAGImg2ImgPipeline",
"StableDiffusionPAGInpaintPipeline",
"StableDiffusionPAGPipeline",
"StableDiffusionPanoramaPipeline",
"StableDiffusionParadigmsPipeline",
"StableDiffusionPipeline",
"StableDiffusionPipelineSafe",
"StableDiffusionPix2PixZeroPipeline",
"StableDiffusionSAGPipeline",
"StableDiffusionUpscalePipeline",
"StableDiffusionXLAdapterPipeline",
"StableDiffusionXLControlNetImg2ImgPipeline",
"StableDiffusionXLControlNetInpaintPipeline",
"StableDiffusionXLControlNetPAGImg2ImgPipeline",
"StableDiffusionXLControlNetPAGPipeline",
"StableDiffusionXLControlNetPipeline",
"StableDiffusionXLControlNetUnionImg2ImgPipeline",
"StableDiffusionXLControlNetUnionInpaintPipeline",
"StableDiffusionXLControlNetUnionPipeline",
"StableDiffusionXLControlNetXSPipeline",
"StableDiffusionXLImg2ImgPipeline",
"StableDiffusionXLInpaintPipeline",
"StableDiffusionXLInstructPix2PixPipeline",
"StableDiffusionXLPAGImg2ImgPipeline",
"StableDiffusionXLPAGInpaintPipeline",
"StableDiffusionXLPAGPipeline",
"StableDiffusionXLPipeline",
"StableUnCLIPImg2ImgPipeline",
"StableUnCLIPPipeline",
"StableVideoDiffusionPipeline",
"TextToVideoSDPipeline",
"TextToVideoZeroPipeline",
"TextToVideoZeroSDXLPipeline",
"UnCLIPImageVariationPipeline",
"UnCLIPPipeline",
"UniDiffuserModel",
"UniDiffuserPipeline",
"UniDiffuserTextDecoder",
"VersatileDiffusionDualGuidedPipeline",
"VersatileDiffusionImageVariationPipeline",
"VersatileDiffusionPipeline",
"VersatileDiffusionTextToImagePipeline",
"VideoToVideoSDPipeline",
"VQDiffusionPipeline",
"WuerstchenCombinedPipeline",
"WuerstchenDecoderPipeline",
"WuerstchenPriorPipeline",
]
)
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_transformers_and_k_diffusion_objects # noqa F403
_import_structure["utils.dummy_torch_and_transformers_and_k_diffusion_objects"] = [
name for name in dir(dummy_torch_and_transformers_and_k_diffusion_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"])
try:
if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403
_import_structure["utils.dummy_torch_and_transformers_and_sentencepiece_objects"] = [
name for name in dir(dummy_torch_and_transformers_and_sentencepiece_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["KolorsImg2ImgPipeline", "KolorsPAGPipeline", "KolorsPipeline"])
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_transformers_and_onnx_objects # noqa F403
_import_structure["utils.dummy_torch_and_transformers_and_onnx_objects"] = [
name for name in dir(dummy_torch_and_transformers_and_onnx_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(
[
"OnnxStableDiffusionImg2ImgPipeline",
"OnnxStableDiffusionInpaintPipeline",
"OnnxStableDiffusionInpaintPipelineLegacy",
"OnnxStableDiffusionPipeline",
"OnnxStableDiffusionUpscalePipeline",
"StableDiffusionOnnxPipeline",
]
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_torch_and_librosa_objects # noqa F403
_import_structure["utils.dummy_torch_and_librosa_objects"] = [
name for name in dir(dummy_torch_and_librosa_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["AudioDiffusionPipeline", "Mel"])
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_transformers_and_torch_and_note_seq_objects # noqa F403
_import_structure["utils.dummy_transformers_and_torch_and_note_seq_objects"] = [
name for name in dir(dummy_transformers_and_torch_and_note_seq_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["SpectrogramDiffusionPipeline"])
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_flax_objects # noqa F403
_import_structure["utils.dummy_flax_objects"] = [
name for name in dir(dummy_flax_objects) if not name.startswith("_")
]
else:
_import_structure["models.controlnets.controlnet_flax"] = ["FlaxControlNetModel"]
_import_structure["models.modeling_flax_utils"] = ["FlaxModelMixin"]
_import_structure["models.unets.unet_2d_condition_flax"] = ["FlaxUNet2DConditionModel"]
_import_structure["models.vae_flax"] = ["FlaxAutoencoderKL"]
_import_structure["pipelines"].extend(["FlaxDiffusionPipeline"])
_import_structure["schedulers"].extend(
[
"FlaxDDIMScheduler",
"FlaxDDPMScheduler",
"FlaxDPMSolverMultistepScheduler",
"FlaxEulerDiscreteScheduler",
"FlaxKarrasVeScheduler",
"FlaxLMSDiscreteScheduler",
"FlaxPNDMScheduler",
"FlaxSchedulerMixin",
"FlaxScoreSdeVeScheduler",
]
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_flax_and_transformers_objects # noqa F403
_import_structure["utils.dummy_flax_and_transformers_objects"] = [
name for name in dir(dummy_flax_and_transformers_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(
[
"FlaxStableDiffusionControlNetPipeline",
"FlaxStableDiffusionImg2ImgPipeline",
"FlaxStableDiffusionInpaintPipeline",
"FlaxStableDiffusionPipeline",
"FlaxStableDiffusionXLPipeline",
]
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils import dummy_note_seq_objects # noqa F403
_import_structure["utils.dummy_note_seq_objects"] = [
name for name in dir(dummy_note_seq_objects) if not name.startswith("_")
]
else:
_import_structure["pipelines"].extend(["MidiProcessor"])
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
from .configuration_utils import ConfigMixin
from .quantizers.quantization_config import BitsAndBytesConfig, GGUFQuantizationConfig, TorchAoConfig
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .hooks import HookRegistry, PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
from .models import (
AllegroTransformer3DModel,
AsymmetricAutoencoderKL,
AuraFlowTransformer2DModel,
AutoencoderDC,
AutoencoderKL,
AutoencoderKLAllegro,
AutoencoderKLCogVideoX,
AutoencoderKLHunyuanVideo,
AutoencoderKLLTXVideo,
AutoencoderKLMochi,
AutoencoderKLTemporalDecoder,
AutoencoderOobleck,
AutoencoderTiny,
CacheMixin,
CogVideoXTransformer3DModel,
CogView3PlusTransformer2DModel,
ConsisIDTransformer3DModel,
ConsistencyDecoderVAE,
ControlNetModel,
ControlNetUnionModel,
ControlNetXSAdapter,
DiTTransformer2DModel,
FluxControlNetModel,
FluxMultiControlNetModel,
FluxTransformer2DModel,
HunyuanDiT2DControlNetModel,
HunyuanDiT2DModel,
HunyuanDiT2DMultiControlNetModel,
HunyuanVideoTransformer3DModel,
I2VGenXLUNet,
Kandinsky3UNet,
LatteTransformer3DModel,
LTXVideoTransformer3DModel,
LuminaNextDiT2DModel,
MochiTransformer3DModel,
ModelMixin,
MotionAdapter,
MultiAdapter,
MultiControlNetModel,
PixArtTransformer2DModel,
PriorTransformer,
SanaTransformer2DModel,
SD3ControlNetModel,
SD3MultiControlNetModel,
SD3Transformer2DModel,
SparseControlNetModel,
StableAudioDiTModel,
T2IAdapter,
T5FilmDecoder,
Transformer2DModel,
UNet1DModel,
UNet2DConditionModel,
UNet2DModel,
UNet3DConditionModel,
UNetControlNetXSModel,
UNetMotionModel,
UNetSpatioTemporalConditionModel,
UVit2DModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
AutoPipelineForImage2Image,
AutoPipelineForInpainting,
AutoPipelineForText2Image,
BlipDiffusionControlNetPipeline,
BlipDiffusionPipeline,
CLIPImageProjection,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
StableDiffusionMixin,
)
from .quantizers import DiffusersQuantizer
from .schedulers import (
AmusedScheduler,
CMStochasticIterativeScheduler,
CogVideoXDDIMScheduler,
CogVideoXDPMScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DDPMWuerstchenScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EDMDPMSolverMultistepScheduler,
EDMEulerScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
FlowMatchEulerDiscreteScheduler,
FlowMatchHeunDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPM2AncestralDiscreteScheduler,
KDPM2DiscreteScheduler,
LCMScheduler,
PNDMScheduler,
RePaintScheduler,
SASolverScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
TCDScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import CosineDPMSolverMultistepScheduler, DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AllegroPipeline,
AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AmusedImg2ImgPipeline,
AmusedInpaintPipeline,
AmusedPipeline,
AnimateDiffControlNetPipeline,
AnimateDiffPAGPipeline,
AnimateDiffPipeline,
AnimateDiffSDXLPipeline,
AnimateDiffSparseControlNetPipeline,
AnimateDiffVideoToVideoControlNetPipeline,
AnimateDiffVideoToVideoPipeline,
AudioLDM2Pipeline,
AudioLDM2ProjectionModel,
AudioLDM2UNet2DConditionModel,
AudioLDMPipeline,
AuraFlowPipeline,
CLIPImageProjection,
CogVideoXFunControlPipeline,
CogVideoXImageToVideoPipeline,
CogVideoXPipeline,
CogVideoXVideoToVideoPipeline,
CogView3PlusPipeline,
ConsisIDPipeline,
CycleDiffusionPipeline,
FluxControlImg2ImgPipeline,
FluxControlInpaintPipeline,
FluxControlNetImg2ImgPipeline,
FluxControlNetInpaintPipeline,
FluxControlNetPipeline,
FluxControlPipeline,
FluxFillPipeline,
FluxImg2ImgPipeline,
FluxInpaintPipeline,
FluxPipeline,
FluxPriorReduxPipeline,
HunyuanDiTControlNetPipeline,
HunyuanDiTPAGPipeline,
HunyuanDiTPipeline,
HunyuanVideoPipeline,
I2VGenXLPipeline,
IFImg2ImgPipeline,
IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
Kandinsky3Img2ImgPipeline,
Kandinsky3Pipeline,
KandinskyCombinedPipeline,
KandinskyImg2ImgCombinedPipeline,
KandinskyImg2ImgPipeline,
KandinskyInpaintCombinedPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyV22CombinedPipeline,
KandinskyV22ControlnetImg2ImgPipeline,
KandinskyV22ControlnetPipeline,
KandinskyV22Img2ImgCombinedPipeline,
KandinskyV22Img2ImgPipeline,
KandinskyV22InpaintCombinedPipeline,
KandinskyV22InpaintPipeline,
KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline,
LatentConsistencyModelImg2ImgPipeline,
LatentConsistencyModelPipeline,
LattePipeline,
LDMTextToImagePipeline,
LEditsPPPipelineStableDiffusion,
LEditsPPPipelineStableDiffusionXL,
LTXImageToVideoPipeline,
LTXPipeline,
LuminaText2ImgPipeline,
MarigoldDepthPipeline,
MarigoldNormalsPipeline,
MochiPipeline,
MusicLDMPipeline,
PaintByExamplePipeline,
PIAPipeline,
PixArtAlphaPipeline,
PixArtSigmaPAGPipeline,
PixArtSigmaPipeline,
ReduxImageEncoder,
SanaPAGPipeline,
SanaPipeline,
SemanticStableDiffusionPipeline,
ShapEImg2ImgPipeline,
ShapEPipeline,
StableAudioPipeline,
StableAudioProjectionModel,
StableCascadeCombinedPipeline,
StableCascadeDecoderPipeline,
StableCascadePriorPipeline,
            StableDiffusion3ControlNetInpaintingPipeline,
            StableDiffusion3ControlNetPipeline,
StableDiffusion3Img2ImgPipeline,
StableDiffusion3InpaintPipeline,
StableDiffusion3PAGImg2ImgPipeline,
StableDiffusion3PAGPipeline,
StableDiffusion3Pipeline,
StableDiffusionAdapterPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPAGInpaintPipeline,
StableDiffusionControlNetPAGPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionControlNetXSPipeline,
StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionGLIGENPipeline,
StableDiffusionGLIGENTextImagePipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPAGImg2ImgPipeline,
StableDiffusionPAGInpaintPipeline,
StableDiffusionPAGPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableDiffusionXLAdapterPipeline,
StableDiffusionXLControlNetImg2ImgPipeline,
StableDiffusionXLControlNetInpaintPipeline,
StableDiffusionXLControlNetPAGImg2ImgPipeline,
StableDiffusionXLControlNetPAGPipeline,
StableDiffusionXLControlNetPipeline,
StableDiffusionXLControlNetUnionImg2ImgPipeline,
StableDiffusionXLControlNetUnionInpaintPipeline,
StableDiffusionXLControlNetUnionPipeline,
StableDiffusionXLControlNetXSPipeline,
StableDiffusionXLImg2ImgPipeline,
StableDiffusionXLInpaintPipeline,
StableDiffusionXLInstructPix2PixPipeline,
StableDiffusionXLPAGImg2ImgPipeline,
StableDiffusionXLPAGInpaintPipeline,
StableDiffusionXLPAGPipeline,
StableDiffusionXLPipeline,
StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
StableVideoDiffusionPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
TextToVideoZeroSDXLPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
WuerstchenCombinedPipeline,
WuerstchenDecoderPipeline,
WuerstchenPriorPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import * # noqa F403
else:
from .pipelines import KolorsImg2ImgPipeline, KolorsPAGPipeline, KolorsPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnets.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unets.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxEulerDiscreteScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
FlaxStableDiffusionXLPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
else:
import sys
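    # Replace this module in `sys.modules` with a lazy proxy so that the heavy submodules and classes listed in
    # `_import_structure` are only imported on first attribute access.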
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
extra_objects={"__version__": __version__},
)
| diffusers/src/diffusers/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/__init__.py",
"repo_id": "diffusers",
"token_count": 19348
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from dataclasses import dataclass
from typing import Any, Callable, Optional, Tuple, Union
import torch
from ..models.attention_processor import Attention, MochiAttention
from ..utils import logging
from .hooks import HookRegistry, ModelHook
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_ATTENTION_CLASSES = (Attention, MochiAttention)
_SPATIAL_ATTENTION_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks", "single_transformer_blocks")
_TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS = ("temporal_transformer_blocks",)
_CROSS_ATTENTION_BLOCK_IDENTIFIERS = ("blocks", "transformer_blocks")
@dataclass
class PyramidAttentionBroadcastConfig:
r"""
Configuration for Pyramid Attention Broadcast.
Args:
spatial_attention_block_skip_range (`int`, *optional*, defaults to `None`):
The number of times a specific spatial attention broadcast is skipped before computing the attention states
to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e.,
old attention states will be re-used) before computing the new attention states again.
temporal_attention_block_skip_range (`int`, *optional*, defaults to `None`):
The number of times a specific temporal attention broadcast is skipped before computing the attention
states to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times
(i.e., old attention states will be re-used) before computing the new attention states again.
cross_attention_block_skip_range (`int`, *optional*, defaults to `None`):
The number of times a specific cross-attention broadcast is skipped before computing the attention states
to re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e.,
old attention states will be re-used) before computing the new attention states again.
spatial_attention_timestep_skip_range (`Tuple[int, int]`, defaults to `(100, 800)`):
The range of timesteps to skip in the spatial attention layer. The attention computations will be
conditionally skipped if the current timestep is within the specified range.
temporal_attention_timestep_skip_range (`Tuple[int, int]`, defaults to `(100, 800)`):
The range of timesteps to skip in the temporal attention layer. The attention computations will be
conditionally skipped if the current timestep is within the specified range.
cross_attention_timestep_skip_range (`Tuple[int, int]`, defaults to `(100, 800)`):
The range of timesteps to skip in the cross-attention layer. The attention computations will be
conditionally skipped if the current timestep is within the specified range.
        spatial_attention_block_identifiers (`Tuple[str, ...]`, defaults to `("blocks", "transformer_blocks", "single_transformer_blocks")`):
The identifiers to match against the layer names to determine if the layer is a spatial attention layer.
temporal_attention_block_identifiers (`Tuple[str, ...]`, defaults to `("temporal_transformer_blocks",)`):
The identifiers to match against the layer names to determine if the layer is a temporal attention layer.
cross_attention_block_identifiers (`Tuple[str, ...]`, defaults to `("blocks", "transformer_blocks")`):
            The identifiers to match against the layer names to determine if the layer is a cross-attention layer.
        current_timestep_callback (`Callable[[], int]`, defaults to `None`):
            A callback function that returns the current inference timestep. It must be provided before the
            configuration is applied with [`apply_pyramid_attention_broadcast`].
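    Example:
        A minimal sketch of building a configuration (the values are illustrative; the `pipe.current_timestep`
        attribute is assumed to be exposed by the host pipeline, as in the example of
        [`apply_pyramid_attention_broadcast`]):
        ```python
        >>> config = PyramidAttentionBroadcastConfig(
        ...     spatial_attention_block_skip_range=2,
        ...     spatial_attention_timestep_skip_range=(100, 800),
        ...     current_timestep_callback=lambda: pipe.current_timestep,
        ... )
        ```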
"""
spatial_attention_block_skip_range: Optional[int] = None
temporal_attention_block_skip_range: Optional[int] = None
cross_attention_block_skip_range: Optional[int] = None
spatial_attention_timestep_skip_range: Tuple[int, int] = (100, 800)
temporal_attention_timestep_skip_range: Tuple[int, int] = (100, 800)
cross_attention_timestep_skip_range: Tuple[int, int] = (100, 800)
spatial_attention_block_identifiers: Tuple[str, ...] = _SPATIAL_ATTENTION_BLOCK_IDENTIFIERS
temporal_attention_block_identifiers: Tuple[str, ...] = _TEMPORAL_ATTENTION_BLOCK_IDENTIFIERS
cross_attention_block_identifiers: Tuple[str, ...] = _CROSS_ATTENTION_BLOCK_IDENTIFIERS
    current_timestep_callback: Optional[Callable[[], int]] = None
# TODO(aryan): add PAB for MLP layers (very limited speedup from testing with original codebase
# so not added for now)
def __repr__(self) -> str:
return (
f"PyramidAttentionBroadcastConfig("
f" spatial_attention_block_skip_range={self.spatial_attention_block_skip_range},\n"
f" temporal_attention_block_skip_range={self.temporal_attention_block_skip_range},\n"
f" cross_attention_block_skip_range={self.cross_attention_block_skip_range},\n"
f" spatial_attention_timestep_skip_range={self.spatial_attention_timestep_skip_range},\n"
f" temporal_attention_timestep_skip_range={self.temporal_attention_timestep_skip_range},\n"
f" cross_attention_timestep_skip_range={self.cross_attention_timestep_skip_range},\n"
f" spatial_attention_block_identifiers={self.spatial_attention_block_identifiers},\n"
f" temporal_attention_block_identifiers={self.temporal_attention_block_identifiers},\n"
f" cross_attention_block_identifiers={self.cross_attention_block_identifiers},\n"
f" current_timestep_callback={self.current_timestep_callback}\n"
")"
)
class PyramidAttentionBroadcastState:
r"""
State for Pyramid Attention Broadcast.
Attributes:
iteration (`int`):
The current iteration of the Pyramid Attention Broadcast. It is necessary to ensure that `reset_state` is
called before starting a new inference forward pass for PAB to work correctly.
cache (`Any`):
The cached output from the previous forward pass. This is used to re-use the attention states when the
attention computation is skipped. It is either a tensor or a tuple of tensors, depending on the module.
"""
def __init__(self) -> None:
self.iteration = 0
self.cache = None
def reset(self):
self.iteration = 0
self.cache = None
def __repr__(self):
cache_repr = ""
if self.cache is None:
cache_repr = "None"
else:
cache_repr = f"Tensor(shape={self.cache.shape}, dtype={self.cache.dtype})"
return f"PyramidAttentionBroadcastState(iteration={self.iteration}, cache={cache_repr})"
class PyramidAttentionBroadcastHook(ModelHook):
r"""A hook that applies Pyramid Attention Broadcast to a given module."""
_is_stateful = True
def __init__(
self, timestep_skip_range: Tuple[int, int], block_skip_range: int, current_timestep_callback: Callable[[], int]
) -> None:
super().__init__()
self.timestep_skip_range = timestep_skip_range
self.block_skip_range = block_skip_range
self.current_timestep_callback = current_timestep_callback
def initialize_hook(self, module):
self.state = PyramidAttentionBroadcastState()
return module
def new_forward(self, module: torch.nn.Module, *args, **kwargs) -> Any:
is_within_timestep_range = (
self.timestep_skip_range[0] < self.current_timestep_callback() < self.timestep_skip_range[1]
)
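        # Recompute attention when there is no cached output yet, on the first iteration, outside the configured
        # timestep window, or on every `block_skip_range`-th iteration; otherwise broadcast (re-use) the cached
        # states.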
should_compute_attention = (
self.state.cache is None
or self.state.iteration == 0
or not is_within_timestep_range
or self.state.iteration % self.block_skip_range == 0
)
if should_compute_attention:
output = self.fn_ref.original_forward(*args, **kwargs)
else:
output = self.state.cache
self.state.cache = output
self.state.iteration += 1
return output
def reset_state(self, module: torch.nn.Module) -> None:
self.state.reset()
return module
def apply_pyramid_attention_broadcast(
module: torch.nn.Module,
config: PyramidAttentionBroadcastConfig,
):
r"""
Apply [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) to a given pipeline.
PAB is an attention approximation method that leverages the similarity in attention states between timesteps to
reduce the computational cost of attention computation. The key takeaway from the paper is that the attention
similarity in the cross-attention layers between timesteps is high, followed by less similarity in the temporal and
spatial layers. This allows for the skipping of attention computation in the cross-attention layers more frequently
    than in the temporal and spatial layers. Applying PAB will, therefore, speed up the inference process.
Args:
module (`torch.nn.Module`):
The module to apply Pyramid Attention Broadcast to.
        config (`PyramidAttentionBroadcastConfig`):
The configuration to use for Pyramid Attention Broadcast.
Example:
```python
>>> import torch
>>> from diffusers import CogVideoXPipeline, PyramidAttentionBroadcastConfig, apply_pyramid_attention_broadcast
>>> from diffusers.utils import export_to_video
>>> pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
>>> pipe.to("cuda")
>>> config = PyramidAttentionBroadcastConfig(
... spatial_attention_block_skip_range=2,
... spatial_attention_timestep_skip_range=(100, 800),
... current_timestep_callback=lambda: pipe.current_timestep,
... )
>>> apply_pyramid_attention_broadcast(pipe.transformer, config)
```
"""
if config.current_timestep_callback is None:
raise ValueError(
"The `current_timestep_callback` function must be provided in the configuration to apply Pyramid Attention Broadcast."
)
if (
config.spatial_attention_block_skip_range is None
and config.temporal_attention_block_skip_range is None
and config.cross_attention_block_skip_range is None
):
logger.warning(
"Pyramid Attention Broadcast requires one or more of `spatial_attention_block_skip_range`, `temporal_attention_block_skip_range` "
"or `cross_attention_block_skip_range` parameters to be set to an integer, not `None`. Defaulting to using `spatial_attention_block_skip_range=2`. "
"To avoid this warning, please set one of the above parameters."
)
config.spatial_attention_block_skip_range = 2
for name, submodule in module.named_modules():
if not isinstance(submodule, _ATTENTION_CLASSES):
# PAB has been implemented specific to Diffusers' Attention classes. However, this does not mean that PAB
# cannot be applied to this layer. For custom layers, users can extend this functionality and implement
# their own PAB logic similar to `_apply_pyramid_attention_broadcast_on_attention_class`.
continue
_apply_pyramid_attention_broadcast_on_attention_class(name, submodule, config)
def _apply_pyramid_attention_broadcast_on_attention_class(
name: str, module: Attention, config: PyramidAttentionBroadcastConfig
) -> bool:
is_spatial_self_attention = (
any(re.search(identifier, name) is not None for identifier in config.spatial_attention_block_identifiers)
and config.spatial_attention_block_skip_range is not None
and not getattr(module, "is_cross_attention", False)
)
is_temporal_self_attention = (
any(re.search(identifier, name) is not None for identifier in config.temporal_attention_block_identifiers)
and config.temporal_attention_block_skip_range is not None
and not getattr(module, "is_cross_attention", False)
)
is_cross_attention = (
any(re.search(identifier, name) is not None for identifier in config.cross_attention_block_identifiers)
and config.cross_attention_block_skip_range is not None
and getattr(module, "is_cross_attention", False)
)
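    # Classify the layer as spatial self-attention, temporal self-attention or cross-attention based on the
    # configured name identifiers, whether the corresponding skip range is set, and the module's
    # `is_cross_attention` flag.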
block_skip_range, timestep_skip_range, block_type = None, None, None
if is_spatial_self_attention:
block_skip_range = config.spatial_attention_block_skip_range
timestep_skip_range = config.spatial_attention_timestep_skip_range
block_type = "spatial"
elif is_temporal_self_attention:
block_skip_range = config.temporal_attention_block_skip_range
timestep_skip_range = config.temporal_attention_timestep_skip_range
block_type = "temporal"
elif is_cross_attention:
block_skip_range = config.cross_attention_block_skip_range
timestep_skip_range = config.cross_attention_timestep_skip_range
block_type = "cross"
if block_skip_range is None or timestep_skip_range is None:
logger.info(
f'Unable to apply Pyramid Attention Broadcast to the selected layer: "{name}" because it does '
f"not match any of the required criteria for spatial, temporal or cross attention layers. Note, "
f"however, that this layer may still be valid for applying PAB. Please specify the correct "
f"block identifiers in the configuration."
)
return False
logger.debug(f"Enabling Pyramid Attention Broadcast ({block_type}) in layer: {name}")
_apply_pyramid_attention_broadcast_hook(
module, timestep_skip_range, block_skip_range, config.current_timestep_callback
)
return True
def _apply_pyramid_attention_broadcast_hook(
module: Union[Attention, MochiAttention],
timestep_skip_range: Tuple[int, int],
block_skip_range: int,
current_timestep_callback: Callable[[], int],
):
r"""
Apply [Pyramid Attention Broadcast](https://huggingface.co/papers/2408.12588) to a given torch.nn.Module.
Args:
module (`torch.nn.Module`):
The module to apply Pyramid Attention Broadcast to.
timestep_skip_range (`Tuple[int, int]`):
The range of timesteps to skip in the attention layer. The attention computations will be conditionally
skipped if the current timestep is within the specified range.
block_skip_range (`int`):
The number of times a specific attention broadcast is skipped before computing the attention states to
re-use. If this is set to the value `N`, the attention computation will be skipped `N - 1` times (i.e., old
attention states will be re-used) before computing the new attention states again.
current_timestep_callback (`Callable[[], int]`):
A callback function that returns the current inference timestep.
"""
registry = HookRegistry.check_if_exists_or_initialize(module)
hook = PyramidAttentionBroadcastHook(timestep_skip_range, block_skip_range, current_timestep_callback)
registry.register_hook(hook, "pyramid_attention_broadcast")
| diffusers/src/diffusers/hooks/pyramid_attention_broadcast.py/0 | {
"file_path": "diffusers/src/diffusers/hooks/pyramid_attention_broadcast.py",
"repo_id": "diffusers",
"token_count": 5684
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import torch
class AttnProcsLayers(torch.nn.Module):
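    r"""
    Wraps a mapping of attention processors in a `torch.nn.ModuleList` and registers state-dict hooks (`map_to` /
    `map_from` below) so that saved and loaded parameter keys match the naming used by `unet.attn_processors`.
    Example (an illustrative sketch; the `unet` object and its attention processors are assumed and not part of this
    file):
    ```python
    >>> lora_layers = AttnProcsLayers(unet.attn_processors)
    >>> state_dict = lora_layers.state_dict()  # keys are remapped back to the processors' original names
    ```
    """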
def __init__(self, state_dict: Dict[str, torch.Tensor]):
super().__init__()
self.layers = torch.nn.ModuleList(state_dict.values())
self.mapping = dict(enumerate(state_dict.keys()))
self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}
# .processor for unet, .self_attn for text encoder
self.split_keys = [".processor", ".self_attn"]
# we add a hook to state_dict() and load_state_dict() so that the
# naming fits with `unet.attn_processors`
def map_to(module, state_dict, *args, **kwargs):
new_state_dict = {}
for key, value in state_dict.items():
num = int(key.split(".")[1]) # 0 is always "layers"
new_key = key.replace(f"layers.{num}", module.mapping[num])
new_state_dict[new_key] = value
return new_state_dict
def remap_key(key, state_dict):
for k in self.split_keys:
if k in key:
return key.split(k)[0] + k
raise ValueError(
f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
)
def map_from(module, state_dict, *args, **kwargs):
all_keys = list(state_dict.keys())
for key in all_keys:
replace_key = remap_key(key, state_dict)
new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
state_dict[new_key] = state_dict[key]
del state_dict[key]
self._register_state_dict_hook(map_to)
self._register_load_state_dict_pre_hook(map_from, with_module=True)
| diffusers/src/diffusers/loaders/utils.py/0 | {
"file_path": "diffusers/src/diffusers/loaders/utils.py",
"repo_id": "diffusers",
"token_count": 1031
} |
# Copyright 2024 The Mochi team and The HuggingFace Team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import logging
from ...utils.accelerate_utils import apply_forward_hook
from ..activations import get_activation
from ..attention_processor import Attention, MochiVaeAttnProcessor2_0
from ..modeling_outputs import AutoencoderKLOutput
from ..modeling_utils import ModelMixin
from .autoencoder_kl_cogvideox import CogVideoXCausalConv3d
from .vae import DecoderOutput, DiagonalGaussianDistribution
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class MochiChunkedGroupNorm3D(nn.Module):
r"""
Applies per-frame group normalization for 5D video inputs. It also supports memory-efficient chunked group
normalization.
Args:
num_channels (int): Number of channels expected in input
num_groups (int, optional): Number of groups to separate the channels into. Default: 32
affine (bool, optional): If True, this module has learnable affine parameters. Default: True
chunk_size (int, optional): Size of each chunk for processing. Default: 8
"""
def __init__(
self,
num_channels: int,
num_groups: int = 32,
affine: bool = True,
chunk_size: int = 8,
):
super().__init__()
self.norm_layer = nn.GroupNorm(num_channels=num_channels, num_groups=num_groups, affine=affine)
self.chunk_size = chunk_size
def forward(self, x: torch.Tensor = None) -> torch.Tensor:
batch_size = x.size(0)
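        # Fold the temporal axis into the batch so GroupNorm is applied per frame: (B, C, T, H, W) -> (B * T, C, H, W)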
x = x.permute(0, 2, 1, 3, 4).flatten(0, 1)
output = torch.cat([self.norm_layer(chunk) for chunk in x.split(self.chunk_size, dim=0)], dim=0)
output = output.unflatten(0, (batch_size, -1)).permute(0, 2, 1, 3, 4)
return output
class MochiResnetBlock3D(nn.Module):
r"""
A 3D ResNet block used in the Mochi model.
Args:
in_channels (`int`):
Number of input channels.
out_channels (`int`, *optional*):
Number of output channels. If None, defaults to `in_channels`.
        act_fn (`str`, defaults to `"swish"`):
Activation function to use.
"""
def __init__(
self,
in_channels: int,
out_channels: Optional[int] = None,
act_fn: str = "swish",
):
super().__init__()
out_channels = out_channels or in_channels
self.in_channels = in_channels
self.out_channels = out_channels
self.nonlinearity = get_activation(act_fn)
self.norm1 = MochiChunkedGroupNorm3D(num_channels=in_channels)
self.conv1 = CogVideoXCausalConv3d(
in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, pad_mode="replicate"
)
self.norm2 = MochiChunkedGroupNorm3D(num_channels=out_channels)
self.conv2 = CogVideoXCausalConv3d(
in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, pad_mode="replicate"
)
def forward(
self,
inputs: torch.Tensor,
conv_cache: Optional[Dict[str, torch.Tensor]] = None,
) -> torch.Tensor:
new_conv_cache = {}
conv_cache = conv_cache or {}
hidden_states = inputs
hidden_states = self.norm1(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states, new_conv_cache["conv1"] = self.conv1(hidden_states, conv_cache=conv_cache.get("conv1"))
hidden_states = self.norm2(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states, new_conv_cache["conv2"] = self.conv2(hidden_states, conv_cache=conv_cache.get("conv2"))
hidden_states = hidden_states + inputs
return hidden_states, new_conv_cache
class MochiDownBlock3D(nn.Module):
r"""
    A downsampling block used in the Mochi model.
Args:
in_channels (`int`):
Number of input channels.
        out_channels (`int`):
            Number of output channels.
num_layers (`int`, defaults to `1`):
Number of resnet blocks in the block.
temporal_expansion (`int`, defaults to `2`):
Temporal expansion factor.
spatial_expansion (`int`, defaults to `2`):
Spatial expansion factor.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
num_layers: int = 1,
temporal_expansion: int = 2,
spatial_expansion: int = 2,
add_attention: bool = True,
):
super().__init__()
self.temporal_expansion = temporal_expansion
self.spatial_expansion = spatial_expansion
self.conv_in = CogVideoXCausalConv3d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(temporal_expansion, spatial_expansion, spatial_expansion),
stride=(temporal_expansion, spatial_expansion, spatial_expansion),
pad_mode="replicate",
)
resnets = []
norms = []
attentions = []
for _ in range(num_layers):
resnets.append(MochiResnetBlock3D(in_channels=out_channels))
if add_attention:
norms.append(MochiChunkedGroupNorm3D(num_channels=out_channels))
attentions.append(
Attention(
query_dim=out_channels,
heads=out_channels // 32,
dim_head=32,
qk_norm="l2",
is_causal=True,
processor=MochiVaeAttnProcessor2_0(),
)
)
else:
norms.append(None)
attentions.append(None)
self.resnets = nn.ModuleList(resnets)
self.norms = nn.ModuleList(norms)
self.attentions = nn.ModuleList(attentions)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
conv_cache: Optional[Dict[str, torch.Tensor]] = None,
chunk_size: int = 2**15,
) -> torch.Tensor:
r"""Forward method of the `MochiUpBlock3D` class."""
new_conv_cache = {}
conv_cache = conv_cache or {}
hidden_states, new_conv_cache["conv_in"] = self.conv_in(hidden_states)
for i, (resnet, norm, attn) in enumerate(zip(self.resnets, self.norms, self.attentions)):
conv_cache_key = f"resnet_{i}"
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
resnet,
hidden_states,
conv_cache=conv_cache.get(conv_cache_key),
)
else:
hidden_states, new_conv_cache[conv_cache_key] = resnet(
hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
if attn is not None:
residual = hidden_states
hidden_states = norm(hidden_states)
batch_size, num_channels, num_frames, height, width = hidden_states.shape
hidden_states = hidden_states.permute(0, 3, 4, 2, 1).flatten(0, 2).contiguous()
# Perform attention in chunks to avoid following error:
# RuntimeError: CUDA error: invalid configuration argument
if hidden_states.size(0) <= chunk_size:
hidden_states = attn(hidden_states)
else:
hidden_states_chunks = []
for i in range(0, hidden_states.size(0), chunk_size):
hidden_states_chunk = hidden_states[i : i + chunk_size]
hidden_states_chunk = attn(hidden_states_chunk)
hidden_states_chunks.append(hidden_states_chunk)
hidden_states = torch.cat(hidden_states_chunks)
hidden_states = hidden_states.unflatten(0, (batch_size, height, width)).permute(0, 4, 3, 1, 2)
hidden_states = residual + hidden_states
return hidden_states, new_conv_cache
class MochiMidBlock3D(nn.Module):
r"""
A middle block used in the Mochi model.
Args:
in_channels (`int`):
Number of input channels.
num_layers (`int`, defaults to `3`):
Number of resnet blocks in the block.
"""
def __init__(
self,
in_channels: int, # 768
num_layers: int = 3,
add_attention: bool = True,
):
super().__init__()
resnets = []
norms = []
attentions = []
for _ in range(num_layers):
resnets.append(MochiResnetBlock3D(in_channels=in_channels))
if add_attention:
norms.append(MochiChunkedGroupNorm3D(num_channels=in_channels))
attentions.append(
Attention(
query_dim=in_channels,
heads=in_channels // 32,
dim_head=32,
qk_norm="l2",
is_causal=True,
processor=MochiVaeAttnProcessor2_0(),
)
)
else:
norms.append(None)
attentions.append(None)
self.resnets = nn.ModuleList(resnets)
self.norms = nn.ModuleList(norms)
self.attentions = nn.ModuleList(attentions)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
conv_cache: Optional[Dict[str, torch.Tensor]] = None,
) -> torch.Tensor:
r"""Forward method of the `MochiMidBlock3D` class."""
new_conv_cache = {}
conv_cache = conv_cache or {}
for i, (resnet, norm, attn) in enumerate(zip(self.resnets, self.norms, self.attentions)):
conv_cache_key = f"resnet_{i}"
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
resnet, hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
else:
hidden_states, new_conv_cache[conv_cache_key] = resnet(
hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
if attn is not None:
residual = hidden_states
hidden_states = norm(hidden_states)
batch_size, num_channels, num_frames, height, width = hidden_states.shape
hidden_states = hidden_states.permute(0, 3, 4, 2, 1).flatten(0, 2).contiguous()
hidden_states = attn(hidden_states)
hidden_states = hidden_states.unflatten(0, (batch_size, height, width)).permute(0, 4, 3, 1, 2)
hidden_states = residual + hidden_states
return hidden_states, new_conv_cache
class MochiUpBlock3D(nn.Module):
r"""
An upsampling block used in the Mochi model.
Args:
in_channels (`int`):
Number of input channels.
        out_channels (`int`):
            Number of output channels.
num_layers (`int`, defaults to `1`):
Number of resnet blocks in the block.
temporal_expansion (`int`, defaults to `2`):
Temporal expansion factor.
spatial_expansion (`int`, defaults to `2`):
Spatial expansion factor.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
num_layers: int = 1,
temporal_expansion: int = 2,
spatial_expansion: int = 2,
):
super().__init__()
self.temporal_expansion = temporal_expansion
self.spatial_expansion = spatial_expansion
resnets = []
for _ in range(num_layers):
resnets.append(MochiResnetBlock3D(in_channels=in_channels))
self.resnets = nn.ModuleList(resnets)
self.proj = nn.Linear(in_channels, out_channels * temporal_expansion * spatial_expansion**2)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
conv_cache: Optional[Dict[str, torch.Tensor]] = None,
) -> torch.Tensor:
r"""Forward method of the `MochiUpBlock3D` class."""
new_conv_cache = {}
conv_cache = conv_cache or {}
for i, resnet in enumerate(self.resnets):
conv_cache_key = f"resnet_{i}"
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
resnet,
hidden_states,
conv_cache=conv_cache.get(conv_cache_key),
)
else:
hidden_states, new_conv_cache[conv_cache_key] = resnet(
hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
hidden_states = self.proj(hidden_states)
hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
batch_size, num_channels, num_frames, height, width = hidden_states.shape
st = self.temporal_expansion
sh = self.spatial_expansion
sw = self.spatial_expansion
# Reshape and unpatchify
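        # (B, C * st * sh * sw, T, H, W) -> (B, C, T * st, H * sh, W * sw): every latent position expands into an
        # st x sh x sw spatio-temporal block.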
hidden_states = hidden_states.view(batch_size, -1, st, sh, sw, num_frames, height, width)
hidden_states = hidden_states.permute(0, 1, 5, 2, 6, 3, 7, 4).contiguous()
hidden_states = hidden_states.view(batch_size, -1, num_frames * st, height * sh, width * sw)
return hidden_states, new_conv_cache
class FourierFeatures(nn.Module):
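    r"""
    Concatenates the input with sine and cosine features computed at frequencies `2**f * 2 * pi` for `f` in
    `range(start, stop, step)`, expanding the channel dimension from `C` to `C * (1 + 2 * num_freqs)` with
    `num_freqs = (stop - start) // step`.
    """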
def __init__(self, start: int = 6, stop: int = 8, step: int = 1):
super().__init__()
self.start = start
self.stop = stop
self.step = step
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
r"""Forward method of the `FourierFeatures` class."""
original_dtype = inputs.dtype
inputs = inputs.to(torch.float32)
num_channels = inputs.shape[1]
num_freqs = (self.stop - self.start) // self.step
freqs = torch.arange(self.start, self.stop, self.step, dtype=inputs.dtype, device=inputs.device)
w = torch.pow(2.0, freqs) * (2 * torch.pi) # [num_freqs]
w = w.repeat(num_channels)[None, :, None, None, None] # [1, num_channels * num_freqs, 1, 1, 1]
# Interleaved repeat of input channels to match w
h = inputs.repeat_interleave(num_freqs, dim=1) # [B, C * num_freqs, T, H, W]
# Scale channels by frequency.
h = w * h
return torch.cat([inputs, torch.sin(h), torch.cos(h)], dim=1).to(original_dtype)
class MochiEncoder3D(nn.Module):
r"""
    The `MochiEncoder3D` layer of a variational autoencoder that encodes input video samples into their latent
    representation.
Args:
in_channels (`int`, *optional*):
The number of input channels.
out_channels (`int`, *optional*):
The number of output channels.
block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 512, 768)`):
The number of output channels for each block.
layers_per_block (`Tuple[int, ...]`, *optional*, defaults to `(3, 3, 4, 6, 3)`):
The number of resnet blocks for each block.
temporal_expansions (`Tuple[int, ...]`, *optional*, defaults to `(1, 2, 3)`):
The temporal expansion factor for each of the up blocks.
spatial_expansions (`Tuple[int, ...]`, *optional*, defaults to `(2, 2, 2)`):
The spatial expansion factor for each of the up blocks.
        act_fn (`str`, *optional*, defaults to `"swish"`):
The non-linearity to use in the decoder.
"""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: Tuple[int, ...] = (128, 256, 512, 768),
layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3),
temporal_expansions: Tuple[int, ...] = (1, 2, 3),
spatial_expansions: Tuple[int, ...] = (2, 2, 2),
add_attention_block: Tuple[bool, ...] = (False, True, True, True, True),
act_fn: str = "swish",
):
super().__init__()
self.nonlinearity = get_activation(act_fn)
self.fourier_features = FourierFeatures()
self.proj_in = nn.Linear(in_channels, block_out_channels[0])
self.block_in = MochiMidBlock3D(
in_channels=block_out_channels[0], num_layers=layers_per_block[0], add_attention=add_attention_block[0]
)
down_blocks = []
for i in range(len(block_out_channels) - 1):
down_block = MochiDownBlock3D(
in_channels=block_out_channels[i],
out_channels=block_out_channels[i + 1],
num_layers=layers_per_block[i + 1],
temporal_expansion=temporal_expansions[i],
spatial_expansion=spatial_expansions[i],
add_attention=add_attention_block[i + 1],
)
down_blocks.append(down_block)
self.down_blocks = nn.ModuleList(down_blocks)
self.block_out = MochiMidBlock3D(
in_channels=block_out_channels[-1], num_layers=layers_per_block[-1], add_attention=add_attention_block[-1]
)
self.norm_out = MochiChunkedGroupNorm3D(block_out_channels[-1])
        self.proj_out = nn.Linear(block_out_channels[-1], 2 * out_channels, bias=False)
        self.gradient_checkpointing = False
def forward(
self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None
) -> torch.Tensor:
r"""Forward method of the `MochiEncoder3D` class."""
new_conv_cache = {}
conv_cache = conv_cache or {}
hidden_states = self.fourier_features(hidden_states)
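        # Fourier features concatenate sine/cosine encodings to the input, expanding the channel dimension before
        # `proj_in` (see `FourierFeatures`).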
hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
hidden_states = self.proj_in(hidden_states)
hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states, new_conv_cache["block_in"] = self._gradient_checkpointing_func(
self.block_in, hidden_states, conv_cache=conv_cache.get("block_in")
)
for i, down_block in enumerate(self.down_blocks):
conv_cache_key = f"down_block_{i}"
hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
down_block, hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
else:
hidden_states, new_conv_cache["block_in"] = self.block_in(
hidden_states, conv_cache=conv_cache.get("block_in")
)
for i, down_block in enumerate(self.down_blocks):
conv_cache_key = f"down_block_{i}"
hidden_states, new_conv_cache[conv_cache_key] = down_block(
hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
hidden_states, new_conv_cache["block_out"] = self.block_out(
hidden_states, conv_cache=conv_cache.get("block_out")
)
hidden_states = self.norm_out(hidden_states)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
hidden_states = self.proj_out(hidden_states)
hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
return hidden_states, new_conv_cache
class MochiDecoder3D(nn.Module):
r"""
The `MochiDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output
sample.
Args:
in_channels (`int`, *optional*):
The number of input channels.
out_channels (`int`, *optional*):
The number of output channels.
block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 512, 768)`):
The number of output channels for each block.
layers_per_block (`Tuple[int, ...]`, *optional*, defaults to `(3, 3, 4, 6, 3)`):
The number of resnet blocks for each block.
temporal_expansions (`Tuple[int, ...]`, *optional*, defaults to `(1, 2, 3)`):
The temporal expansion factor for each of the up blocks.
spatial_expansions (`Tuple[int, ...]`, *optional*, defaults to `(2, 2, 2)`):
The spatial expansion factor for each of the up blocks.
act_fn (`str`, *optional*, defaults to `"swish"`):
The activation function to use in the decoder.
"""
def __init__(
self,
in_channels: int, # 12
out_channels: int, # 3
block_out_channels: Tuple[int, ...] = (128, 256, 512, 768),
layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3),
temporal_expansions: Tuple[int, ...] = (1, 2, 3),
spatial_expansions: Tuple[int, ...] = (2, 2, 2),
act_fn: str = "swish",
):
super().__init__()
self.nonlinearity = get_activation(act_fn)
self.conv_in = nn.Conv3d(in_channels, block_out_channels[-1], kernel_size=(1, 1, 1))
self.block_in = MochiMidBlock3D(
in_channels=block_out_channels[-1],
num_layers=layers_per_block[-1],
add_attention=False,
)
up_blocks = []
for i in range(len(block_out_channels) - 1):
up_block = MochiUpBlock3D(
in_channels=block_out_channels[-i - 1],
out_channels=block_out_channels[-i - 2],
num_layers=layers_per_block[-i - 2],
temporal_expansion=temporal_expansions[-i - 1],
spatial_expansion=spatial_expansions[-i - 1],
)
up_blocks.append(up_block)
self.up_blocks = nn.ModuleList(up_blocks)
self.block_out = MochiMidBlock3D(
in_channels=block_out_channels[0],
num_layers=layers_per_block[0],
add_attention=False,
)
self.proj_out = nn.Linear(block_out_channels[0], out_channels)
self.gradient_checkpointing = False
def forward(
self, hidden_states: torch.Tensor, conv_cache: Optional[Dict[str, torch.Tensor]] = None
) -> torch.Tensor:
r"""Forward method of the `MochiDecoder3D` class."""
new_conv_cache = {}
conv_cache = conv_cache or {}
hidden_states = self.conv_in(hidden_states)
# 1. Mid
if torch.is_grad_enabled() and self.gradient_checkpointing:
hidden_states, new_conv_cache["block_in"] = self._gradient_checkpointing_func(
self.block_in, hidden_states, conv_cache=conv_cache.get("block_in")
)
for i, up_block in enumerate(self.up_blocks):
conv_cache_key = f"up_block_{i}"
hidden_states, new_conv_cache[conv_cache_key] = self._gradient_checkpointing_func(
up_block, hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
else:
hidden_states, new_conv_cache["block_in"] = self.block_in(
hidden_states, conv_cache=conv_cache.get("block_in")
)
for i, up_block in enumerate(self.up_blocks):
conv_cache_key = f"up_block_{i}"
hidden_states, new_conv_cache[conv_cache_key] = up_block(
hidden_states, conv_cache=conv_cache.get(conv_cache_key)
)
hidden_states, new_conv_cache["block_out"] = self.block_out(
hidden_states, conv_cache=conv_cache.get("block_out")
)
hidden_states = self.nonlinearity(hidden_states)
hidden_states = hidden_states.permute(0, 2, 3, 4, 1)
hidden_states = self.proj_out(hidden_states)
hidden_states = hidden_states.permute(0, 4, 1, 2, 3)
return hidden_states, new_conv_cache
class AutoencoderKLMochi(ModelMixin, ConfigMixin):
r"""
A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
[Mochi 1 preview](https://github.com/genmoai/models).
This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
out_channels (int, *optional*, defaults to 3): Number of channels in the output.
encoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(64, 128, 256, 384)`):
Tuple of block output channels for the encoder.
decoder_block_out_channels (`Tuple[int]`, *optional*, defaults to `(128, 256, 512, 768)`):
Tuple of block output channels for the decoder.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
scaling_factor (`float`, *optional*, defaults to `1.0`):
The component-wise standard deviation of the trained latent space computed using the first batch of the
training set. This is used to scale the latent space to have unit variance when training the diffusion
model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
/ scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
"""
_supports_gradient_checkpointing = True
_no_split_modules = ["MochiResnetBlock3D"]
@register_to_config
def __init__(
self,
in_channels: int = 15,
out_channels: int = 3,
encoder_block_out_channels: Tuple[int] = (64, 128, 256, 384),
decoder_block_out_channels: Tuple[int] = (128, 256, 512, 768),
latent_channels: int = 12,
layers_per_block: Tuple[int, ...] = (3, 3, 4, 6, 3),
act_fn: str = "silu",
temporal_expansions: Tuple[int, ...] = (1, 2, 3),
spatial_expansions: Tuple[int, ...] = (2, 2, 2),
add_attention_block: Tuple[bool, ...] = (False, True, True, True, True),
latents_mean: Tuple[float, ...] = (
-0.06730895953510081,
-0.038011381506090416,
-0.07477820912866141,
-0.05565264470995561,
0.012767231469026969,
-0.04703542746246419,
0.043896967884726704,
-0.09346305707025976,
-0.09918314763016893,
-0.008729793427399178,
-0.011931556316503654,
-0.0321993391887285,
),
latents_std: Tuple[float, ...] = (
0.9263795028493863,
0.9248894543193766,
0.9393059390890617,
0.959253732819592,
0.8244560132752793,
0.917259975397747,
0.9294154431013696,
1.3720942357788521,
0.881393668867029,
0.9168315692124348,
0.9185249279345552,
0.9274757570805041,
),
scaling_factor: float = 1.0,
):
super().__init__()
self.encoder = MochiEncoder3D(
in_channels=in_channels,
out_channels=latent_channels,
block_out_channels=encoder_block_out_channels,
layers_per_block=layers_per_block,
temporal_expansions=temporal_expansions,
spatial_expansions=spatial_expansions,
add_attention_block=add_attention_block,
act_fn=act_fn,
)
self.decoder = MochiDecoder3D(
in_channels=latent_channels,
out_channels=out_channels,
block_out_channels=decoder_block_out_channels,
layers_per_block=layers_per_block,
temporal_expansions=temporal_expansions,
spatial_expansions=spatial_expansions,
act_fn=act_fn,
)
self.spatial_compression_ratio = functools.reduce(lambda x, y: x * y, spatial_expansions, 1)
self.temporal_compression_ratio = functools.reduce(lambda x, y: x * y, temporal_expansions, 1)
# When decoding a batch of video latents at a time, one can save memory by slicing across the batch dimension
# to perform decoding of a single video latent at a time.
self.use_slicing = False
# When decoding spatially large video latents, the memory requirement is very high. By breaking the video latent
# frames spatially into smaller tiles and performing multiple forward passes for decoding, and then blending the
# intermediate tiles together, the memory requirement can be lowered.
self.use_tiling = False
# When decoding temporally long video latents, the memory requirement is very high. By decoding latent frames
# at a fixed frame batch size (based on `self.num_latent_frames_batch_size`), the memory requirement can be lowered.
self.use_framewise_encoding = False
self.use_framewise_decoding = False
# This can be used to determine how many output frames the final decoded video has. To maintain consistency with
# the original implementation, this defaults to `True`.
# - Original implementation (drop_last_temporal_frames=True):
# Output frames = (latent_frames - 1) * temporal_compression_ratio + 1
# - Without dropping additional temporal upscaled frames (drop_last_temporal_frames=False):
# Output frames = latent_frames * temporal_compression_ratio
# The latter case is useful for frame packing and some training/finetuning scenarios where the additional frames are needed.
self.drop_last_temporal_frames = True
# This can be configured based on the amount of GPU memory available.
# `12` for sample frames and `2` for latent frames are sensible defaults for consumer GPUs.
# Setting it to higher values results in higher memory usage.
self.num_sample_frames_batch_size = 12
self.num_latent_frames_batch_size = 2
# The minimal tile height and width for spatial tiling to be used
self.tile_sample_min_height = 256
self.tile_sample_min_width = 256
# The minimal distance between two spatial tiles
self.tile_sample_stride_height = 192
self.tile_sample_stride_width = 192
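# Worked example of the bookkeeping above: with the default expansions the temporal compression
# ratio is 1 * 2 * 3 = 6 and the spatial ratio is 2 * 2 * 2 = 8. Decoding a latent with 28 frames
# under `drop_last_temporal_frames=True` therefore yields (28 - 1) * 6 + 1 = 163 output frames,
# while setting it to False would yield 28 * 6 = 168 frames.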
def enable_tiling(
self,
tile_sample_min_height: Optional[int] = None,
tile_sample_min_width: Optional[int] = None,
tile_sample_stride_height: Optional[float] = None,
tile_sample_stride_width: Optional[float] = None,
) -> None:
r"""
Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
Args:
tile_sample_min_height (`int`, *optional*):
The minimum height required for a sample to be separated into tiles across the height dimension.
tile_sample_min_width (`int`, *optional*):
The minimum width required for a sample to be separated into tiles across the width dimension.
tile_sample_stride_height (`int`, *optional*):
The stride between two consecutive vertical tiles. The resulting overlap is blended to ensure that
there are no tiling artifacts produced across the height dimension.
tile_sample_stride_width (`int`, *optional*):
The stride between two consecutive horizontal tiles. This is to ensure that there are no tiling
artifacts produced across the width dimension.
"""
self.use_tiling = True
self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
self.tile_sample_stride_height = tile_sample_stride_height or self.tile_sample_stride_height
self.tile_sample_stride_width = tile_sample_stride_width or self.tile_sample_stride_width
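# Illustrative usage (a sketch, assuming a constructed `AutoencoderKLMochi` instance named `vae`):
#     vae.enable_tiling(tile_sample_min_height=256, tile_sample_min_width=256,
#                       tile_sample_stride_height=192, tile_sample_stride_width=192)
# With these defaults, neighbouring tiles overlap by 256 - 192 = 64 pixels, and the overlap is
# blended by `blend_v`/`blend_h` below to hide seams.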
def disable_tiling(self) -> None:
r"""
Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.use_tiling = False
def enable_slicing(self) -> None:
r"""
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
self.use_slicing = True
def disable_slicing(self) -> None:
r"""
Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
decoding in one step.
"""
self.use_slicing = False
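# Illustrative usage (sketch): `vae.enable_slicing()` makes `encode`/`decode` process a batched
# input one element at a time (see the `use_slicing` branches in those methods), trading speed for
# a lower peak memory footprint; `vae.disable_slicing()` restores single-pass behaviour.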
def _enable_framewise_encoding(self):
r"""
Enables the framewise VAE encoding implementation with past latent padding. By default, Diffusers uses the
oneshot encoding implementation without current latent replicate padding.
Warning: Framewise encoding may not work as expected due to the causal attention layers. If you enable
framewise encoding, encode a video, and try to decode it, there will be a noticeable jittering effect.
"""
self.use_framewise_encoding = True
for name, module in self.named_modules():
if isinstance(module, CogVideoXCausalConv3d):
module.pad_mode = "constant"
def _enable_framewise_decoding(self):
r"""
Enables the framewise VAE decoding implementation with past latent padding. By default, Diffusers uses the
oneshot decoding implementation without current latent replicate padding.
"""
self.use_framewise_decoding = True
for name, module in self.named_modules():
if isinstance(module, CogVideoXCausalConv3d):
module.pad_mode = "constant"
def _encode(self, x: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, num_frames, height, width = x.shape
if self.use_tiling and (width > self.tile_sample_min_width or height > self.tile_sample_min_height):
return self.tiled_encode(x)
if self.use_framewise_encoding:
raise NotImplementedError(
"Frame-wise encoding does not work with the Mochi VAE Encoder due to the presence of attention layers. "
"As intermediate frames are not independent from each other, they cannot be encoded frame-wise."
)
else:
enc, _ = self.encoder(x)
return enc
@apply_forward_hook
def encode(
self, x: torch.Tensor, return_dict: bool = True
) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
"""
Encode a batch of images into latents.
Args:
x (`torch.Tensor`): Input batch of images.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
Returns:
The latent representations of the encoded videos. If `return_dict` is True, a
[`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
"""
if self.use_slicing and x.shape[0] > 1:
encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
h = torch.cat(encoded_slices)
else:
h = self._encode(x)
posterior = DiagonalGaussianDistribution(h)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=posterior)
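# Illustrative usage (a sketch; the shapes are assumptions chosen to match the default 6x temporal
# and 8x spatial compression, and `vae` is assumed to be an `AutoencoderKLMochi` instance):
#     x = torch.randn(1, 3, 19, 256, 256)        # (batch, channels, frames, height, width)
#     posterior = vae.encode(x).latent_dist
#     z = posterior.sample()                      # roughly (1, 12, 4, 32, 32)
# The exact latent frame count depends on the causal frame handling of the down blocks.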
def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
batch_size, num_channels, num_frames, height, width = z.shape
tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
if self.use_tiling and (width > tile_latent_min_width or height > tile_latent_min_height):
return self.tiled_decode(z, return_dict=return_dict)
if self.use_framewise_decoding:
conv_cache = None
dec = []
for i in range(0, num_frames, self.num_latent_frames_batch_size):
z_intermediate = z[:, :, i : i + self.num_latent_frames_batch_size]
z_intermediate, conv_cache = self.decoder(z_intermediate, conv_cache=conv_cache)
dec.append(z_intermediate)
dec = torch.cat(dec, dim=2)
else:
dec, _ = self.decoder(z)
if self.drop_last_temporal_frames and dec.size(2) >= self.temporal_compression_ratio:
dec = dec[:, :, self.temporal_compression_ratio - 1 :]
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
@apply_forward_hook
def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
"""
Decode a batch of images.
Args:
z (`torch.Tensor`): Input batch of latent vectors.
return_dict (`bool`, *optional*, defaults to `True`):
Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
Returns:
[`~models.vae.DecoderOutput`] or `tuple`:
If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
returned.
"""
if self.use_slicing and z.shape[0] > 1:
decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
decoded = torch.cat(decoded_slices)
else:
decoded = self._decode(z).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=decoded)
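# Illustrative usage (sketch, assuming `z` is a latent of shape (batch, 12, latent_frames, h, w)):
#     video = vae.decode(z).sample
# With `drop_last_temporal_frames=True` (the default) the decoded clip has
# (latent_frames - 1) * temporal_compression_ratio + 1 frames.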
def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[3], b.shape[3], blend_extent)
for y in range(blend_extent):
b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
y / blend_extent
)
return b
def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
blend_extent = min(a.shape[4], b.shape[4], blend_extent)
for x in range(blend_extent):
b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
x / blend_extent
)
return b
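# The two helpers above ramp linearly across the overlap. For example, with blend_extent = 4 the
# incoming tile `b` is weighted 0/4, 1/4, 2/4, 3/4 over the first four rows (or columns) while the
# previous tile `a` contributes the complementary 4/4, 3/4, 2/4, 1/4, so the seam fades smoothly
# from `a` into `b`.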
def tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
r"""Encode a batch of images using a tiled encoder.
Args:
x (`torch.Tensor`): Input batch of videos.
Returns:
`torch.Tensor`:
The latent representation of the encoded videos.
"""
batch_size, num_channels, num_frames, height, width = x.shape
latent_height = height // self.spatial_compression_ratio
latent_width = width // self.spatial_compression_ratio
tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
blend_height = tile_latent_min_height - tile_latent_stride_height
blend_width = tile_latent_min_width - tile_latent_stride_width
# Split x into overlapping tiles and encode them separately.
# The tiles have an overlap to avoid seams between tiles.
rows = []
for i in range(0, height, self.tile_sample_stride_height):
row = []
for j in range(0, width, self.tile_sample_stride_width):
if self.use_framewise_encoding:
raise NotImplementedError(
"Frame-wise encoding does not work with the Mochi VAE Encoder due to the presence of attention layers. "
"As intermediate frames are not independent from each other, they cannot be encoded frame-wise."
)
else:
time, _ = self.encoder(
x[:, :, :, i : i + self.tile_sample_min_height, j : j + self.tile_sample_min_width]
)
row.append(time)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_height)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_width)
result_row.append(tile[:, :, :, :tile_latent_stride_height, :tile_latent_stride_width])
result_rows.append(torch.cat(result_row, dim=4))
enc = torch.cat(result_rows, dim=3)[:, :, :, :latent_height, :latent_width]
return enc
def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
r"""
Decode a batch of images using a tiled decoder.
Args:
z (`torch.Tensor`): Input batch of latent vectors.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
Returns:
[`~models.vae.DecoderOutput`] or `tuple`:
If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
returned.
"""
batch_size, num_channels, num_frames, height, width = z.shape
sample_height = height * self.spatial_compression_ratio
sample_width = width * self.spatial_compression_ratio
tile_latent_min_height = self.tile_sample_min_height // self.spatial_compression_ratio
tile_latent_min_width = self.tile_sample_min_width // self.spatial_compression_ratio
tile_latent_stride_height = self.tile_sample_stride_height // self.spatial_compression_ratio
tile_latent_stride_width = self.tile_sample_stride_width // self.spatial_compression_ratio
blend_height = self.tile_sample_min_height - self.tile_sample_stride_height
blend_width = self.tile_sample_min_width - self.tile_sample_stride_width
# Split z into overlapping tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
rows = []
for i in range(0, height, tile_latent_stride_height):
row = []
for j in range(0, width, tile_latent_stride_width):
if self.use_framewise_decoding:
time = []
conv_cache = None
for k in range(0, num_frames, self.num_latent_frames_batch_size):
tile = z[
:,
:,
k : k + self.num_latent_frames_batch_size,
i : i + tile_latent_min_height,
j : j + tile_latent_min_width,
]
tile, conv_cache = self.decoder(tile, conv_cache=conv_cache)
time.append(tile)
time = torch.cat(time, dim=2)
else:
time, _ = self.decoder(z[:, :, :, i : i + tile_latent_min_height, j : j + tile_latent_min_width])
if self.drop_last_temporal_frames and time.size(2) >= self.temporal_compression_ratio:
time = time[:, :, self.temporal_compression_ratio - 1 :]
row.append(time)
rows.append(row)
result_rows = []
for i, row in enumerate(rows):
result_row = []
for j, tile in enumerate(row):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
tile = self.blend_v(rows[i - 1][j], tile, blend_height)
if j > 0:
tile = self.blend_h(row[j - 1], tile, blend_width)
result_row.append(tile[:, :, :, : self.tile_sample_stride_height, : self.tile_sample_stride_width])
result_rows.append(torch.cat(result_row, dim=4))
dec = torch.cat(result_rows, dim=3)[:, :, :, :sample_height, :sample_width]
if not return_dict:
return (dec,)
return DecoderOutput(sample=dec)
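# Worked example of the tiling arithmetic above with the defaults (256-pixel minimum tile,
# 192-pixel stride, 8x spatial compression): tile_latent_min = 256 // 8 = 32 and
# tile_latent_stride = 192 // 8 = 24, so a 64x64 latent is decoded as a 3x3 grid of tiles taken at
# latent offsets 0, 24 and 48. Each decoded tile spans up to 256 output pixels, neighbouring tiles
# overlap by 256 - 192 = 64 pixels that `blend_v`/`blend_h` blend together, and the concatenated
# result is cropped to the expected 64 * 8 = 512 pixel output.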
def forward(
self,
sample: torch.Tensor,
sample_posterior: bool = False,
return_dict: bool = True,
generator: Optional[torch.Generator] = None,
) -> Union[DecoderOutput, Tuple[DecoderOutput]]:
x = sample
posterior = self.encode(x).latent_dist
if sample_posterior:
z = posterior.sample(generator=generator)
else:
z = posterior.mode()
dec = self.decode(z)
if not return_dict:
return (dec,)
return dec
| diffusers/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py/0 | {
"file_path": "diffusers/src/diffusers/models/autoencoders/autoencoder_kl_mochi.py",
"repo_id": "diffusers",
"token_count": 21465
} |
# Copyright 2024 HunyuanDiT Authors, Qixun Wang and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...utils import BaseOutput, logging
from ..attention_processor import AttentionProcessor
from ..embeddings import (
HunyuanCombinedTimestepTextSizeStyleEmbedding,
PatchEmbed,
PixArtAlphaTextProjection,
)
from ..modeling_utils import ModelMixin
from ..transformers.hunyuan_transformer_2d import HunyuanDiTBlock
from .controlnet import Tuple, zero_module
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class HunyuanControlNetOutput(BaseOutput):
controlnet_block_samples: Tuple[torch.Tensor]
class HunyuanDiT2DControlNetModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
conditioning_channels: int = 3,
num_attention_heads: int = 16,
attention_head_dim: int = 88,
in_channels: Optional[int] = None,
patch_size: Optional[int] = None,
activation_fn: str = "gelu-approximate",
sample_size=32,
hidden_size=1152,
transformer_num_layers: int = 40,
mlp_ratio: float = 4.0,
cross_attention_dim: int = 1024,
cross_attention_dim_t5: int = 2048,
pooled_projection_dim: int = 1024,
text_len: int = 77,
text_len_t5: int = 256,
use_style_cond_and_image_meta_size: bool = True,
):
super().__init__()
self.num_heads = num_attention_heads
self.inner_dim = num_attention_heads * attention_head_dim
self.text_embedder = PixArtAlphaTextProjection(
in_features=cross_attention_dim_t5,
hidden_size=cross_attention_dim_t5 * 4,
out_features=cross_attention_dim,
act_fn="silu_fp32",
)
self.text_embedding_padding = nn.Parameter(
torch.randn(text_len + text_len_t5, cross_attention_dim, dtype=torch.float32)
)
self.pos_embed = PatchEmbed(
height=sample_size,
width=sample_size,
in_channels=in_channels,
embed_dim=hidden_size,
patch_size=patch_size,
pos_embed_type=None,
)
self.time_extra_emb = HunyuanCombinedTimestepTextSizeStyleEmbedding(
hidden_size,
pooled_projection_dim=pooled_projection_dim,
seq_len=text_len_t5,
cross_attention_dim=cross_attention_dim_t5,
use_style_cond_and_image_meta_size=use_style_cond_and_image_meta_size,
)
# controlnet_blocks
self.controlnet_blocks = nn.ModuleList([])
# HunyuanDiT Blocks
self.blocks = nn.ModuleList(
[
HunyuanDiTBlock(
dim=self.inner_dim,
num_attention_heads=self.config.num_attention_heads,
activation_fn=activation_fn,
ff_inner_dim=int(self.inner_dim * mlp_ratio),
cross_attention_dim=cross_attention_dim,
qk_norm=True, # See http://arxiv.org/abs/2302.05442 for details.
skip=False, # always False as it is the first half of the model
)
for layer in range(transformer_num_layers // 2 - 1)
]
)
self.input_block = zero_module(nn.Linear(hidden_size, hidden_size))
for _ in range(len(self.blocks)):
controlnet_block = nn.Linear(hidden_size, hidden_size)
controlnet_block = zero_module(controlnet_block)
self.controlnet_blocks.append(controlnet_block)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
`dict` of attention processors: A dictionary containing all attention processors used in the model,
indexed by their weight names.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the
corresponding cross attention processor. This is strongly recommended when setting trainable attention
processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
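# Illustrative usage (a sketch; `AttnProcessor` is assumed to be imported from
# `diffusers.models.attention_processor`):
#     procs = controlnet.attn_processors          # dict keyed by module path, e.g. "blocks.0.attn1.processor"
#     controlnet.set_attn_processor(AttnProcessor())
# When a dict is passed instead, it must contain exactly one entry per key returned by
# `attn_processors`, as enforced by the length check above.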
@classmethod
def from_transformer(
cls, transformer, conditioning_channels=3, transformer_num_layers=None, load_weights_from_transformer=True
):
config = transformer.config
activation_fn = config.activation_fn
attention_head_dim = config.attention_head_dim
cross_attention_dim = config.cross_attention_dim
cross_attention_dim_t5 = config.cross_attention_dim_t5
hidden_size = config.hidden_size
in_channels = config.in_channels
mlp_ratio = config.mlp_ratio
num_attention_heads = config.num_attention_heads
patch_size = config.patch_size
sample_size = config.sample_size
text_len = config.text_len
text_len_t5 = config.text_len_t5
conditioning_channels = conditioning_channels
transformer_num_layers = transformer_num_layers or config.transformer_num_layers
controlnet = cls(
conditioning_channels=conditioning_channels,
transformer_num_layers=transformer_num_layers,
activation_fn=activation_fn,
attention_head_dim=attention_head_dim,
cross_attention_dim=cross_attention_dim,
cross_attention_dim_t5=cross_attention_dim_t5,
hidden_size=hidden_size,
in_channels=in_channels,
mlp_ratio=mlp_ratio,
num_attention_heads=num_attention_heads,
patch_size=patch_size,
sample_size=sample_size,
text_len=text_len,
text_len_t5=text_len_t5,
)
if load_weights_from_transformer:
key = controlnet.load_state_dict(transformer.state_dict(), strict=False)
logger.warning(f"controlnet load from Hunyuan-DiT. missing_keys: {key[0]}")
return controlnet
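# Illustrative usage (a sketch; the checkpoint name and `HunyuanDiT2DModel` loading call are
# assumptions for the example, not prescribed by this module):
#     transformer = HunyuanDiT2DModel.from_pretrained(
#         "Tencent-Hunyuan/HunyuanDiT-v1.2-Diffusers", subfolder="transformer"
#     )
#     controlnet = HunyuanDiT2DControlNetModel.from_transformer(transformer)
# The ControlNet copies the matching transformer weights (`strict=False`) and zero-initializes its
# input and output projection blocks so that conditioning starts as a no-op before training.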
def forward(
self,
hidden_states,
timestep,
controlnet_cond: torch.Tensor,
conditioning_scale: float = 1.0,
encoder_hidden_states=None,
text_embedding_mask=None,
encoder_hidden_states_t5=None,
text_embedding_mask_t5=None,
image_meta_size=None,
style=None,
image_rotary_emb=None,
return_dict=True,
):
"""
The [`HunyuanDiT2DControlNetModel`] forward method.
Args:
hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
The input tensor.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step.
controlnet_cond ( `torch.Tensor` ):
The conditioning input to ControlNet.
conditioning_scale ( `float` ):
Indicate the conditioning scale.
encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of `BertModel`.
text_embedding_mask: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of `BertModel`.
encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
text_embedding_mask_t5: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of T5 Text Encoder.
image_meta_size (torch.Tensor):
Conditional embedding indicating the image sizes
style: torch.Tensor:
Conditional embedding indicating the style
image_rotary_emb (`torch.Tensor`):
The image rotary embeddings to apply on query and key tensors during attention calculation.
return_dict: bool
Whether to return a dictionary.
"""
height, width = hidden_states.shape[-2:]
hidden_states = self.pos_embed(hidden_states) # b,c,H,W -> b, N, C
# 2. pre-process
hidden_states = hidden_states + self.input_block(self.pos_embed(controlnet_cond))
temb = self.time_extra_emb(
timestep, encoder_hidden_states_t5, image_meta_size, style, hidden_dtype=timestep.dtype
) # [B, D]
# text projection
batch_size, sequence_length, _ = encoder_hidden_states_t5.shape
encoder_hidden_states_t5 = self.text_embedder(
encoder_hidden_states_t5.view(-1, encoder_hidden_states_t5.shape[-1])
)
encoder_hidden_states_t5 = encoder_hidden_states_t5.view(batch_size, sequence_length, -1)
encoder_hidden_states = torch.cat([encoder_hidden_states, encoder_hidden_states_t5], dim=1)
text_embedding_mask = torch.cat([text_embedding_mask, text_embedding_mask_t5], dim=-1)
text_embedding_mask = text_embedding_mask.unsqueeze(2).bool()
encoder_hidden_states = torch.where(text_embedding_mask, encoder_hidden_states, self.text_embedding_padding)
block_res_samples = ()
for layer, block in enumerate(self.blocks):
hidden_states = block(
hidden_states,
temb=temb,
encoder_hidden_states=encoder_hidden_states,
image_rotary_emb=image_rotary_emb,
) # (N, L, D)
block_res_samples = block_res_samples + (hidden_states,)
controlnet_block_res_samples = ()
for block_res_sample, controlnet_block in zip(block_res_samples, self.controlnet_blocks):
block_res_sample = controlnet_block(block_res_sample)
controlnet_block_res_samples = controlnet_block_res_samples + (block_res_sample,)
# 6. scaling
controlnet_block_res_samples = [sample * conditioning_scale for sample in controlnet_block_res_samples]
if not return_dict:
return (controlnet_block_res_samples,)
return HunyuanControlNetOutput(controlnet_block_samples=controlnet_block_res_samples)
class HunyuanDiT2DMultiControlNetModel(ModelMixin):
r"""
`HunyuanDiT2DMultiControlNetModel` wrapper class for Multi-HunyuanDiT2DControlNetModel
This module is a wrapper for multiple instances of the `HunyuanDiT2DControlNetModel`. The `forward()` API is
designed to be compatible with `HunyuanDiT2DControlNetModel`.
Args:
controlnets (`List[HunyuanDiT2DControlNetModel]`):
Provides additional conditioning to the transformer during the denoising process. You must provide
multiple `HunyuanDiT2DControlNetModel` instances as a list.
"""
def __init__(self, controlnets):
super().__init__()
self.nets = nn.ModuleList(controlnets)
def forward(
self,
hidden_states,
timestep,
controlnet_cond: torch.Tensor,
conditioning_scale: float = 1.0,
encoder_hidden_states=None,
text_embedding_mask=None,
encoder_hidden_states_t5=None,
text_embedding_mask_t5=None,
image_meta_size=None,
style=None,
image_rotary_emb=None,
return_dict=True,
):
"""
The [`HunyuanDiT2DMultiControlNetModel`] forward method.
Args:
hidden_states (`torch.Tensor` of shape `(batch size, dim, height, width)`):
The input tensor.
timestep ( `torch.LongTensor`, *optional*):
Used to indicate denoising step.
controlnet_cond ( `torch.Tensor` ):
The conditioning input to ControlNet.
conditioning_scale ( `float` ):
Indicate the conditioning scale.
encoder_hidden_states ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of `BertModel`.
text_embedding_mask: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of `BertModel`.
encoder_hidden_states_t5 ( `torch.Tensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. This is the output of T5 Text Encoder.
text_embedding_mask_t5: torch.Tensor
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. This is the output
of T5 Text Encoder.
image_meta_size (torch.Tensor):
Conditional embedding indicating the image sizes
style: torch.Tensor:
Conditional embedding indicating the style
image_rotary_emb (`torch.Tensor`):
The image rotary embeddings to apply on query and key tensors during attention calculation.
return_dict: bool
Whether to return a dictionary.
"""
for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
block_samples = controlnet(
hidden_states=hidden_states,
timestep=timestep,
controlnet_cond=image,
conditioning_scale=scale,
encoder_hidden_states=encoder_hidden_states,
text_embedding_mask=text_embedding_mask,
encoder_hidden_states_t5=encoder_hidden_states_t5,
text_embedding_mask_t5=text_embedding_mask_t5,
image_meta_size=image_meta_size,
style=style,
image_rotary_emb=image_rotary_emb,
return_dict=return_dict,
)
# merge samples
if i == 0:
control_block_samples = block_samples
else:
control_block_samples = [
control_block_sample + block_sample
for control_block_sample, block_sample in zip(control_block_samples[0], block_samples[0])
]
control_block_samples = (control_block_samples,)
return control_block_samples
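# Illustrative usage (a sketch; the variable names are placeholders):
#     multi = HunyuanDiT2DMultiControlNetModel([controlnet_pose, controlnet_depth])
#     out = multi(hidden_states, timestep, controlnet_cond=[pose_cond, depth_cond],
#                 conditioning_scale=[1.0, 0.8], encoder_hidden_states=encoder_hidden_states, ...)
# `controlnet_cond` and `conditioning_scale` must provide one entry per wrapped ControlNet; the
# per-net block samples are summed elementwise before being returned.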
| diffusers/src/diffusers/models/controlnets/controlnet_hunyuan.py/0 | {
"file_path": "diffusers/src/diffusers/models/controlnets/controlnet_hunyuan.py",
"repo_id": "diffusers",
"token_count": 7435
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from typing import Dict, Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils import is_torch_npu_available, is_torch_version
from .activations import get_activation
from .embeddings import CombinedTimestepLabelEmbeddings, PixArtAlphaCombinedTimestepSizeEmbeddings
class AdaLayerNorm(nn.Module):
r"""
Norm layer modified to incorporate timestep embeddings.
Parameters:
embedding_dim (`int`): The size of each embedding vector.
num_embeddings (`int`, *optional*): The size of the embeddings dictionary.
output_dim (`int`, *optional*):
The size of the projected embedding. Defaults to `2 * embedding_dim` when not provided.
norm_elementwise_affine (`bool`, defaults to `False`):
Whether the norm layer uses learnable elementwise affine parameters.
norm_eps (`float`, defaults to `1e-5`):
A small value added to the denominator for numerical stability.
chunk_dim (`int`, defaults to `0`):
The dimension along which the projected embedding is chunked into scale and shift.
"""
def __init__(
self,
embedding_dim: int,
num_embeddings: Optional[int] = None,
output_dim: Optional[int] = None,
norm_elementwise_affine: bool = False,
norm_eps: float = 1e-5,
chunk_dim: int = 0,
):
super().__init__()
self.chunk_dim = chunk_dim
output_dim = output_dim or embedding_dim * 2
if num_embeddings is not None:
self.emb = nn.Embedding(num_embeddings, embedding_dim)
else:
self.emb = None
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, output_dim)
self.norm = nn.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine)
def forward(
self, x: torch.Tensor, timestep: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None
) -> torch.Tensor:
if self.emb is not None:
temb = self.emb(timestep)
temb = self.linear(self.silu(temb))
if self.chunk_dim == 1:
# This is a bit weird why we have the order of "shift, scale" here and "scale, shift" in the
# other if-branch. This branch is specific to CogVideoX for now.
shift, scale = temb.chunk(2, dim=1)
shift = shift[:, None, :]
scale = scale[:, None, :]
else:
scale, shift = temb.chunk(2, dim=0)
x = self.norm(x) * (1 + scale) + shift
return x
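# Shape sketch for the `chunk_dim == 1` branch above (an illustration, not exhaustive): with the
# default `output_dim = 2 * embedding_dim`, `temb` has shape (batch, 2 * dim); the chunk yields
# shift and scale of shape (batch, dim), which are unsqueezed to (batch, 1, dim) so they broadcast
# over the sequence dimension of `x` with shape (batch, seq_len, dim).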
class FP32LayerNorm(nn.LayerNorm):
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
origin_dtype = inputs.dtype
return F.layer_norm(
inputs.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
).to(origin_dtype)
class SD35AdaLayerNormZeroX(nn.Module):
r"""
Norm layer adaptive layer norm zero (AdaLN-Zero).
Parameters:
embedding_dim (`int`): The size of each embedding vector.
norm_type (`str`, defaults to `"layer_norm"`): The type of normalization layer to use.
bias (`bool`, defaults to `True`): Whether to use a bias in the linear projection.
"""
def __init__(self, embedding_dim: int, norm_type: str = "layer_norm", bias: bool = True) -> None:
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, 9 * embedding_dim, bias=bias)
if norm_type == "layer_norm":
self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
else:
raise ValueError(f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm'.")
def forward(
self,
hidden_states: torch.Tensor,
emb: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, ...]:
emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp, shift_msa2, scale_msa2, gate_msa2 = emb.chunk(
9, dim=1
)
norm_hidden_states = self.norm(hidden_states)
hidden_states = norm_hidden_states * (1 + scale_msa[:, None]) + shift_msa[:, None]
norm_hidden_states2 = norm_hidden_states * (1 + scale_msa2[:, None]) + shift_msa2[:, None]
return hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp, norm_hidden_states2, gate_msa2
class AdaLayerNormZero(nn.Module):
r"""
Norm layer adaptive layer norm zero (adaLN-Zero).
Parameters:
embedding_dim (`int`): The size of each embedding vector.
num_embeddings (`int`): The size of the embeddings dictionary.
"""
def __init__(self, embedding_dim: int, num_embeddings: Optional[int] = None, norm_type="layer_norm", bias=True):
super().__init__()
if num_embeddings is not None:
self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
else:
self.emb = None
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=bias)
if norm_type == "layer_norm":
self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
elif norm_type == "fp32_layer_norm":
self.norm = FP32LayerNorm(embedding_dim, elementwise_affine=False, bias=False)
else:
raise ValueError(
f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'."
)
def forward(
self,
x: torch.Tensor,
timestep: Optional[torch.Tensor] = None,
class_labels: Optional[torch.LongTensor] = None,
hidden_dtype: Optional[torch.dtype] = None,
emb: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
if self.emb is not None:
emb = self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)
emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaLayerNormZeroSingle(nn.Module):
r"""
Norm layer adaptive layer norm zero (adaLN-Zero).
Parameters:
embedding_dim (`int`): The size of each embedding vector.
norm_type (`str`, defaults to `"layer_norm"`): The type of normalization layer to use.
bias (`bool`, defaults to `True`): Whether to use a bias in the linear projection.
"""
def __init__(self, embedding_dim: int, norm_type="layer_norm", bias=True):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, 3 * embedding_dim, bias=bias)
if norm_type == "layer_norm":
self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)
else:
raise ValueError(
f"Unsupported `norm_type` ({norm_type}) provided. Supported ones are: 'layer_norm', 'fp32_layer_norm'."
)
def forward(
self,
x: torch.Tensor,
emb: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate_msa = emb.chunk(3, dim=1)
x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa
class LuminaRMSNormZero(nn.Module):
"""
Norm layer adaptive RMS normalization zero.
Parameters:
embedding_dim (`int`): The size of each embedding vector.
"""
def __init__(self, embedding_dim: int, norm_eps: float, norm_elementwise_affine: bool):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(
min(embedding_dim, 1024),
4 * embedding_dim,
bias=True,
)
self.norm = RMSNorm(embedding_dim, eps=norm_eps, elementwise_affine=norm_elementwise_affine)
def forward(
self,
x: torch.Tensor,
emb: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
# emb = self.emb(timestep, encoder_hidden_states, encoder_mask)
emb = self.linear(self.silu(emb))
scale_msa, gate_msa, scale_mlp, gate_mlp = emb.chunk(4, dim=1)
x = self.norm(x) * (1 + scale_msa[:, None])
return x, gate_msa, scale_mlp, gate_mlp
class AdaLayerNormSingle(nn.Module):
r"""
Norm layer adaptive layer norm single (adaLN-single).
As proposed in PixArt-Alpha (see: https://arxiv.org/abs/2310.00426; Section 2.3).
Parameters:
embedding_dim (`int`): The size of each embedding vector.
use_additional_conditions (`bool`): To use additional conditions for normalization or not.
"""
def __init__(self, embedding_dim: int, use_additional_conditions: bool = False):
super().__init__()
self.emb = PixArtAlphaCombinedTimestepSizeEmbeddings(
embedding_dim, size_emb_dim=embedding_dim // 3, use_additional_conditions=use_additional_conditions
)
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
def forward(
self,
timestep: torch.Tensor,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
batch_size: Optional[int] = None,
hidden_dtype: Optional[torch.dtype] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
# No modulation happening here.
added_cond_kwargs = added_cond_kwargs or {"resolution": None, "aspect_ratio": None}
embedded_timestep = self.emb(timestep, **added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_dtype)
return self.linear(self.silu(embedded_timestep)), embedded_timestep
class AdaGroupNorm(nn.Module):
r"""
GroupNorm layer modified to incorporate timestep embeddings.
Parameters:
embedding_dim (`int`): The size of each embedding vector.
num_embeddings (`int`): The size of the embeddings dictionary.
num_groups (`int`): The number of groups to separate the channels into.
act_fn (`str`, *optional*, defaults to `None`): The activation function to use.
eps (`float`, *optional*, defaults to `1e-5`): The epsilon value to use for numerical stability.
"""
def __init__(
self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
):
super().__init__()
self.num_groups = num_groups
self.eps = eps
if act_fn is None:
self.act = None
else:
self.act = get_activation(act_fn)
self.linear = nn.Linear(embedding_dim, out_dim * 2)
def forward(self, x: torch.Tensor, emb: torch.Tensor) -> torch.Tensor:
if self.act:
emb = self.act(emb)
emb = self.linear(emb)
emb = emb[:, :, None, None]
scale, shift = emb.chunk(2, dim=1)
x = F.group_norm(x, self.num_groups, eps=self.eps)
x = x * (1 + scale) + shift
return x
class AdaLayerNormContinuous(nn.Module):
def __init__(
self,
embedding_dim: int,
conditioning_embedding_dim: int,
# NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters
# because the output is immediately scaled and shifted by the projected conditioning embeddings.
# Note that AdaLayerNorm does not let the norm layer have scale and shift parameters.
# However, this is how it was implemented in the original code, and it's rather likely you should
# set `elementwise_affine` to False.
elementwise_affine=True,
eps=1e-5,
bias=True,
norm_type="layer_norm",
):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(conditioning_embedding_dim, embedding_dim * 2, bias=bias)
if norm_type == "layer_norm":
self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
elif norm_type == "rms_norm":
self.norm = RMSNorm(embedding_dim, eps, elementwise_affine)
else:
raise ValueError(f"unknown norm_type {norm_type}")
def forward(self, x: torch.Tensor, conditioning_embedding: torch.Tensor) -> torch.Tensor:
# convert back to the original dtype in case `conditioning_embedding` is upcasted to float32 (needed for HunyuanDiT)
emb = self.linear(self.silu(conditioning_embedding).to(x.dtype))
scale, shift = torch.chunk(emb, 2, dim=1)
x = self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
return x
class LuminaLayerNormContinuous(nn.Module):
def __init__(
self,
embedding_dim: int,
conditioning_embedding_dim: int,
# NOTE: It is a bit weird that the norm layer can be configured to have scale and shift parameters
# because the output is immediately scaled and shifted by the projected conditioning embeddings.
# Note that AdaLayerNorm does not let the norm layer have scale and shift parameters.
# However, this is how it was implemented in the original code, and it's rather likely you should
# set `elementwise_affine` to False.
elementwise_affine=True,
eps=1e-5,
bias=True,
norm_type="layer_norm",
out_dim: Optional[int] = None,
):
super().__init__()
# AdaLN
self.silu = nn.SiLU()
self.linear_1 = nn.Linear(conditioning_embedding_dim, embedding_dim, bias=bias)
if norm_type == "layer_norm":
self.norm = LayerNorm(embedding_dim, eps, elementwise_affine, bias)
elif norm_type == "rms_norm":
self.norm = RMSNorm(embedding_dim, eps=eps, elementwise_affine=elementwise_affine)
else:
raise ValueError(f"unknown norm_type {norm_type}")
self.linear_2 = None
if out_dim is not None:
self.linear_2 = nn.Linear(embedding_dim, out_dim, bias=bias)
def forward(
self,
x: torch.Tensor,
conditioning_embedding: torch.Tensor,
) -> torch.Tensor:
# convert back to the original dtype in case `conditioning_embedding` is upcasted to float32 (needed for HunyuanDiT)
emb = self.linear_1(self.silu(conditioning_embedding).to(x.dtype))
scale = emb
x = self.norm(x) * (1 + scale)[:, None, :]
if self.linear_2 is not None:
x = self.linear_2(x)
return x
class CogView3PlusAdaLayerNormZeroTextImage(nn.Module):
r"""
Norm layer adaptive layer norm zero (adaLN-Zero).
Parameters:
embedding_dim (`int`): The size of each embedding vector.
dim (`int`): The feature dimension of the hidden states and the context.
"""
def __init__(self, embedding_dim: int, dim: int):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(embedding_dim, 12 * dim, bias=True)
self.norm_x = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
self.norm_c = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-5)
def forward(
self,
x: torch.Tensor,
context: torch.Tensor,
emb: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
emb = self.linear(self.silu(emb))
(
shift_msa,
scale_msa,
gate_msa,
shift_mlp,
scale_mlp,
gate_mlp,
c_shift_msa,
c_scale_msa,
c_gate_msa,
c_shift_mlp,
c_scale_mlp,
c_gate_mlp,
) = emb.chunk(12, dim=1)
normed_x = self.norm_x(x)
normed_context = self.norm_c(context)
x = normed_x * (1 + scale_msa[:, None]) + shift_msa[:, None]
context = normed_context * (1 + c_scale_msa[:, None]) + c_shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp, context, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp
class CogVideoXLayerNormZero(nn.Module):
def __init__(
self,
conditioning_dim: int,
embedding_dim: int,
elementwise_affine: bool = True,
eps: float = 1e-5,
bias: bool = True,
) -> None:
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(conditioning_dim, 6 * embedding_dim, bias=bias)
self.norm = nn.LayerNorm(embedding_dim, eps=eps, elementwise_affine=elementwise_affine)
def forward(
self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
shift, scale, gate, enc_shift, enc_scale, enc_gate = self.linear(self.silu(temb)).chunk(6, dim=1)
hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :]
encoder_hidden_states = self.norm(encoder_hidden_states) * (1 + enc_scale)[:, None, :] + enc_shift[:, None, :]
return hidden_states, encoder_hidden_states, gate[:, None, :], enc_gate[:, None, :]
if is_torch_version(">=", "2.1.0"):
LayerNorm = nn.LayerNorm
else:
# Has optional bias parameter compared to torch layer norm
# TODO: replace with torch layernorm once min required torch version >= 2.1
class LayerNorm(nn.Module):
def __init__(self, dim, eps: float = 1e-5, elementwise_affine: bool = True, bias: bool = True):
super().__init__()
self.eps = eps
if isinstance(dim, numbers.Integral):
dim = (dim,)
self.dim = torch.Size(dim)
if elementwise_affine:
self.weight = nn.Parameter(torch.ones(dim))
self.bias = nn.Parameter(torch.zeros(dim)) if bias else None
else:
self.weight = None
self.bias = None
def forward(self, input):
return F.layer_norm(input, self.dim, self.weight, self.bias, self.eps)
class RMSNorm(nn.Module):
def __init__(self, dim, eps: float, elementwise_affine: bool = True, bias: bool = False):
super().__init__()
self.eps = eps
self.elementwise_affine = elementwise_affine
if isinstance(dim, numbers.Integral):
dim = (dim,)
self.dim = torch.Size(dim)
self.weight = None
self.bias = None
if elementwise_affine:
self.weight = nn.Parameter(torch.ones(dim))
if bias:
self.bias = nn.Parameter(torch.zeros(dim))
def forward(self, hidden_states):
if is_torch_npu_available():
import torch_npu
if self.weight is not None:
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
hidden_states = torch_npu.npu_rms_norm(hidden_states, self.weight, epsilon=self.eps)[0]
if self.bias is not None:
hidden_states = hidden_states + self.bias
else:
input_dtype = hidden_states.dtype
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
if self.weight is not None:
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
hidden_states = hidden_states * self.weight
if self.bias is not None:
hidden_states = hidden_states + self.bias
else:
hidden_states = hidden_states.to(input_dtype)
return hidden_states
# TODO: (Dhruv) This can be replaced with regular RMSNorm in Mochi once `_keep_in_fp32_modules` is supported
# for sharded checkpoints, see: https://github.com/huggingface/diffusers/issues/10013
class MochiRMSNorm(nn.Module):
def __init__(self, dim, eps: float, elementwise_affine: bool = True):
super().__init__()
self.eps = eps
if isinstance(dim, numbers.Integral):
dim = (dim,)
self.dim = torch.Size(dim)
if elementwise_affine:
self.weight = nn.Parameter(torch.ones(dim))
else:
self.weight = None
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
if self.weight is not None:
hidden_states = hidden_states * self.weight
hidden_states = hidden_states.to(input_dtype)
return hidden_states
class GlobalResponseNorm(nn.Module):
# Taken from https://github.com/facebookresearch/ConvNeXt-V2/blob/3608f67cc1dae164790c5d0aead7bf2d73d9719b/models/utils.py#L105
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))
def forward(self, x):
gx = torch.norm(x, p=2, dim=(1, 2), keepdim=True)
nx = gx / (gx.mean(dim=-1, keepdim=True) + 1e-6)
return self.gamma * (x * nx) + self.beta + x
class LpNorm(nn.Module):
def __init__(self, p: int = 2, dim: int = -1, eps: float = 1e-12):
super().__init__()
self.p = p
self.dim = dim
self.eps = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return F.normalize(hidden_states, p=self.p, dim=self.dim, eps=self.eps)
def get_normalization(
norm_type: str = "batch_norm",
num_features: Optional[int] = None,
eps: float = 1e-5,
elementwise_affine: bool = True,
bias: bool = True,
) -> nn.Module:
if norm_type == "rms_norm":
norm = RMSNorm(num_features, eps=eps, elementwise_affine=elementwise_affine, bias=bias)
elif norm_type == "layer_norm":
norm = nn.LayerNorm(num_features, eps=eps, elementwise_affine=elementwise_affine, bias=bias)
elif norm_type == "batch_norm":
norm = nn.BatchNorm2d(num_features, eps=eps, affine=elementwise_affine)
else:
raise ValueError(f"{norm_type=} is not supported.")
return norm
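# Illustrative usage (sketch):
#     norm = get_normalization("rms_norm", num_features=512, eps=1e-6)
#     out = norm(torch.randn(2, 128, 512))    # normalizes over the last dimension
# Note that "batch_norm" returns an `nn.BatchNorm2d`, which expects 4D (N, C, H, W) inputs, while
# "layer_norm" and "rms_norm" normalize over the trailing feature dimension.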
| diffusers/src/diffusers/models/normalization.py/0 | {
"file_path": "diffusers/src/diffusers/models/normalization.py",
"repo_id": "diffusers",
"token_count": 10215
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Optional, Tuple
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ..attention_processor import Attention
from ..embeddings import get_timestep_embedding
from ..modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
r"""
T5 style decoder with FiLM conditioning.
Args:
input_dims (`int`, *optional*, defaults to `128`):
The number of input dimensions.
targets_length (`int`, *optional*, defaults to `256`):
The length of the targets.
d_model (`int`, *optional*, defaults to `768`):
Size of the input hidden states.
num_layers (`int`, *optional*, defaults to `12`):
The number of `DecoderLayer`'s to use.
num_heads (`int`, *optional*, defaults to `12`):
The number of attention heads to use.
d_kv (`int`, *optional*, defaults to `64`):
Size of the key-value projection vectors.
d_ff (`int`, *optional*, defaults to `2048`):
The number of dimensions in the intermediate feed-forward layer of `DecoderLayer`'s.
dropout_rate (`float`, *optional*, defaults to `0.1`):
Dropout probability.
"""
@register_to_config
def __init__(
self,
input_dims: int = 128,
targets_length: int = 256,
max_decoder_noise_time: float = 2000.0,
d_model: int = 768,
num_layers: int = 12,
num_heads: int = 12,
d_kv: int = 64,
d_ff: int = 2048,
dropout_rate: float = 0.1,
):
super().__init__()
self.conditioning_emb = nn.Sequential(
nn.Linear(d_model, d_model * 4, bias=False),
nn.SiLU(),
nn.Linear(d_model * 4, d_model * 4, bias=False),
nn.SiLU(),
)
self.position_encoding = nn.Embedding(targets_length, d_model)
self.position_encoding.weight.requires_grad = False
self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
self.dropout = nn.Dropout(p=dropout_rate)
self.decoders = nn.ModuleList()
for lyr_num in range(num_layers):
# FiLM conditional T5 decoder
lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
self.decoders.append(lyr)
self.decoder_norm = T5LayerNorm(d_model)
self.post_dropout = nn.Dropout(p=dropout_rate)
self.spec_out = nn.Linear(d_model, input_dims, bias=False)
def encoder_decoder_mask(self, query_input: torch.Tensor, key_input: torch.Tensor) -> torch.Tensor:
mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
return mask.unsqueeze(-3)
def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
batch, _, _ = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
time_steps = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time,
embedding_dim=self.config.d_model,
max_period=self.config.max_decoder_noise_time,
).to(dtype=self.dtype)
conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
seq_length = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
decoder_positions = torch.broadcast_to(
torch.arange(seq_length, device=decoder_input_tokens.device),
(batch, seq_length),
)
position_encodings = self.position_encoding(decoder_positions)
inputs = self.continuous_inputs_projection(decoder_input_tokens)
inputs += position_encodings
y = self.dropout(inputs)
# decoder: No padding present.
decoder_mask = torch.ones(
decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
)
# Translate encoding masks to encoder-decoder masks.
encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
# cross attend style: concat encodings
encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
for lyr in self.decoders:
y = lyr(
y,
conditioning_emb=conditioning_emb,
encoder_hidden_states=encoded,
encoder_attention_mask=encoder_decoder_mask,
)[0]
y = self.decoder_norm(y)
y = self.post_dropout(y)
spec_out = self.spec_out(y)
return spec_out
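# A minimal usage sketch for `T5FilmDecoder.forward` (hypothetical shapes inferred from the code
# above, not from upstream docs; `encodings_and_masks` is a list of (encoder_states, encoder_mask)):
# >>> decoder = T5FilmDecoder(input_dims=128, targets_length=256, d_model=768)
# >>> tokens = torch.zeros(2, 256, 128)            # (batch, targets_length, input_dims)
# >>> noise_t = torch.rand(2)                      # one noise time in [0, 1) per sample
# >>> enc, enc_mask = torch.zeros(2, 512, 768), torch.ones(2, 512)
# >>> spec = decoder([(enc, enc_mask)], tokens, noise_t)   # -> shape (2, 256, 128)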
class DecoderLayer(nn.Module):
r"""
T5 decoder layer.
Args:
d_model (`int`):
Size of the input hidden states.
d_kv (`int`):
Size of the key-value projection vectors.
num_heads (`int`):
Number of attention heads.
d_ff (`int`):
Size of the intermediate feed-forward layer.
dropout_rate (`float`):
Dropout probability.
layer_norm_epsilon (`float`, *optional*, defaults to `1e-6`):
A small value used for numerical stability to avoid dividing by zero.
"""
def __init__(
self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6
):
super().__init__()
self.layer = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
)
# cross attention: layer 1
self.layer.append(
T5LayerCrossAttention(
d_model=d_model,
d_kv=d_kv,
num_heads=num_heads,
dropout_rate=dropout_rate,
layer_norm_epsilon=layer_norm_epsilon,
)
)
# Film Cond MLP + dropout: last layer
self.layer.append(
T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
)
def forward(
self,
hidden_states: torch.Tensor,
conditioning_emb: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
encoder_decoder_position_bias=None,
) -> Tuple[torch.Tensor]:
hidden_states = self.layer[0](
hidden_states,
conditioning_emb=conditioning_emb,
attention_mask=attention_mask,
)
if encoder_hidden_states is not None:
encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
encoder_hidden_states.dtype
)
hidden_states = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_extended_attention_mask,
)
# Apply Film Conditional Feed Forward layer
hidden_states = self.layer[-1](hidden_states, conditioning_emb)
return (hidden_states,)
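# Each `DecoderLayer` therefore applies, in order: FiLM-conditioned self-attention (self.layer[0]),
# cross-attention over the encoder states (self.layer[1]), and a FiLM-conditioned gated
# feed-forward block (self.layer[-1]); the conditioning embedding modulates only the
# self-attention and feed-forward sub-layers, not the cross-attention.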
class T5LayerSelfAttentionCond(nn.Module):
r"""
T5 style self-attention layer with conditioning.
Args:
d_model (`int`):
Size of the input hidden states.
d_kv (`int`):
Size of the key-value projection vectors.
num_heads (`int`):
Number of attention heads.
dropout_rate (`float`):
Dropout probability.
"""
def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float):
super().__init__()
self.layer_norm = T5LayerNorm(d_model)
self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
self.dropout = nn.Dropout(dropout_rate)
def forward(
self,
hidden_states: torch.Tensor,
conditioning_emb: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
# pre_self_attention_layer_norm
normed_hidden_states = self.layer_norm(hidden_states)
if conditioning_emb is not None:
normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
# Self-attention block
attention_output = self.attention(normed_hidden_states)
hidden_states = hidden_states + self.dropout(attention_output)
return hidden_states
class T5LayerCrossAttention(nn.Module):
r"""
T5 style cross-attention layer.
Args:
d_model (`int`):
Size of the input hidden states.
d_kv (`int`):
Size of the key-value projection vectors.
num_heads (`int`):
Number of attention heads.
dropout_rate (`float`):
Dropout probability.
layer_norm_epsilon (`float`):
A small value used for numerical stability to avoid dividing by zero.
"""
def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float):
super().__init__()
self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
self.dropout = nn.Dropout(dropout_rate)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.attention(
normed_hidden_states,
encoder_hidden_states=key_value_states,
attention_mask=attention_mask.squeeze(1),
)
layer_output = hidden_states + self.dropout(attention_output)
return layer_output
class T5LayerFFCond(nn.Module):
r"""
T5 style feed-forward conditional layer.
Args:
d_model (`int`):
Size of the input hidden states.
d_ff (`int`):
Size of the intermediate feed-forward layer.
dropout_rate (`float`):
Dropout probability.
layer_norm_epsilon (`float`):
A small value used for numerical stability to avoid dividing by zero.
"""
def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float):
super().__init__()
self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, hidden_states: torch.Tensor, conditioning_emb: Optional[torch.Tensor] = None) -> torch.Tensor:
forwarded_states = self.layer_norm(hidden_states)
if conditioning_emb is not None:
forwarded_states = self.film(forwarded_states, conditioning_emb)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5DenseGatedActDense(nn.Module):
r"""
T5 style feed-forward layer with gated activations and dropout.
Args:
d_model (`int`):
Size of the input hidden states.
d_ff (`int`):
Size of the intermediate feed-forward layer.
dropout_rate (`float`):
Dropout probability.
"""
def __init__(self, d_model: int, d_ff: int, dropout_rate: float):
super().__init__()
self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
self.wo = nn.Linear(d_ff, d_model, bias=False)
self.dropout = nn.Dropout(dropout_rate)
self.act = NewGELUActivation()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
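# In formula form, the gated feed-forward above computes
#     out = wo(dropout(gelu_new(wi_0(x)) * wi_1(x)))
# i.e. a GEGLU-style block in which one d_ff projection gates the other before projecting back
# down to d_model.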
class T5LayerNorm(nn.Module):
r"""
T5 style layer normalization module.
Args:
hidden_size (`int`):
Size of the input hidden states.
        eps (`float`, *optional*, defaults to `1e-6`):
A small value used for numerical stability to avoid dividing by zero.
"""
def __init__(self, hidden_size: int, eps: float = 1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into half-precision if necessary
if self.weight.dtype in [torch.float16, torch.bfloat16]:
hidden_states = hidden_states.to(self.weight.dtype)
return self.weight * hidden_states
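# In math, `T5LayerNorm` is RMSNorm: y = weight * x / sqrt(mean(x**2, dim=-1) + eps), with the
# second moment accumulated in fp32 and no mean subtraction or bias term.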
class NewGELUActivation(nn.Module):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
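# Sanity-check sketch (hypothetical, not part of the upstream file): this tanh approximation
# should closely match PyTorch's built-in approximate GELU.
# >>> x = torch.randn(4)
# >>> torch.allclose(NewGELUActivation()(x), torch.nn.functional.gelu(x, approximate="tanh"), atol=1e-6)
# True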
class T5FiLMLayer(nn.Module):
"""
T5 style FiLM Layer.
Args:
in_features (`int`):
Number of input features.
out_features (`int`):
Number of output features.
"""
def __init__(self, in_features: int, out_features: int):
super().__init__()
self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)
def forward(self, x: torch.Tensor, conditioning_emb: torch.Tensor) -> torch.Tensor:
emb = self.scale_bias(conditioning_emb)
scale, shift = torch.chunk(emb, 2, -1)
x = x * (1 + scale) + shift
return x
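# `T5FiLMLayer` is feature-wise linear modulation: a single linear layer maps the conditioning
# embedding (d_model * 4 wide in this file) to per-feature (scale, shift) pairs and applies
# x * (1 + scale) + shift. With conditioning_emb of shape (batch, 1, d_model * 4), scale and shift
# broadcast over the sequence dimension of x, which has shape (batch, seq_len, d_model).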
| diffusers/src/diffusers/models/transformers/t5_film_transformer.py/0 | {
"file_path": "diffusers/src/diffusers/models/transformers/t5_film_transformer.py",
"repo_id": "diffusers",
"token_count": 7110
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint
from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
from ...loaders.single_file_model import FromOriginalModelMixin
from ...utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
from ..activations import get_activation
from ..attention_processor import (
ADDED_KV_ATTENTION_PROCESSORS,
CROSS_ATTENTION_PROCESSORS,
Attention,
AttentionProcessor,
AttnAddedKVProcessor,
AttnProcessor,
FusedAttnProcessor2_0,
)
from ..embeddings import (
GaussianFourierProjection,
GLIGENTextBoundingboxProjection,
ImageHintTimeEmbedding,
ImageProjection,
ImageTimeEmbedding,
TextImageProjection,
TextImageTimeEmbedding,
TextTimeEmbedding,
TimestepEmbedding,
Timesteps,
)
from ..modeling_utils import ModelMixin
from .unet_2d_blocks import (
get_down_block,
get_mid_block,
get_up_block,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
The output of [`UNet2DConditionModel`].
Args:
sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.Tensor = None
class UNet2DConditionModel(
ModelMixin, ConfigMixin, FromOriginalModelMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin
):
r"""
A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
shaped output.
    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
for all models (such as downloading or saving).
Parameters:
sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
Height and width of input/output sample.
in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
The tuple of downsample blocks to use.
mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
`UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
The tuple of upsample blocks to use.
only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
Whether to include self-attention in the basic transformer blocks, see
[`~models.attention.BasicTransformerBlock`].
block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
The tuple of output channels for each block.
layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, normalization and activation layers are skipped in post-processing.
norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
The dimension of the cross attention features.
transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
[`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        reverse_transformer_layers_per_block (`Tuple[Tuple]`, *optional*, defaults to `None`):
The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
[`~models.unets.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unets.unet_2d_blocks.CrossAttnUpBlock2D`],
[`~models.unets.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
encoder_hid_dim (`int`, *optional*, defaults to None):
If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
dimension to `cross_attention_dim`.
encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
num_attention_heads (`int`, *optional*):
The number of attention heads. If not defined, defaults to `attention_head_dim`
resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
class_embed_type (`str`, *optional*, defaults to `None`):
The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
`"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
addition_embed_type (`str`, *optional*, defaults to `None`):
Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
"text". "text" will use the `TextTimeEmbedding` layer.
addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
Dimension for the timestep embeddings.
num_class_embeds (`int`, *optional*, defaults to `None`):
Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
class conditioning with `class_embed_type` equal to `None`.
time_embedding_type (`str`, *optional*, defaults to `positional`):
The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
time_embedding_dim (`int`, *optional*, defaults to `None`):
An optional override for the dimension of the projected time embedding.
time_embedding_act_fn (`str`, *optional*, defaults to `None`):
Optional activation function to use only once on the time embeddings before they are passed to the rest of
the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
timestep_post_act (`str`, *optional*, defaults to `None`):
The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
time_cond_proj_dim (`int`, *optional*, defaults to `None`):
The dimension of `cond_proj` layer in the timestep embedding.
conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.
conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.
projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
`class_embed_type="projection"`. Required when `class_embed_type="projection"`.
class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
embeddings with the class embeddings.
mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
`only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
            `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
otherwise.
"""
_supports_gradient_checkpointing = True
_no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]
_skip_layerwise_casting_patterns = ["norm"]
@register_to_config
def __init__(
self,
sample_size: Optional[Union[int, Tuple[int, int]]] = None,
in_channels: int = 4,
out_channels: int = 4,
center_input_sample: bool = False,
flip_sin_to_cos: bool = True,
freq_shift: int = 0,
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
),
mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
only_cross_attention: Union[bool, Tuple[bool]] = False,
block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
layers_per_block: Union[int, Tuple[int]] = 2,
downsample_padding: int = 1,
mid_block_scale_factor: float = 1,
dropout: float = 0.0,
act_fn: str = "silu",
norm_num_groups: Optional[int] = 32,
norm_eps: float = 1e-5,
cross_attention_dim: Union[int, Tuple[int]] = 1280,
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
encoder_hid_dim: Optional[int] = None,
encoder_hid_dim_type: Optional[str] = None,
attention_head_dim: Union[int, Tuple[int]] = 8,
num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
dual_cross_attention: bool = False,
use_linear_projection: bool = False,
class_embed_type: Optional[str] = None,
addition_embed_type: Optional[str] = None,
addition_time_embed_dim: Optional[int] = None,
num_class_embeds: Optional[int] = None,
upcast_attention: bool = False,
resnet_time_scale_shift: str = "default",
resnet_skip_time_act: bool = False,
resnet_out_scale_factor: float = 1.0,
time_embedding_type: str = "positional",
time_embedding_dim: Optional[int] = None,
time_embedding_act_fn: Optional[str] = None,
timestep_post_act: Optional[str] = None,
time_cond_proj_dim: Optional[int] = None,
conv_in_kernel: int = 3,
conv_out_kernel: int = 3,
projection_class_embeddings_input_dim: Optional[int] = None,
attention_type: str = "default",
class_embeddings_concat: bool = False,
mid_block_only_cross_attention: Optional[bool] = None,
cross_attention_norm: Optional[str] = None,
addition_embed_type_num_heads: int = 64,
):
super().__init__()
self.sample_size = sample_size
if num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
)
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
num_attention_heads = num_attention_heads or attention_head_dim
# Check inputs
self._check_config(
down_block_types=down_block_types,
up_block_types=up_block_types,
only_cross_attention=only_cross_attention,
block_out_channels=block_out_channels,
layers_per_block=layers_per_block,
cross_attention_dim=cross_attention_dim,
transformer_layers_per_block=transformer_layers_per_block,
reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
attention_head_dim=attention_head_dim,
num_attention_heads=num_attention_heads,
)
# input
conv_in_padding = (conv_in_kernel - 1) // 2
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
)
# time
time_embed_dim, timestep_input_dim = self._set_time_proj(
time_embedding_type,
block_out_channels=block_out_channels,
flip_sin_to_cos=flip_sin_to_cos,
freq_shift=freq_shift,
time_embedding_dim=time_embedding_dim,
)
self.time_embedding = TimestepEmbedding(
timestep_input_dim,
time_embed_dim,
act_fn=act_fn,
post_act_fn=timestep_post_act,
cond_proj_dim=time_cond_proj_dim,
)
self._set_encoder_hid_proj(
encoder_hid_dim_type,
cross_attention_dim=cross_attention_dim,
encoder_hid_dim=encoder_hid_dim,
)
# class embedding
self._set_class_embedding(
class_embed_type,
act_fn=act_fn,
num_class_embeds=num_class_embeds,
projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
time_embed_dim=time_embed_dim,
timestep_input_dim=timestep_input_dim,
)
self._set_add_embedding(
addition_embed_type,
addition_embed_type_num_heads=addition_embed_type_num_heads,
addition_time_embed_dim=addition_time_embed_dim,
cross_attention_dim=cross_attention_dim,
encoder_hid_dim=encoder_hid_dim,
flip_sin_to_cos=flip_sin_to_cos,
freq_shift=freq_shift,
projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
time_embed_dim=time_embed_dim,
)
if time_embedding_act_fn is None:
self.time_embed_act = None
else:
self.time_embed_act = get_activation(time_embedding_act_fn)
self.down_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
if isinstance(only_cross_attention, bool):
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = only_cross_attention
only_cross_attention = [only_cross_attention] * len(down_block_types)
if mid_block_only_cross_attention is None:
mid_block_only_cross_attention = False
if isinstance(num_attention_heads, int):
num_attention_heads = (num_attention_heads,) * len(down_block_types)
if isinstance(attention_head_dim, int):
attention_head_dim = (attention_head_dim,) * len(down_block_types)
if isinstance(cross_attention_dim, int):
cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
if isinstance(layers_per_block, int):
layers_per_block = [layers_per_block] * len(down_block_types)
if isinstance(transformer_layers_per_block, int):
transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
if class_embeddings_concat:
# The time embeddings are concatenated with the class embeddings. The dimension of the
# time embeddings passed to the down, middle, and up blocks is twice the dimension of the
# regular time embeddings
blocks_time_embed_dim = time_embed_dim * 2
else:
blocks_time_embed_dim = time_embed_dim
# down
output_channel = block_out_channels[0]
for i, down_block_type in enumerate(down_block_types):
input_channel = output_channel
output_channel = block_out_channels[i]
is_final_block = i == len(block_out_channels) - 1
down_block = get_down_block(
down_block_type,
num_layers=layers_per_block[i],
transformer_layers_per_block=transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
temb_channels=blocks_time_embed_dim,
add_downsample=not is_final_block,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
cross_attention_dim=cross_attention_dim[i],
num_attention_heads=num_attention_heads[i],
downsample_padding=downsample_padding,
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.down_blocks.append(down_block)
# mid
self.mid_block = get_mid_block(
mid_block_type,
temb_channels=blocks_time_embed_dim,
in_channels=block_out_channels[-1],
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resnet_groups=norm_num_groups,
output_scale_factor=mid_block_scale_factor,
transformer_layers_per_block=transformer_layers_per_block[-1],
num_attention_heads=num_attention_heads[-1],
cross_attention_dim=cross_attention_dim[-1],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
mid_block_only_cross_attention=mid_block_only_cross_attention,
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[-1],
dropout=dropout,
)
# count how many layers upsample the images
self.num_upsamplers = 0
# up
reversed_block_out_channels = list(reversed(block_out_channels))
reversed_num_attention_heads = list(reversed(num_attention_heads))
reversed_layers_per_block = list(reversed(layers_per_block))
reversed_cross_attention_dim = list(reversed(cross_attention_dim))
reversed_transformer_layers_per_block = (
list(reversed(transformer_layers_per_block))
if reverse_transformer_layers_per_block is None
else reverse_transformer_layers_per_block
)
only_cross_attention = list(reversed(only_cross_attention))
output_channel = reversed_block_out_channels[0]
for i, up_block_type in enumerate(up_block_types):
is_final_block = i == len(block_out_channels) - 1
prev_output_channel = output_channel
output_channel = reversed_block_out_channels[i]
input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
# add upsample block for all BUT final layer
if not is_final_block:
add_upsample = True
self.num_upsamplers += 1
else:
add_upsample = False
up_block = get_up_block(
up_block_type,
num_layers=reversed_layers_per_block[i] + 1,
transformer_layers_per_block=reversed_transformer_layers_per_block[i],
in_channels=input_channel,
out_channels=output_channel,
prev_output_channel=prev_output_channel,
temb_channels=blocks_time_embed_dim,
add_upsample=add_upsample,
resnet_eps=norm_eps,
resnet_act_fn=act_fn,
resolution_idx=i,
resnet_groups=norm_num_groups,
cross_attention_dim=reversed_cross_attention_dim[i],
num_attention_heads=reversed_num_attention_heads[i],
dual_cross_attention=dual_cross_attention,
use_linear_projection=use_linear_projection,
only_cross_attention=only_cross_attention[i],
upcast_attention=upcast_attention,
resnet_time_scale_shift=resnet_time_scale_shift,
attention_type=attention_type,
resnet_skip_time_act=resnet_skip_time_act,
resnet_out_scale_factor=resnet_out_scale_factor,
cross_attention_norm=cross_attention_norm,
attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
dropout=dropout,
)
self.up_blocks.append(up_block)
# out
if norm_num_groups is not None:
self.conv_norm_out = nn.GroupNorm(
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
)
self.conv_act = get_activation(act_fn)
else:
self.conv_norm_out = None
self.conv_act = None
conv_out_padding = (conv_out_kernel - 1) // 2
self.conv_out = nn.Conv2d(
block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
)
self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)
def _check_config(
self,
down_block_types: Tuple[str],
up_block_types: Tuple[str],
only_cross_attention: Union[bool, Tuple[bool]],
block_out_channels: Tuple[int],
layers_per_block: Union[int, Tuple[int]],
cross_attention_dim: Union[int, Tuple[int]],
transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
reverse_transformer_layers_per_block: bool,
attention_head_dim: int,
num_attention_heads: Optional[Union[int, Tuple[int]]],
):
if len(down_block_types) != len(up_block_types):
raise ValueError(
f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
)
if len(block_out_channels) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
)
if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
)
if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
)
if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
)
if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
)
if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
raise ValueError(
f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
)
if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
for layer_number_per_block in transformer_layers_per_block:
if isinstance(layer_number_per_block, list):
raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
def _set_time_proj(
self,
time_embedding_type: str,
block_out_channels: int,
flip_sin_to_cos: bool,
freq_shift: float,
time_embedding_dim: int,
) -> Tuple[int, int]:
if time_embedding_type == "fourier":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
if time_embed_dim % 2 != 0:
raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
self.time_proj = GaussianFourierProjection(
time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
)
timestep_input_dim = time_embed_dim
elif time_embedding_type == "positional":
time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
timestep_input_dim = block_out_channels[0]
else:
raise ValueError(
f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
)
return time_embed_dim, timestep_input_dim
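    # Concrete example of the "positional" branch above (stated here for illustration): with the
    # default block_out_channels = (320, 640, 1280, 1280) and time_embedding_dim left as None,
    # timestep_input_dim = 320 and time_embed_dim = 320 * 4 = 1280.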
def _set_encoder_hid_proj(
self,
encoder_hid_dim_type: Optional[str],
cross_attention_dim: Union[int, Tuple[int]],
encoder_hid_dim: Optional[int],
):
if encoder_hid_dim_type is None and encoder_hid_dim is not None:
encoder_hid_dim_type = "text_proj"
self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
if encoder_hid_dim is None and encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
)
if encoder_hid_dim_type == "text_proj":
self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
elif encoder_hid_dim_type == "text_image_proj":
# image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `encoder_hid_dim_type == "text_image_proj"` (Kandinsky 2.1).
self.encoder_hid_proj = TextImageProjection(
text_embed_dim=encoder_hid_dim,
image_embed_dim=cross_attention_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2
self.encoder_hid_proj = ImageProjection(
image_embed_dim=encoder_hid_dim,
cross_attention_dim=cross_attention_dim,
)
elif encoder_hid_dim_type is not None:
raise ValueError(
f"`encoder_hid_dim_type`: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj', or 'image_proj'."
)
else:
self.encoder_hid_proj = None
def _set_class_embedding(
self,
class_embed_type: Optional[str],
act_fn: str,
num_class_embeds: Optional[int],
projection_class_embeddings_input_dim: Optional[int],
time_embed_dim: int,
timestep_input_dim: int,
):
if class_embed_type is None and num_class_embeds is not None:
self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
elif class_embed_type == "timestep":
self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
elif class_embed_type == "identity":
self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
elif class_embed_type == "projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
)
# The projection `class_embed_type` is the same as the timestep `class_embed_type` except
# 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
# 2. it projects from an arbitrary input dimension.
#
# Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
# When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
# As a result, `TimestepEmbedding` can be passed arbitrary vectors.
self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif class_embed_type == "simple_projection":
if projection_class_embeddings_input_dim is None:
raise ValueError(
"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
)
self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
else:
self.class_embedding = None
def _set_add_embedding(
self,
addition_embed_type: str,
addition_embed_type_num_heads: int,
addition_time_embed_dim: Optional[int],
flip_sin_to_cos: bool,
freq_shift: float,
cross_attention_dim: Optional[int],
encoder_hid_dim: Optional[int],
projection_class_embeddings_input_dim: Optional[int],
time_embed_dim: int,
):
if addition_embed_type == "text":
if encoder_hid_dim is not None:
text_time_embedding_from_dim = encoder_hid_dim
else:
text_time_embedding_from_dim = cross_attention_dim
self.add_embedding = TextTimeEmbedding(
text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
)
elif addition_embed_type == "text_image":
# text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
# they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image"` (Kandinsky 2.1).
self.add_embedding = TextImageTimeEmbedding(
text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
)
elif addition_embed_type == "text_time":
self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
elif addition_embed_type == "image":
# Kandinsky 2.2
self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet
self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
elif addition_embed_type is not None:
raise ValueError(
f"`addition_embed_type`: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'."
)
def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
if attention_type in ["gated", "gated-text-image"]:
positive_len = 768
if isinstance(cross_attention_dim, int):
positive_len = cross_attention_dim
elif isinstance(cross_attention_dim, (list, tuple)):
positive_len = cross_attention_dim[0]
feature_type = "text-only" if attention_type == "gated" else "text-image"
self.position_net = GLIGENTextBoundingboxProjection(
positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
)
@property
def attn_processors(self) -> Dict[str, AttentionProcessor]:
r"""
Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by its weight name.
"""
# set recursively
processors = {}
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
if hasattr(module, "get_processor"):
processors[f"{name}.processor"] = module.get_processor()
for sub_name, child in module.named_children():
fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(name, module, processors)
return processors
def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
r"""
Sets the attention processor to use to compute attention.
Parameters:
processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
The instantiated processor class or a dictionary of processor classes that will be set as the processor
for **all** `Attention` layers.
If `processor` is a dict, the key needs to define the path to the corresponding cross attention
processor. This is strongly recommended when setting trainable attention processors.
"""
count = len(self.attn_processors.keys())
if isinstance(processor, dict) and len(processor) != count:
raise ValueError(
f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
)
def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
if hasattr(module, "set_processor"):
if not isinstance(processor, dict):
module.set_processor(processor)
else:
module.set_processor(processor.pop(f"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
for name, module in self.named_children():
fn_recursive_attn_processor(name, module, processor)
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor)
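    # Minimal usage sketch for the processor API above (hypothetical variable names):
    # >>> procs = unet.attn_processors               # {"<module path>.processor": processor, ...}
    # >>> unet.set_attn_processor(AttnProcessor())   # one processor instance reused for all layers
    # >>> unet.set_default_attn_processor()          # restore the library defaults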
def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
r"""
Enable sliced attention computation.
        When this option is enabled, the attention module splits the input tensor into slices to compute attention in
several steps. This is useful for saving some memory in exchange for a small decrease in speed.
Args:
slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
`"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
must be a multiple of `slice_size`.
"""
sliceable_head_dims = []
def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
if hasattr(module, "set_attention_slice"):
sliceable_head_dims.append(module.sliceable_head_dim)
for child in module.children():
fn_recursive_retrieve_sliceable_dims(child)
# retrieve number of attention layers
for module in self.children():
fn_recursive_retrieve_sliceable_dims(module)
num_sliceable_layers = len(sliceable_head_dims)
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
slice_size = [dim // 2 for dim in sliceable_head_dims]
elif slice_size == "max":
# make smallest slice possible
slice_size = num_sliceable_layers * [1]
slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
if len(slice_size) != len(sliceable_head_dims):
raise ValueError(
f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
)
for i in range(len(slice_size)):
size = slice_size[i]
dim = sliceable_head_dims[i]
if size is not None and size > dim:
raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
# Recursively walk through all the children.
# Any children which exposes the set_attention_slice method
# gets the message
def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
if hasattr(module, "set_attention_slice"):
module.set_attention_slice(slice_size.pop())
for child in module.children():
fn_recursive_set_attention_slice(child, slice_size)
reversed_slice_size = list(reversed(slice_size))
for module in self.children():
fn_recursive_set_attention_slice(module, reversed_slice_size)
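    # Typical calls (illustrative): `unet.set_attention_slice("auto")` halves each sliceable head
    # dimension, `"max"` uses slices of size 1, and an int or list of ints sets explicit slice
    # sizes per sliceable attention layer.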
def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
The suffixes after the scaling factors represent the stage blocks where they are being applied.
Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
Args:
s1 (`float`):
Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
s2 (`float`):
Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
mitigate the "oversmoothing effect" in the enhanced denoising process.
b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
"""
for i, upsample_block in enumerate(self.up_blocks):
setattr(upsample_block, "s1", s1)
setattr(upsample_block, "s2", s2)
setattr(upsample_block, "b1", b1)
setattr(upsample_block, "b2", b2)
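    # Usage sketch; the numbers below are placeholders, not tuned values (see the FreeU repository
    # linked above for per-pipeline recommendations):
    # >>> unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
    # >>> unet.disable_freeu()   # resets s1/s2/b1/b2 back to None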
def disable_freeu(self):
"""Disables the FreeU mechanism."""
freeu_keys = {"s1", "s2", "b1", "b2"}
for i, upsample_block in enumerate(self.up_blocks):
for k in freeu_keys:
if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
setattr(upsample_block, k, None)
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True)
self.set_attn_processor(FusedAttnProcessor2_0())
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors)
def get_time_embed(
self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
) -> Optional[torch.Tensor]:
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
is_npu = sample.device.type == "npu"
if isinstance(timestep, float):
dtype = torch.float32 if (is_mps or is_npu) else torch.float64
else:
dtype = torch.int32 if (is_mps or is_npu) else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16, so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
return t_emb
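    # `get_time_embed` accepts a Python float/int, a 0-d tensor, or a 1-d tensor of timesteps;
    # scalars are wrapped and expanded to shape (batch,), so e.g. `timestep=999` and
    # `timestep=torch.full((batch,), 999)` yield the same sinusoidal projection.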
def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
class_emb = None
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
return class_emb
def get_aug_embed(
self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
) -> Optional[torch.Tensor]:
aug_emb = None
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
elif self.config.addition_embed_type == "text_image":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
elif self.config.addition_embed_type == "text_time":
# SDXL - style
if "text_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
)
text_embeds = added_cond_kwargs.get("text_embeds")
if "time_ids" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
)
time_ids = added_cond_kwargs.get("time_ids")
time_embeds = self.add_time_proj(time_ids.flatten())
time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
add_embeds = add_embeds.to(emb.dtype)
aug_emb = self.add_embedding(add_embeds)
elif self.config.addition_embed_type == "image":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
aug_emb = self.add_embedding(image_embs)
elif self.config.addition_embed_type == "image_hint":
# Kandinsky 2.2 ControlNet - style
if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
hint = added_cond_kwargs.get("hint")
aug_emb = self.add_embedding(image_embs, hint)
return aug_emb
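    # For the SDXL-style "text_time" branch above, `added_cond_kwargs` looks roughly like the
    # following (hypothetical sketch; exact shapes depend on the pipeline):
    # >>> added_cond_kwargs = {"text_embeds": pooled_prompt_embeds,  # (batch, pooled_dim)
    # ...                      "time_ids": add_time_ids}             # (batch, num_time_ids)
    # The flattened `time_ids` are sinusoidally embedded, reshaped to (batch, -1), concatenated
    # with `text_embeds`, and projected through `self.add_embedding`.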
def process_encoder_hidden_states(
self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
) -> torch.Tensor:
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
# Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
# Kandinsky 2.2 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(image_embeds)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
if hasattr(self, "text_encoder_hid_proj") and self.text_encoder_hid_proj is not None:
encoder_hidden_states = self.text_encoder_hid_proj(encoder_hidden_states)
image_embeds = added_cond_kwargs.get("image_embeds")
image_embeds = self.encoder_hid_proj(image_embeds)
encoder_hidden_states = (encoder_hidden_states, image_embeds)
return encoder_hidden_states
def forward(
self,
sample: torch.Tensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
The [`UNet2DConditionModel`] forward method.
Args:
sample (`torch.Tensor`):
The noisy input tensor with the following shape `(batch, channel, height, width)`.
timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
encoder_hidden_states (`torch.Tensor`):
The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
class_labels (`torch.Tensor`, *optional*, defaults to `None`):
Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
through the `self.time_embedding` layer to obtain the timestep embeddings.
attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
added_cond_kwargs: (`dict`, *optional*):
A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
are passed along to the UNet blocks.
down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
A tuple of tensors that if specified are added to the residuals of down unet blocks.
mid_block_additional_residual: (`torch.Tensor`, *optional*):
A tensor that if specified is added to the residual of the middle unet block.
down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
encoder_attention_mask (`torch.Tensor`):
A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
`True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
which adds large negative values to the attention scores corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
tuple.
Returns:
[`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
otherwise a `tuple` is returned where the first element is the sample tensor.
"""
# By default, the sample's spatial dimensions have to be a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (number of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
for dim in sample.shape[-2:]:
if dim % default_overall_up_factor != 0:
# Forward upsample size to force interpolation output size.
forward_upsample_size = True
break
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
t_emb = self.get_time_embed(sample=sample, timestep=timestep)
emb = self.time_embedding(t_emb, timestep_cond)
class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
if class_emb is not None:
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
aug_emb = self.get_aug_embed(
emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
if self.config.addition_embed_type == "image_hint":
aug_emb, hint = aug_emb
sample = torch.cat([sample, hint], dim=1)
emb = emb + aug_emb if aug_emb is not None else emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
encoder_hidden_states = self.process_encoder_hidden_states(
encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
)
# 2. pre-process
sample = self.conv_in(sample)
# 2.5 GLIGEN position net
if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
gligen_args = cross_attention_kwargs.pop("gligen")
cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
# 3. down
# we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
# to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
if cross_attention_kwargs is not None:
cross_attention_kwargs = cross_attention_kwargs.copy()
lora_scale = cross_attention_kwargs.pop("scale", 1.0)
else:
lora_scale = 1.0
if USE_PEFT_BACKEND:
# weight the lora layers by setting `lora_scale` for each PEFT layer
scale_lora_layers(self, lora_scale)
is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
# using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
is_adapter = down_intrablock_additional_residuals is not None
# maintain backward compatibility for legacy usage, where
# T2I-Adapter and ControlNet both use down_block_additional_residuals arg
# but can only use one or the other
if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
deprecate(
"T2I should not use down_block_additional_residuals",
"1.3.0",
"Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
standard_warn=False,
)
down_intrablock_additional_residuals = down_block_additional_residuals
is_adapter = True
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
# For t2i-adapter CrossAttnDownBlock2D
additional_residuals = {}
if is_adapter and len(down_intrablock_additional_residuals) > 0:
additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
**additional_residuals,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
if is_adapter and len(down_intrablock_additional_residuals) > 0:
sample += down_intrablock_additional_residuals.pop(0)
down_block_res_samples += res_samples
if is_controlnet:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = self.mid_block(sample, emb)
# To support T2I-Adapter-XL
if (
is_adapter
and len(down_intrablock_additional_residuals) > 0
and sample.shape == down_intrablock_additional_residuals[0].shape
):
sample += down_intrablock_additional_residuals.pop(0)
if is_controlnet:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
upsample_size=upsample_size,
)
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if USE_PEFT_BACKEND:
# remove `lora_scale` from each PEFT layer
unscale_lora_layers(self, lora_scale)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
| diffusers/src/diffusers/models/unets/unet_2d_condition.py/0 | {
"file_path": "diffusers/src/diffusers/models/unets/unet_2d_condition.py",
"repo_id": "diffusers",
"token_count": 29830
} |
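The forward method above turns binary attention masks (1 = keep, 0 = discard) into additive biases before they reach the attention layers. A minimal, self-contained sketch of that conversion, independent of the model:
```py
import torch

# 1 = keep, 0 = discard, shape [batch, key_tokens]
attention_mask = torch.tensor([[1, 1, 0]], dtype=torch.float32)

# keep -> +0.0, discard -> -10000.0, then add a singleton query_tokens dimension
bias = (1 - attention_mask) * -10000.0
bias = bias.unsqueeze(1)  # shape [batch, 1, key_tokens], broadcastable over attention scores

print(bias)  # tensor([[[     0.,      0., -10000.]]])
```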
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from transformers import ClapTextModelWithProjection, RobertaTokenizer, RobertaTokenizerFast, SpeechT5HifiGan
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline, StableDiffusionMixin
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from diffusers import AudioLDMPipeline
>>> import torch
>>> import scipy
>>> repo_id = "cvssp/audioldm-s-full-v2"
>>> pipe = AudioLDMPipeline.from_pretrained(repo_id, torch_dtype=torch.float16)
>>> pipe = pipe.to("cuda")
>>> prompt = "Techno music with a strong, upbeat tempo and high melodic riffs"
>>> audio = pipe(prompt, num_inference_steps=10, audio_length_in_s=5.0).audios[0]
>>> # save the audio sample as a .wav file
>>> scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)
```
"""
class AudioLDMPipeline(DiffusionPipeline, StableDiffusionMixin):
r"""
Pipeline for text-to-audio generation using AudioLDM.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Args:
vae ([`AutoencoderKL`]):
Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
text_encoder ([`~transformers.ClapTextModelWithProjection`]):
Frozen text-encoder (`ClapTextModelWithProjection`, specifically the
[laion/clap-htsat-unfused](https://huggingface.co/laion/clap-htsat-unfused) variant).
tokenizer ([`PreTrainedTokenizer`]):
A [`~transformers.RobertaTokenizer`] to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded audio latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
vocoder ([`~transformers.SpeechT5HifiGan`]):
Vocoder of class `SpeechT5HifiGan`.
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
def __init__(
self,
vae: AutoencoderKL,
text_encoder: ClapTextModelWithProjection,
tokenizer: Union[RobertaTokenizer, RobertaTokenizerFast],
unet: UNet2DConditionModel,
scheduler: KarrasDiffusionSchedulers,
vocoder: SpeechT5HifiGan,
):
super().__init__()
self.register_modules(
vae=vae,
text_encoder=text_encoder,
tokenizer=tokenizer,
unet=unet,
scheduler=scheduler,
vocoder=vocoder,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
def _encode_prompt(
self,
prompt,
device,
num_waveforms_per_prompt,
do_classifier_free_guidance,
negative_prompt=None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
device (`torch.device`):
torch device
num_waveforms_per_prompt (`int`):
number of waveforms that should be generated per prompt
do_classifier_free_guidance (`bool`):
whether to use classifier free guidance or not
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the audio generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
argument.
"""
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
if prompt_embeds is None:
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
attention_mask = text_inputs.attention_mask
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(
untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
)
logger.warning(
"The following part of your input was truncated because CLAP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}"
)
prompt_embeds = self.text_encoder(
text_input_ids.to(device),
attention_mask=attention_mask.to(device),
)
prompt_embeds = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
prompt_embeds = F.normalize(prompt_embeds, dim=-1)
prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
(
bs_embed,
seq_len,
) = prompt_embeds.shape
# duplicate text embeddings for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_waveforms_per_prompt)
prompt_embeds = prompt_embeds.view(bs_embed * num_waveforms_per_prompt, seq_len)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens: List[str]
if negative_prompt is None:
uncond_tokens = [""] * batch_size
elif type(prompt) is not type(negative_prompt):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
f" {type(prompt)}."
)
elif isinstance(negative_prompt, str):
uncond_tokens = [negative_prompt]
elif batch_size != len(negative_prompt):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`."
)
else:
uncond_tokens = negative_prompt
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_tensors="pt",
)
uncond_input_ids = uncond_input.input_ids.to(device)
attention_mask = uncond_input.attention_mask.to(device)
negative_prompt_embeds = self.text_encoder(
uncond_input_ids,
attention_mask=attention_mask,
)
negative_prompt_embeds = negative_prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
negative_prompt_embeds = F.normalize(negative_prompt_embeds, dim=-1)
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_waveforms_per_prompt)
negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_waveforms_per_prompt, seq_len)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
return prompt_embeds
def decode_latents(self, latents):
latents = 1 / self.vae.config.scaling_factor * latents
mel_spectrogram = self.vae.decode(latents).sample
return mel_spectrogram
def mel_spectrogram_to_waveform(self, mel_spectrogram):
if mel_spectrogram.dim() == 4:
mel_spectrogram = mel_spectrogram.squeeze(1)
waveform = self.vocoder(mel_spectrogram)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
waveform = waveform.cpu().float()
return waveform
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
def check_inputs(
self,
prompt,
audio_length_in_s,
vocoder_upsample_factor,
callback_steps,
negative_prompt=None,
prompt_embeds=None,
negative_prompt_embeds=None,
):
min_audio_length_in_s = vocoder_upsample_factor * self.vae_scale_factor
if audio_length_in_s < min_audio_length_in_s:
raise ValueError(
f"`audio_length_in_s` has to be a positive value greater than or equal to {min_audio_length_in_s}, but "
f"is {audio_length_in_s}."
)
if self.vocoder.config.model_in_dim % self.vae_scale_factor != 0:
raise ValueError(
f"The number of frequency bins in the vocoder's log-mel spectrogram has to be divisible by the "
f"VAE scale factor, but got {self.vocoder.config.model_in_dim} bins and a scale factor of "
f"{self.vae_scale_factor}."
)
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents with width->self.vocoder.config.model_in_dim
def prepare_latents(self, batch_size, num_channels_latents, height, dtype, device, generator, latents=None):
shape = (
batch_size,
num_channels_latents,
int(height) // self.vae_scale_factor,
int(self.vocoder.config.model_in_dim) // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
audio_length_in_s: Optional[float] = None,
num_inference_steps: int = 10,
guidance_scale: float = 2.5,
negative_prompt: Optional[Union[str, List[str]]] = None,
num_waveforms_per_prompt: Optional[int] = 1,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: Optional[int] = 1,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
output_type: Optional[str] = "np",
):
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide audio generation. If not defined, you need to pass `prompt_embeds`.
audio_length_in_s (`float`, *optional*, defaults to 5.12):
The length of the generated audio sample in seconds.
num_inference_steps (`int`, *optional*, defaults to 10):
The number of denoising steps. More denoising steps usually lead to a higher quality audio at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 2.5):
A higher guidance scale value encourages the model to generate audio that is closely linked to the text
`prompt` at the expense of lower sound quality. Guidance scale is enabled when `guidance_scale > 1`.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide what to not include in audio generation. If not defined, you need to
pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
num_waveforms_per_prompt (`int`, *optional*, defaults to 1):
The number of waveforms to generate per prompt.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for audio
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
provided, text embeddings are generated from the `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple.
callback (`Callable`, *optional*):
A function that is called every `callback_steps` steps during inference. The function is called with the
following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function is called. If not specified, the callback is called at
every step.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
[`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
output_type (`str`, *optional*, defaults to `"np"`):
The output format of the generated image. Choose between `"np"` to return a NumPy `np.ndarray` or
`"pt"` to return a PyTorch `torch.Tensor` object.
Examples:
Returns:
[`~pipelines.AudioPipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated audio.
"""
# 0. Convert audio input length from seconds to spectrogram height
vocoder_upsample_factor = np.prod(self.vocoder.config.upsample_rates) / self.vocoder.config.sampling_rate
if audio_length_in_s is None:
audio_length_in_s = self.unet.config.sample_size * self.vae_scale_factor * vocoder_upsample_factor
height = int(audio_length_in_s / vocoder_upsample_factor)
original_waveform_length = int(audio_length_in_s * self.vocoder.config.sampling_rate)
if height % self.vae_scale_factor != 0:
height = int(np.ceil(height / self.vae_scale_factor)) * self.vae_scale_factor
logger.info(
f"Audio length in seconds {audio_length_in_s} is increased to {height * vocoder_upsample_factor} "
f"so that it can be handled by the model. It will be cut to {audio_length_in_s} after the "
f"denoising process."
)
# 1. Check inputs. Raise error if not correct
self.check_inputs(
prompt,
audio_length_in_s,
vocoder_upsample_factor,
callback_steps,
negative_prompt,
prompt_embeds,
negative_prompt_embeds,
)
# 2. Define call parameters
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
prompt_embeds = self._encode_prompt(
prompt,
device,
num_waveforms_per_prompt,
do_classifier_free_guidance,
negative_prompt,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
)
# 4. Prepare timesteps
self.scheduler.set_timesteps(num_inference_steps, device=device)
timesteps = self.scheduler.timesteps
# 5. Prepare latent variables
num_channels_latents = self.unet.config.in_channels
latents = self.prepare_latents(
batch_size * num_waveforms_per_prompt,
num_channels_latents,
height,
prompt_embeds.dtype,
device,
generator,
latents,
)
# 6. Prepare extra step kwargs
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 7. Denoising loop
num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
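# Note: AudioLDM conditions the UNet on the pooled CLAP text embedding via
# `class_labels` rather than `encoder_hidden_states`, which is therefore None here.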
noise_pred = self.unet(
latent_model_input,
t,
encoder_hidden_states=None,
class_labels=prompt_embeds,
cross_attention_kwargs=cross_attention_kwargs,
).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
# 8. Post-processing
mel_spectrogram = self.decode_latents(latents)
audio = self.mel_spectrogram_to_waveform(mel_spectrogram)
audio = audio[:, :original_waveform_length]
if output_type == "np":
audio = audio.numpy()
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=audio)
| diffusers/src/diffusers/pipelines/audioldm/pipeline_audioldm.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/audioldm/pipeline_audioldm.py",
"repo_id": "diffusers",
"token_count": 11663
} |
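Step 0 of `__call__` above derives the latent spectrogram height from `audio_length_in_s` via the vocoder's upsample factor. A small sketch of that arithmetic with hypothetical vocoder and VAE config values (chosen for illustration only):
```py
import numpy as np

# Hypothetical config values, for illustration only.
upsample_rates = [4, 4, 4, 2, 2]   # product = 256 audio samples per mel frame
sampling_rate = 16384
vae_scale_factor = 4               # assumed VAE downsampling factor

vocoder_upsample_factor = np.prod(upsample_rates) / sampling_rate  # 0.015625 s of audio per mel frame

audio_length_in_s = 5.0
height = int(audio_length_in_s / vocoder_upsample_factor)          # 320 mel frames
if height % vae_scale_factor != 0:                                 # round up to a multiple of the VAE factor
    height = int(np.ceil(height / vae_scale_factor)) * vae_scale_factor

print(height)  # 320
```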
from typing import TYPE_CHECKING
from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
_import_structure = {"pipeline_dance_diffusion": ["DanceDiffusionPipeline"]}
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
from .pipeline_dance_diffusion import DanceDiffusionPipeline
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
module_spec=__spec__,
)
| diffusers/src/diffusers/pipelines/dance_diffusion/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/dance_diffusion/__init__.py",
"repo_id": "diffusers",
"token_count": 189
} |
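The `_LazyModule` indirection above keeps importing the subpackage cheap; the pipeline class is only materialised on first attribute access. A behavioural sketch, assuming a standard diffusers install:
```py
from diffusers.pipelines import dance_diffusion  # fast: no pipeline code loaded yet

pipeline_cls = dance_diffusion.DanceDiffusionPipeline  # the real import happens here
print(pipeline_cls.__name__)  # DanceDiffusionPipeline
```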
from typing import List
import PIL.Image
import torch
from PIL import Image
from ...configuration_utils import ConfigMixin
from ...models.modeling_utils import ModelMixin
from ...utils import PIL_INTERPOLATION
class IFWatermarker(ModelMixin, ConfigMixin):
def __init__(self):
super().__init__()
self.register_buffer("watermark_image", torch.zeros((62, 62, 4)))
self.watermark_image_as_pil = None
def apply_watermark(self, images: List[PIL.Image.Image], sample_size=None):
# Copied from https://github.com/deep-floyd/IF/blob/b77482e36ca2031cb94dbca1001fc1e6400bf4ab/deepfloyd_if/modules/base.py#L287
h = images[0].height
w = images[0].width
sample_size = sample_size or h
coef = min(h / sample_size, w / sample_size)
img_h, img_w = (int(h / coef), int(w / coef)) if coef < 1 else (h, w)
S1, S2 = 1024**2, img_w * img_h
K = (S2 / S1) ** 0.5
wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K)
if self.watermark_image_as_pil is None:
watermark_image = self.watermark_image.to(torch.uint8).cpu().numpy()
watermark_image = Image.fromarray(watermark_image, mode="RGBA")
self.watermark_image_as_pil = watermark_image
wm_img = self.watermark_image_as_pil.resize(
(wm_size, wm_size), PIL_INTERPOLATION["bicubic"], reducing_gap=None
)
for pil_img in images:
pil_img.paste(wm_img, box=(wm_x - wm_size, wm_y - wm_size, wm_x, wm_y), mask=wm_img.split()[-1])
return images
| diffusers/src/diffusers/pipelines/deepfloyd_if/watermark.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deepfloyd_if/watermark.py",
"repo_id": "diffusers",
"token_count": 737
} |
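A worked instance of the scaling arithmetic in `apply_watermark`, for a hypothetical 1024x1024 image with `sample_size` left unset (so it defaults to the image height):
```py
h, w = 1024, 1024
sample_size = h                               # sample_size or h
coef = min(h / sample_size, w / sample_size)  # 1.0
img_h, img_w = (h, w)                         # coef >= 1, so the size is unchanged
S1, S2 = 1024**2, img_w * img_h
K = (S2 / S1) ** 0.5                          # 1.0
wm_size, wm_x, wm_y = int(K * 62), img_w - int(14 * K), img_h - int(14 * K)

print(wm_size, (wm_x - wm_size, wm_y - wm_size, wm_x, wm_y))
# 62 (948, 948, 1010, 1010) -> the 62x62 watermark sits 14 px in from the bottom-right corner
```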
# Copyright 2024 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL.Image
import torch
from ....models import UNet2DModel
from ....schedulers import RePaintScheduler
from ....utils import PIL_INTERPOLATION, deprecate, logging
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.preprocess
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
deprecation_message = "The preprocess method is deprecated and will be removed in diffusers 1.0.0. Please use VaeImageProcessor.preprocess(...) instead"
deprecate("preprocess", "1.0.0", deprecation_message, standard_warn=False)
if isinstance(image, torch.Tensor):
return image
elif isinstance(image, PIL.Image.Image):
image = [image]
if isinstance(image[0], PIL.Image.Image):
w, h = image[0].size
w, h = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
image = np.concatenate(image, axis=0)
image = np.array(image).astype(np.float32) / 255.0
image = image.transpose(0, 3, 1, 2)
image = 2.0 * image - 1.0
image = torch.from_numpy(image)
elif isinstance(image[0], torch.Tensor):
image = torch.cat(image, dim=0)
return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
if isinstance(mask, torch.Tensor):
return mask
elif isinstance(mask, PIL.Image.Image):
mask = [mask]
if isinstance(mask[0], PIL.Image.Image):
w, h = mask[0].size
w, h = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
mask = np.concatenate(mask, axis=0)
mask = mask.astype(np.float32) / 255.0
mask[mask < 0.5] = 0
mask[mask >= 0.5] = 1
mask = torch.from_numpy(mask)
elif isinstance(mask[0], torch.Tensor):
mask = torch.cat(mask, dim=0)
return mask
class RePaintPipeline(DiffusionPipeline):
r"""
Pipeline for image inpainting using RePaint.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
unet ([`UNet2DModel`]):
A `UNet2DModel` to denoise the encoded image latents.
scheduler ([`RePaintScheduler`]):
A `RePaintScheduler` to be used in combination with `unet` to denoise the encoded image.
"""
unet: UNet2DModel
scheduler: RePaintScheduler
model_cpu_offload_seq = "unet"
def __init__(self, unet, scheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
image: Union[torch.Tensor, PIL.Image.Image],
mask_image: Union[torch.Tensor, PIL.Image.Image],
num_inference_steps: int = 250,
eta: float = 0.0,
jump_length: int = 10,
jump_n_sample: int = 10,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
) -> Union[ImagePipelineOutput, Tuple]:
r"""
The call function to the pipeline for generation.
Args:
image (`torch.Tensor` or `PIL.Image.Image`):
The original image to inpaint on.
mask_image (`torch.Tensor` or `PIL.Image.Image`):
The mask image, where 0.0 defines which parts of the original image to inpaint.
num_inference_steps (`int`, *optional*, defaults to 250):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
eta (`float`):
The weight of the added noise in a diffusion step. Its value is between 0.0 and 1.0; 0.0 corresponds to
DDIM and 1.0 is the DDPM scheduler.
jump_length (`int`, *optional*, defaults to 10):
The number of steps taken forward in time before going backward in time for a single jump ("j" in
RePaint paper). Take a look at Figure 9 and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf).
jump_n_sample (`int`, *optional*, defaults to 10):
The number of times to make a forward time jump for a given chosen time sample. Take a look at Figure 9
and 10 in the [paper](https://arxiv.org/pdf/2201.09865.pdf).
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
Example:
```py
>>> from io import BytesIO
>>> import torch
>>> import PIL
>>> import requests
>>> from diffusers import RePaintPipeline, RePaintScheduler
>>> def download_image(url):
... response = requests.get(url)
... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
>>> img_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/celeba_hq_256.png"
>>> mask_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/repaint/mask_256.png"
>>> # Load the original image and the mask as PIL images
>>> original_image = download_image(img_url).resize((256, 256))
>>> mask_image = download_image(mask_url).resize((256, 256))
>>> # Load the RePaint scheduler and pipeline based on a pretrained DDPM model
>>> scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
>>> pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
>>> pipe = pipe.to("cuda")
>>> generator = torch.Generator(device="cuda").manual_seed(0)
>>> output = pipe(
... image=original_image,
... mask_image=mask_image,
... num_inference_steps=250,
... eta=0.0,
... jump_length=10,
... jump_n_sample=10,
... generator=generator,
... )
>>> inpainted_image = output.images[0]
```
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
original_image = image
original_image = _preprocess_image(original_image)
original_image = original_image.to(device=self._execution_device, dtype=self.unet.dtype)
mask_image = _preprocess_mask(mask_image)
mask_image = mask_image.to(device=self._execution_device, dtype=self.unet.dtype)
batch_size = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
image_shape = original_image.shape
image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self._execution_device)
self.scheduler.eta = eta
t_last = self.scheduler.timesteps[0] + 1
generator = generator[0] if isinstance(generator, list) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
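# RePaint alternates denoising steps with `jump_length`-long re-noising jumps:
# timesteps normally decrease, so a timestep that is not smaller than the previous
# one marks a jump, and the sample is partially re-noised via `undo_step` instead.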
if t < t_last:
# predict the noise residual
model_output = self.unet(image, t).sample
# compute previous image: x_t -> x_t-1
image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
image = self.scheduler.undo_step(image, t_last, generator)
t_last = t
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
| diffusers/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/repaint/pipeline_repaint.py",
"repo_id": "diffusers",
"token_count": 4200
} |
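A small sketch of the binarisation performed by `_preprocess_mask` above (values below 0.5 mark the region to inpaint):
```py
import numpy as np
import torch

mask = np.array([[0.2, 0.7], [0.49, 0.51]], dtype=np.float32)
mask[mask < 0.5] = 0    # 0 -> pixels the pipeline will inpaint
mask[mask >= 0.5] = 1   # 1 -> pixels kept from the original image
mask = torch.from_numpy(mask)
# -> zeros where the input was below 0.5, ones elsewhere
```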
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import torch
from ....models import UNet2DModel
from ....schedulers import KarrasVeScheduler
from ....utils.torch_utils import randn_tensor
from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
r"""
Pipeline for unconditional image generation.
Parameters:
unet ([`UNet2DModel`]):
A `UNet2DModel` to denoise the encoded image.
scheduler ([`KarrasVeScheduler`]):
A scheduler to be used in combination with `unet` to denoise the encoded image.
"""
# add type hints for linting
unet: UNet2DModel
scheduler: KarrasVeScheduler
def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
super().__init__()
self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__(
self,
batch_size: int = 1,
num_inference_steps: int = 50,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[Tuple, ImagePipelineOutput]:
r"""
The call function to the pipeline for generation.
Args:
batch_size (`int`, *optional*, defaults to 1):
The number of images to generate.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
Example:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
img_size = self.unet.config.sample_size
shape = (batch_size, 3, img_size, img_size)
model = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(num_inference_steps)
for t in self.progress_bar(self.scheduler.timesteps):
# here sigma_t == t_i from the paper
sigma = self.scheduler.schedule[t]
sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
step_output = self.scheduler.step_correct(
model_output,
sigma_hat,
sigma_prev,
sample_hat,
step_output.prev_sample,
step_output["derivative"],
)
sample = step_output.prev_sample
sample = (sample / 2 + 0.5).clamp(0, 1)
image = sample.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
| diffusers/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/deprecated/stochastic_karras_ve/pipeline_stochastic_karras_ve.py",
"repo_id": "diffusers",
"token_count": 2264
} |
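The `Example:` section of the `__call__` docstring above is empty; a minimal usage sketch follows, using an untrained toy UNet so the output is pure noise. It assumes `KarrasVePipeline` and `KarrasVeScheduler` are still exported by your diffusers version (both are deprecated):
```py
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

# Randomly initialised toy model, for illustration only.
unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
scheduler = KarrasVeScheduler()
pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)

generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, num_inference_steps=2, generator=generator).images[0]
image.save("karras_ve_noise.png")  # noise, since the UNet is untrained
```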
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint
from transformers import PretrainedConfig, PreTrainedModel, PreTrainedTokenizer
from transformers.activations import ACT2FN
from transformers.modeling_outputs import BaseModelOutput
from transformers.utils import logging
from ...models import AutoencoderKL, UNet2DConditionModel, UNet2DModel, VQModel
from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from ...utils import is_torch_xla_available
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
class LDMTextToImagePipeline(DiffusionPipeline):
r"""
Pipeline for text-to-image generation using latent diffusion.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
implemented for all pipelines (downloading, saving, running on a particular device, etc.).
Parameters:
vqvae ([`VQModel`]):
Vector-quantized (VQ) model to encode and decode images to and from latent representations.
bert ([`LDMBertModel`]):
Text-encoder model based on [`~transformers.BERT`].
tokenizer ([`~transformers.BertTokenizer`]):
A `BertTokenizer` to tokenize text.
unet ([`UNet2DConditionModel`]):
A `UNet2DConditionModel` to denoise the encoded image latents.
scheduler ([`SchedulerMixin`]):
A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
[`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
"""
model_cpu_offload_seq = "bert->unet->vqvae"
def __init__(
self,
vqvae: Union[VQModel, AutoencoderKL],
bert: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
unet: Union[UNet2DModel, UNet2DConditionModel],
scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
):
super().__init__()
self.register_modules(vqvae=vqvae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
self.vae_scale_factor = 2 ** (len(self.vqvae.config.block_out_channels) - 1)
@torch.no_grad()
def __call__(
self,
prompt: Union[str, List[str]],
height: Optional[int] = None,
width: Optional[int] = None,
num_inference_steps: Optional[int] = 50,
guidance_scale: Optional[float] = 1.0,
eta: Optional[float] = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
**kwargs,
) -> Union[Tuple, ImagePipelineOutput]:
r"""
The call function to the pipeline for generation.
Args:
prompt (`str` or `List[str]`):
The prompt or prompts to guide the image generation.
height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
num_inference_steps (`int`, *optional*, defaults to 50):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
guidance_scale (`float`, *optional*, defaults to 1.0):
A higher guidance scale value encourages the model to generate images closely linked to the text
`prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
generator (`torch.Generator`, *optional*):
A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor is generated by sampling using the supplied random `generator`.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between `PIL.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`ImagePipelineOutput`] instead of a plain tuple.
Example:
```py
>>> from diffusers import DiffusionPipeline
>>> # load model and scheduler
>>> ldm = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
>>> # run pipeline in inference (sample random noise and denoise)
>>> prompt = "A painting of a squirrel eating a burger"
>>> images = ldm([prompt], num_inference_steps=50, eta=0.3, guidance_scale=6).images
>>> # save images
>>> for idx, image in enumerate(images):
... image.save(f"squirrel-{idx}.png")
```
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images.
"""
# 0. Default height and width to unet
height = height or self.unet.config.sample_size * self.vae_scale_factor
width = width or self.unet.config.sample_size * self.vae_scale_factor
if isinstance(prompt, str):
batch_size = 1
elif isinstance(prompt, list):
batch_size = len(prompt)
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
# get unconditional embeddings for classifier free guidance
if guidance_scale != 1.0:
uncond_input = self.tokenizer(
[""] * batch_size, padding="max_length", max_length=77, truncation=True, return_tensors="pt"
)
negative_prompt_embeds = self.bert(uncond_input.input_ids.to(self._execution_device))[0]
# get prompt text embeddings
text_input = self.tokenizer(prompt, padding="max_length", max_length=77, truncation=True, return_tensors="pt")
prompt_embeds = self.bert(text_input.input_ids.to(self._execution_device))[0]
# get the initial random noise unless the user supplied it
latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(
latents_shape, generator=generator, device=self._execution_device, dtype=prompt_embeds.dtype
)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
latents = latents.to(self._execution_device)
self.scheduler.set_timesteps(num_inference_steps)
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_kwargs = {}
if accepts_eta:
extra_kwargs["eta"] = eta
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale == 1.0:
# guidance_scale of 1 means no guidance
latents_input = latents
context = prompt_embeds
else:
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
latents_input = torch.cat([latents] * 2)
context = torch.cat([negative_prompt_embeds, prompt_embeds])
# predict the noise residual
noise_pred = self.unet(latents_input, t, encoder_hidden_states=context).sample
# perform guidance
if guidance_scale != 1.0:
noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
if XLA_AVAILABLE:
xm.mark_step()
# scale and decode the image latents with vae
latents = 1 / self.vqvae.config.scaling_factor * latents
image = self.vqvae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
image = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
################################################################################
# Code for the text transformer model
################################################################################
""" PyTorch LDMBERT model."""
logger = logging.get_logger(__name__)
LDMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"ldm-bert",
# See all LDMBert models at https://huggingface.co/models?filter=ldmbert
]
LDMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"ldm-bert": "https://huggingface.co/valhalla/ldm-bert/blob/main/config.json",
}
""" LDMBERT model configuration"""
class LDMBertConfig(PretrainedConfig):
model_type = "ldmbert"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=30522,
max_position_embeddings=77,
encoder_layers=32,
encoder_ffn_dim=5120,
encoder_attention_heads=8,
head_dim=64,
encoder_layerdrop=0.0,
activation_function="gelu",
d_model=1280,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
classifier_dropout=0.0,
scale_embedding=False,
use_cache=True,
pad_token_id=0,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.head_dim = head_dim
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(pad_token_id=pad_token_id, **kwargs)
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
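# Example: a padding mask [[1, 1, 0]] expanded with tgt_len=3 becomes a
# [1, 1, 3, 3] bias that is 0.0 for kept tokens and the dtype minimum
# (effectively -inf) in the last key column, so softmax assigns the padded
# position zero attention weight for every query.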
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->LDMBert
class LDMBertAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
head_dim: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = False,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = head_dim
self.inner_dim = head_dim * num_heads
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, self.inner_dim, bias=bias)
self.out_proj = nn.Linear(self.inner_dim, embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, _ = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
f" {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
f" {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
# Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
# partitioned across GPUs when using tensor-parallelism.
attn_output = attn_output.reshape(bsz, tgt_len, self.inner_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
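# A minimal shape check for `LDMBertAttention` (the dimensions below are assumptions for illustration):
#
#   attn = LDMBertAttention(embed_dim=64, num_heads=4, head_dim=16)
#   x = torch.randn(2, 8, 64)                         # [bsz, seq_len, embed_dim]
#   out, weights, _ = attn(x, output_attentions=True)
#   # out: [2, 8, 64]; weights: [2, 4, 8, 8] (one attention map per head)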
class LDMBertEncoderLayer(nn.Module):
def __init__(self, config: LDMBertConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = LDMBertAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
head_dim=config.head_dim,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Args:
hidden_states (`torch.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (`torch.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
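# Note that this layer is pre-norm: LayerNorm is applied before self-attention and before the
# feed-forward block, and each sub-block is wrapped in a residual connection. A minimal sketch
# (config values are illustrative only):
#
#   cfg = LDMBertConfig(d_model=64, encoder_attention_heads=4, head_dim=16, encoder_ffn_dim=128)
#   layer = LDMBertEncoderLayer(cfg)
#   h = torch.randn(2, 8, 64)
#   h_out = layer(h, attention_mask=None, layer_head_mask=None)[0]   # [2, 8, 64]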
# Copied from transformers.models.bart.modeling_bart.BartPretrainedModel with Bart->LDMBert
class LDMBertPreTrainedModel(PreTrainedModel):
config_class = LDMBertConfig
base_model_prefix = "model"
_supports_gradient_checkpointing = True
_keys_to_ignore_on_load_unexpected = [r"encoder\.version", r"decoder\.version"]
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
}
return dummy_inputs
class LDMBertEncoder(LDMBertPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
[`LDMBertEncoderLayer`].
Args:
config: LDMBertConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: LDMBertConfig):
super().__init__(config)
self.dropout = config.dropout
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim)
self.embed_positions = nn.Embedding(config.max_position_embeddings, embed_dim)
self.layers = nn.ModuleList([LDMBertEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layer_norm = nn.LayerNorm(embed_dim)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`BartTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.BaseModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
seq_len = input_shape[1]
if position_ids is None:
position_ids = torch.arange(seq_len, dtype=torch.long, device=inputs_embeds.device).expand((1, -1))
embed_pos = self.embed_positions(position_ids)
hidden_states = inputs_embeds + embed_pos
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
if head_mask.size()[0] != (len(self.layers)):
raise ValueError(
f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
f" {head_mask.size()[0]}."
)
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if torch.is_grad_enabled() and self.gradient_checkpointing:
layer_outputs = self._gradient_checkpointing_func(
encoder_layer,
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
hidden_states = self.layer_norm(hidden_states)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class LDMBertModel(LDMBertPreTrainedModel):
_no_split_modules = []
def __init__(self, config: LDMBertConfig):
super().__init__(config)
self.model = LDMBertEncoder(config)
self.to_logits = nn.Linear(config.hidden_size, config.vocab_size)
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
outputs = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return outputs
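# A minimal end-to-end sketch of the encoder (sizes are illustrative; the released LDM checkpoint
# uses the `LDMBertConfig` defaults above):
#
#   cfg = LDMBertConfig(vocab_size=1000, encoder_layers=2, encoder_attention_heads=4,
#                       head_dim=16, d_model=64, encoder_ffn_dim=128)
#   bert = LDMBertModel(cfg)
#   ids = torch.randint(0, 1000, (2, 8))              # [bsz, seq_len]
#   hidden = bert(ids)[0]                             # last_hidden_state: [2, 8, 64]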
| diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/latent_diffusion/pipeline_latent_diffusion.py",
"repo_id": "diffusers",
"token_count": 14243
} |
# Copyright 2024 Marigold authors, PRS ETH Zurich. All rights reserved.
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# More information and citation instructions are available on the
# Marigold project website: https://marigoldmonodepth.github.io
# --------------------------------------------------------------------------
from dataclasses import dataclass
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from PIL import Image
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from ...image_processor import PipelineImageInput
from ...models import (
AutoencoderKL,
UNet2DConditionModel,
)
from ...schedulers import (
DDIMScheduler,
LCMScheduler,
)
from ...utils import (
BaseOutput,
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils.import_utils import is_scipy_available
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
from .marigold_image_processing import MarigoldImageProcessor
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import diffusers
>>> import torch
>>> pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
... "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
... ).to("cuda")
>>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
>>> depth = pipe(image)
>>> vis = pipe.image_processor.visualize_depth(depth.prediction)
>>> vis[0].save("einstein_depth.png")
>>> depth_16bit = pipe.image_processor.export_depth_to_16bit_png(depth.prediction)
>>> depth_16bit[0].save("einstein_depth_16bit.png")
```
"""
@dataclass
class MarigoldDepthOutput(BaseOutput):
"""
Output class for Marigold monocular depth prediction pipeline.
Args:
prediction (`np.ndarray`, `torch.Tensor`):
Predicted depth maps with values in the range [0, 1]. The shape is always $numimages \times 1 \times height
\times width$, regardless of whether the images were passed as a 4D array or a list.
uncertainty (`None`, `np.ndarray`, `torch.Tensor`):
Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is $numimages
\times 1 \times height \times width$.
latent (`None`, `torch.Tensor`):
Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline.
The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$.
"""
prediction: Union[np.ndarray, torch.Tensor]
uncertainty: Union[None, np.ndarray, torch.Tensor]
latent: Union[None, torch.Tensor]
class MarigoldDepthPipeline(DiffusionPipeline):
"""
Pipeline for monocular depth estimation using the Marigold method: https://marigoldmonodepth.github.io.
This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
Args:
unet (`UNet2DConditionModel`):
Conditional U-Net to denoise the depth latent, conditioned on image latent.
vae (`AutoencoderKL`):
Variational Auto-Encoder (VAE) Model to encode and decode images and predictions to and from latent
representations.
scheduler (`DDIMScheduler` or `LCMScheduler`):
A scheduler to be used in combination with `unet` to denoise the encoded image latents.
text_encoder (`CLIPTextModel`):
Text-encoder, for empty text embedding.
tokenizer (`CLIPTokenizer`):
CLIP tokenizer.
prediction_type (`str`, *optional*):
Type of predictions made by the model.
scale_invariant (`bool`, *optional*):
A model property specifying whether the predicted depth maps are scale-invariant. This value must be set in
the model config. When used together with the `shift_invariant=True` flag, the model is also called
"affine-invariant". NB: overriding this value is not supported.
shift_invariant (`bool`, *optional*):
A model property specifying whether the predicted depth maps are shift-invariant. This value must be set in
the model config. When used together with the `scale_invariant=True` flag, the model is also called
"affine-invariant". NB: overriding this value is not supported.
default_denoising_steps (`int`, *optional*):
The minimum number of denoising diffusion steps that are required to produce a prediction of reasonable
quality with the given model. This value must be set in the model config. When the pipeline is called
without explicitly setting `num_inference_steps`, the default value is used. This is required to ensure
reasonable results with various model flavors compatible with the pipeline, such as those relying on very
short denoising schedules (`LCMScheduler`) and those with full diffusion schedules (`DDIMScheduler`).
default_processing_resolution (`int`, *optional*):
The recommended value of the `processing_resolution` parameter of the pipeline. This value must be set in
the model config. When the pipeline is called without explicitly setting `processing_resolution`, the
default value is used. This is required to ensure reasonable results with various model flavors trained
with varying optimal processing resolution values.
"""
model_cpu_offload_seq = "text_encoder->unet->vae"
supported_prediction_types = ("depth", "disparity")
def __init__(
self,
unet: UNet2DConditionModel,
vae: AutoencoderKL,
scheduler: Union[DDIMScheduler, LCMScheduler],
text_encoder: CLIPTextModel,
tokenizer: CLIPTokenizer,
prediction_type: Optional[str] = None,
scale_invariant: Optional[bool] = True,
shift_invariant: Optional[bool] = True,
default_denoising_steps: Optional[int] = None,
default_processing_resolution: Optional[int] = None,
):
super().__init__()
if prediction_type not in self.supported_prediction_types:
logger.warning(
f"Potentially unsupported `prediction_type='{prediction_type}'`; values supported by the pipeline: "
f"{self.supported_prediction_types}."
)
self.register_modules(
unet=unet,
vae=vae,
scheduler=scheduler,
text_encoder=text_encoder,
tokenizer=tokenizer,
)
self.register_to_config(
prediction_type=prediction_type,
scale_invariant=scale_invariant,
shift_invariant=shift_invariant,
default_denoising_steps=default_denoising_steps,
default_processing_resolution=default_processing_resolution,
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.scale_invariant = scale_invariant
self.shift_invariant = shift_invariant
self.default_denoising_steps = default_denoising_steps
self.default_processing_resolution = default_processing_resolution
self.empty_text_embedding = None
self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)
def check_inputs(
self,
image: PipelineImageInput,
num_inference_steps: int,
ensemble_size: int,
processing_resolution: int,
resample_method_input: str,
resample_method_output: str,
batch_size: int,
ensembling_kwargs: Optional[Dict[str, Any]],
latents: Optional[torch.Tensor],
generator: Optional[Union[torch.Generator, List[torch.Generator]]],
output_type: str,
output_uncertainty: bool,
) -> int:
if num_inference_steps is None:
raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.")
if num_inference_steps < 1:
raise ValueError("`num_inference_steps` must be positive.")
if ensemble_size < 1:
raise ValueError("`ensemble_size` must be positive.")
if ensemble_size == 2:
logger.warning(
"`ensemble_size` == 2 results are similar to no ensembling (1); "
"consider increasing the value to at least 3."
)
if ensemble_size > 1 and (self.scale_invariant or self.shift_invariant) and not is_scipy_available():
raise ImportError("Make sure to install scipy if you want to use ensembling.")
if ensemble_size == 1 and output_uncertainty:
raise ValueError(
"Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` "
"greater than 1."
)
if processing_resolution is None:
raise ValueError(
"`processing_resolution` is not specified and could not be resolved from the model config."
)
if processing_resolution < 0:
raise ValueError(
"`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for "
"downsampled processing."
)
if processing_resolution % self.vae_scale_factor != 0:
raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.")
if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
raise ValueError(
"`resample_method_input` takes string values compatible with PIL library: "
"nearest, nearest-exact, bilinear, bicubic, area."
)
if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
raise ValueError(
"`resample_method_output` takes string values compatible with PIL library: "
"nearest, nearest-exact, bilinear, bicubic, area."
)
if batch_size < 1:
raise ValueError("`batch_size` must be positive.")
if output_type not in ["pt", "np"]:
raise ValueError("`output_type` must be one of `pt` or `np`.")
if latents is not None and generator is not None:
raise ValueError("`latents` and `generator` cannot be used together.")
if ensembling_kwargs is not None:
if not isinstance(ensembling_kwargs, dict):
raise ValueError("`ensembling_kwargs` must be a dictionary.")
if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("mean", "median"):
raise ValueError("`ensembling_kwargs['reduction']` can be either `'mean'` or `'median'`.")
# image checks
num_images = 0
W, H = None, None
if not isinstance(image, list):
image = [image]
for i, img in enumerate(image):
if isinstance(img, np.ndarray) or torch.is_tensor(img):
if img.ndim not in (2, 3, 4):
raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.")
H_i, W_i = img.shape[-2:]
N_i = 1
if img.ndim == 4:
N_i = img.shape[0]
elif isinstance(img, Image.Image):
W_i, H_i = img.size
N_i = 1
else:
raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.")
if W is None:
W, H = W_i, H_i
elif (W, H) != (W_i, H_i):
raise ValueError(
f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}"
)
num_images += N_i
# latents checks
if latents is not None:
if not torch.is_tensor(latents):
raise ValueError("`latents` must be a torch.Tensor.")
if latents.dim() != 4:
raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.")
if processing_resolution > 0:
max_orig = max(H, W)
new_H = H * processing_resolution // max_orig
new_W = W * processing_resolution // max_orig
if new_H == 0 or new_W == 0:
raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]")
W, H = new_W, new_H
w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor
h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor
shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w)
if latents.shape != shape_expected:
raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.")
# generator checks
if generator is not None:
if isinstance(generator, list):
if len(generator) != num_images * ensemble_size:
raise ValueError(
"The number of generators must match the total number of ensemble members for all input images."
)
if not all(g.device.type == generator[0].device.type for g in generator):
raise ValueError("`generator` device placement is not consistent in the list.")
elif not isinstance(generator, torch.Generator):
raise ValueError(f"Unsupported generator type: {type(generator)}.")
return num_images
def progress_bar(self, iterable=None, total=None, desc=None, leave=True):
if not hasattr(self, "_progress_bar_config"):
self._progress_bar_config = {}
elif not isinstance(self._progress_bar_config, dict):
raise ValueError(
f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
)
progress_bar_config = dict(**self._progress_bar_config)
progress_bar_config["desc"] = progress_bar_config.get("desc", desc)
progress_bar_config["leave"] = progress_bar_config.get("leave", leave)
if iterable is not None:
return tqdm(iterable, **progress_bar_config)
elif total is not None:
return tqdm(total=total, **progress_bar_config)
else:
raise ValueError("Either `total` or `iterable` has to be defined.")
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
image: PipelineImageInput,
num_inference_steps: Optional[int] = None,
ensemble_size: int = 1,
processing_resolution: Optional[int] = None,
match_input_resolution: bool = True,
resample_method_input: str = "bilinear",
resample_method_output: str = "bilinear",
batch_size: int = 1,
ensembling_kwargs: Optional[Dict[str, Any]] = None,
latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
output_type: str = "np",
output_uncertainty: bool = False,
output_latent: bool = False,
return_dict: bool = True,
):
"""
Function invoked when calling the pipeline.
Args:
image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
`List[torch.Tensor]`): An input image or images used as an input for the depth estimation task. For
arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible
by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or
three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the
same width and height.
num_inference_steps (`int`, *optional*, defaults to `None`):
Number of denoising diffusion steps during inference. The default value `None` results in automatic
selection. The number of steps should be at least 10 with the full Marigold models, and between 1 and 4
for Marigold-LCM models.
ensemble_size (`int`, defaults to `1`):
Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for
faster inference.
processing_resolution (`int`, *optional*, defaults to `None`):
Effective processing resolution. When set to `0`, matches the larger input image dimension. This
produces crisper predictions, but may also lead to the overall loss of global context. The default
value `None` resolves to the optimal value from the model config.
match_input_resolution (`bool`, *optional*, defaults to `True`):
When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer
side of the output will be equal to `processing_resolution`.
resample_method_input (`str`, *optional*, defaults to `"bilinear"`):
Resampling method used to resize input images to `processing_resolution`. The accepted values are:
`"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
resample_method_output (`str`, *optional*, defaults to `"bilinear"`):
Resampling method used to resize output predictions to match the input resolution. The accepted values
are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
batch_size (`int`, *optional*, defaults to `1`):
Batch size; only matters when setting `ensemble_size` or passing a tensor of images.
ensembling_kwargs (`dict`, *optional*, defaults to `None`):
Extra dictionary with arguments for precise ensembling control. The following options are available:
- reduction (`str`, *optional*, defaults to `"median"`): Defines the ensembling function applied in
every pixel location, can be either `"median"` or `"mean"`.
- regularizer_strength (`float`, *optional*, defaults to `0.02`): Strength of the regularizer that
pulls the aligned predictions to the unit range from 0 to 1.
- max_iter (`int`, *optional*, defaults to `2`): Maximum number of the alignment solver steps. Refer to
`scipy.optimize.minimize` function, `options` argument.
- tol (`float`, *optional*, defaults to `1e-3`): Alignment solver tolerance. The solver stops when the
tolerance is reached.
- max_res (`int`, *optional*, defaults to `1024`): Resolution at which the alignment is performed;
`None` matches the `processing_resolution`.
latents (`torch.Tensor`, or `List[torch.Tensor]`, *optional*, defaults to `None`):
Latent noise tensors to replace the random initialization. These can be taken from the previous
function call's output.
generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`):
Random number generator object to ensure reproducibility.
output_type (`str`, *optional*, defaults to `"np"`):
Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted
values are: `"np"` (numpy array) or `"pt"` (torch tensor).
output_uncertainty (`bool`, *optional*, defaults to `False`):
When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that
the `ensemble_size` argument is set to a value above 2.
output_latent (`bool`, *optional*, defaults to `False`):
When enabled, the output's `latent` field contains the latent codes corresponding to the predictions
within the ensemble. These codes can be saved, modified, and used for subsequent calls with the
`latents` argument.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.marigold.MarigoldDepthOutput`] instead of a plain tuple.
Examples:
Returns:
[`~pipelines.marigold.MarigoldDepthOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.marigold.MarigoldDepthOutput`] is returned, otherwise a
`tuple` is returned where the first element is the prediction, the second element is the uncertainty
(or `None`), and the third is the latent (or `None`).
"""
# 0. Resolving variables.
device = self._execution_device
dtype = self.dtype
# Model-specific optimal default values leading to fast and reasonable results.
if num_inference_steps is None:
num_inference_steps = self.default_denoising_steps
if processing_resolution is None:
processing_resolution = self.default_processing_resolution
# 1. Check inputs.
num_images = self.check_inputs(
image,
num_inference_steps,
ensemble_size,
processing_resolution,
resample_method_input,
resample_method_output,
batch_size,
ensembling_kwargs,
latents,
generator,
output_type,
output_uncertainty,
)
# 2. Prepare empty text conditioning.
# Model invocation: self.tokenizer, self.text_encoder.
if self.empty_text_embedding is None:
prompt = ""
text_inputs = self.tokenizer(
prompt,
padding="do_not_pad",
max_length=self.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids.to(device)
self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024]
# 3. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`,
# optionally downsamples them to the `processing_resolution` `(PH, PW)`, where
# `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are
# divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None`
# of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of
# operation and leads to the most reasonable results. Using the native image resolution or any other processing
# resolution can lead to loss of either fine details or global context in the output predictions.
image, padding, original_resolution = self.image_processor.preprocess(
image, processing_resolution, resample_method_input, device, dtype
) # [N,3,PPH,PPW]
# 4. Encode input image into latent space. At this step, each of the `N` input images is represented with `E`
# ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently.
# Latents of each such predictions across all input images and all ensemble members are represented in the
# `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded
# into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure
# reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline
# code. For example, in the Marigold-LCM video processing demo, the latents initialization of a frame is taken
# as a convex combination of the latents output of the pipeline for the previous frame and a newly-sampled
# noise. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space
# dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`.
# Model invocation: self.vae.encoder.
image_latent, pred_latent = self.prepare_latents(
image, latents, generator, ensemble_size, batch_size
) # [N*E,4,h,w], [N*E,4,h,w]
del image
batch_empty_text_embedding = self.empty_text_embedding.to(device=device, dtype=dtype).repeat(
batch_size, 1, 1
) # [B,2,1024]
# 5. Process the denoising loop. All `N * E` latents are processed sequentially in batches of size `batch_size`.
# The unet model takes concatenated latent spaces of the input image and the predicted modality as an input, and
# outputs noise for the predicted modality's latent space. The number of denoising diffusion steps is defined by
# `num_inference_steps`. It is either set directly, or resolves to the optimal value specific to the loaded
# model.
# Model invocation: self.unet.
pred_latents = []
for i in self.progress_bar(
range(0, num_images * ensemble_size, batch_size), leave=True, desc="Marigold predictions..."
):
batch_image_latent = image_latent[i : i + batch_size] # [B,4,h,w]
batch_pred_latent = pred_latent[i : i + batch_size] # [B,4,h,w]
effective_batch_size = batch_image_latent.shape[0]
text = batch_empty_text_embedding[:effective_batch_size] # [B,2,1024]
self.scheduler.set_timesteps(num_inference_steps, device=device)
for t in self.progress_bar(self.scheduler.timesteps, leave=False, desc="Diffusion steps..."):
batch_latent = torch.cat([batch_image_latent, batch_pred_latent], dim=1) # [B,8,h,w]
noise = self.unet(batch_latent, t, encoder_hidden_states=text, return_dict=False)[0] # [B,4,h,w]
batch_pred_latent = self.scheduler.step(
noise, t, batch_pred_latent, generator=generator
).prev_sample # [B,4,h,w]
if XLA_AVAILABLE:
xm.mark_step()
pred_latents.append(batch_pred_latent)
pred_latent = torch.cat(pred_latents, dim=0) # [N*E,4,h,w]
del (
pred_latents,
image_latent,
batch_empty_text_embedding,
batch_image_latent,
batch_pred_latent,
text,
batch_latent,
noise,
)
# 6. Decode predictions from latent into pixel space. The resulting `N * E` predictions have shape `(PPH, PPW)`,
# which requires slight postprocessing. Decoding into pixel space happens in batches of size `batch_size`.
# Model invocation: self.vae.decoder.
prediction = torch.cat(
[
self.decode_prediction(pred_latent[i : i + batch_size])
for i in range(0, pred_latent.shape[0], batch_size)
],
dim=0,
) # [N*E,1,PPH,PPW]
if not output_latent:
pred_latent = None
# 7. Remove padding. The output shape is (PH, PW).
prediction = self.image_processor.unpad_image(prediction, padding) # [N*E,1,PH,PW]
# 8. Ensemble and compute uncertainty (when `output_uncertainty` is set). This code treats each of the `N`
# groups of `E` ensemble predictions independently. For each group it computes an ensembled prediction of shape
# `(PH, PW)` and an optional uncertainty map of the same dimensions. After computing this pair of outputs for
# each group independently, it stacks them respectively into batches of `N` almost final predictions and
# uncertainty maps.
uncertainty = None
if ensemble_size > 1:
prediction = prediction.reshape(num_images, ensemble_size, *prediction.shape[1:]) # [N,E,1,PH,PW]
prediction = [
self.ensemble_depth(
prediction[i],
self.scale_invariant,
self.shift_invariant,
output_uncertainty,
**(ensembling_kwargs or {}),
)
for i in range(num_images)
] # [ [[1,1,PH,PW], [1,1,PH,PW]], ... ]
prediction, uncertainty = zip(*prediction) # [[1,1,PH,PW], ... ], [[1,1,PH,PW], ... ]
prediction = torch.cat(prediction, dim=0) # [N,1,PH,PW]
if output_uncertainty:
uncertainty = torch.cat(uncertainty, dim=0) # [N,1,PH,PW]
else:
uncertainty = None
# 9. If `match_input_resolution` is set, the output prediction and the uncertainty are upsampled to match the
# input resolution `(H, W)`. This step may introduce upsampling artifacts, and therefore can be disabled.
# Depending on the downstream use-case, upsampling can be also chosen based on the tolerated artifacts by
# setting the `resample_method_output` parameter (e.g., to `"nearest"`).
if match_input_resolution:
prediction = self.image_processor.resize_antialias(
prediction, original_resolution, resample_method_output, is_aa=False
) # [N,1,H,W]
if uncertainty is not None and output_uncertainty:
uncertainty = self.image_processor.resize_antialias(
uncertainty, original_resolution, resample_method_output, is_aa=False
) # [N,1,H,W]
# 10. Prepare the final outputs.
if output_type == "np":
prediction = self.image_processor.pt_to_numpy(prediction) # [N,H,W,1]
if uncertainty is not None and output_uncertainty:
uncertainty = self.image_processor.pt_to_numpy(uncertainty) # [N,H,W,1]
# 11. Offload all models
self.maybe_free_model_hooks()
if not return_dict:
return (prediction, uncertainty, pred_latent)
return MarigoldDepthOutput(
prediction=prediction,
uncertainty=uncertainty,
latent=pred_latent,
)
def prepare_latents(
self,
image: torch.Tensor,
latents: Optional[torch.Tensor],
generator: Optional[torch.Generator],
ensemble_size: int,
batch_size: int,
) -> Tuple[torch.Tensor, torch.Tensor]:
def retrieve_latents(encoder_output):
if hasattr(encoder_output, "latent_dist"):
return encoder_output.latent_dist.mode()
elif hasattr(encoder_output, "latents"):
return encoder_output.latents
else:
raise AttributeError("Could not access latents of provided encoder_output")
image_latent = torch.cat(
[
retrieve_latents(self.vae.encode(image[i : i + batch_size]))
for i in range(0, image.shape[0], batch_size)
],
dim=0,
) # [N,4,h,w]
image_latent = image_latent * self.vae.config.scaling_factor
image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w]
pred_latent = latents
if pred_latent is None:
pred_latent = randn_tensor(
image_latent.shape,
generator=generator,
device=image_latent.device,
dtype=image_latent.dtype,
) # [N*E,4,h,w]
return image_latent, pred_latent
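# Shape summary for `prepare_latents` (here `pipe` stands for a loaded pipeline and `image` for a
# preprocessed [N,3,PPH,PPW] tensor; N input images, ensemble size E, latent grid (h, w)):
#
#   image_latent, pred_latent = pipe.prepare_latents(image, None, None, ensemble_size, batch_size)
#   # image_latent: [N*E, 4, h, w]  VAE-encoded inputs, scaled and repeated E times
#   # pred_latent:  [N*E, 4, h, w]  Gaussian noise, unless an explicit `latents` tensor was passed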
def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor:
if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels:
raise ValueError(
f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}."
)
prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W]
prediction = prediction.mean(dim=1, keepdim=True) # [B,1,H,W]
prediction = torch.clip(prediction, -1.0, 1.0) # [B,1,H,W]
prediction = (prediction + 1.0) / 2.0
return prediction # [B,1,H,W]
@staticmethod
def ensemble_depth(
depth: torch.Tensor,
scale_invariant: bool = True,
shift_invariant: bool = True,
output_uncertainty: bool = False,
reduction: str = "median",
regularizer_strength: float = 0.02,
max_iter: int = 2,
tol: float = 1e-3,
max_res: int = 1024,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
"""
Ensembles the depth maps represented by the `depth` tensor with expected shape `(B, 1, H, W)`, where B is the
number of ensemble members for a given prediction of size `(H x W)`. Even though the function is designed for
depth maps, it can also be used with disparity maps as long as the input tensor values are non-negative. The
alignment happens when the predictions have one or more degrees of freedom, that is when they are either
affine-invariant (`scale_invariant=True` and `shift_invariant=True`), or just scale-invariant (only
`scale_invariant=True`). For absolute predictions (`scale_invariant=False` and `shift_invariant=False`)
alignment is skipped and only ensembling is performed.
Args:
depth (`torch.Tensor`):
Input ensemble depth maps.
scale_invariant (`bool`, *optional*, defaults to `True`):
Whether to treat predictions as scale-invariant.
shift_invariant (`bool`, *optional*, defaults to `True`):
Whether to treat predictions as shift-invariant.
output_uncertainty (`bool`, *optional*, defaults to `False`):
Whether to output uncertainty map.
reduction (`str`, *optional*, defaults to `"median"`):
Reduction method used to ensemble aligned predictions. The accepted values are: `"mean"` and
`"median"`.
regularizer_strength (`float`, *optional*, defaults to `0.02`):
Strength of the regularizer that pulls the aligned predictions to the unit range from 0 to 1.
max_iter (`int`, *optional*, defaults to `2`):
Maximum number of the alignment solver steps. Refer to `scipy.optimize.minimize` function, `options`
argument.
tol (`float`, *optional*, defaults to `1e-3`):
Alignment solver tolerance. The solver stops when the tolerance is reached.
max_res (`int`, *optional*, defaults to `1024`):
Resolution at which the alignment is performed; `None` matches the `processing_resolution`.
Returns:
A tensor of aligned and ensembled depth maps and optionally a tensor of uncertainties of the same shape:
`(1, 1, H, W)`.
"""
if depth.dim() != 4 or depth.shape[1] != 1:
raise ValueError(f"Expecting 4D tensor of shape [B,1,H,W]; got {depth.shape}.")
if reduction not in ("mean", "median"):
raise ValueError(f"Unrecognized reduction method: {reduction}.")
if not scale_invariant and shift_invariant:
raise ValueError("Pure shift-invariant ensembling is not supported.")
def init_param(depth: torch.Tensor):
init_min = depth.reshape(ensemble_size, -1).min(dim=1).values
init_max = depth.reshape(ensemble_size, -1).max(dim=1).values
if scale_invariant and shift_invariant:
init_s = 1.0 / (init_max - init_min).clamp(min=1e-6)
init_t = -init_s * init_min
param = torch.cat((init_s, init_t)).cpu().numpy()
elif scale_invariant:
init_s = 1.0 / init_max.clamp(min=1e-6)
param = init_s.cpu().numpy()
else:
raise ValueError("Unrecognized alignment.")
return param
def align(depth: torch.Tensor, param: np.ndarray) -> torch.Tensor:
if scale_invariant and shift_invariant:
s, t = np.split(param, 2)
s = torch.from_numpy(s).to(depth).view(ensemble_size, 1, 1, 1)
t = torch.from_numpy(t).to(depth).view(ensemble_size, 1, 1, 1)
out = depth * s + t
elif scale_invariant:
s = torch.from_numpy(param).to(depth).view(ensemble_size, 1, 1, 1)
out = depth * s
else:
raise ValueError("Unrecognized alignment.")
return out
def ensemble(
depth_aligned: torch.Tensor, return_uncertainty: bool = False
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
uncertainty = None
if reduction == "mean":
prediction = torch.mean(depth_aligned, dim=0, keepdim=True)
if return_uncertainty:
uncertainty = torch.std(depth_aligned, dim=0, keepdim=True)
elif reduction == "median":
prediction = torch.median(depth_aligned, dim=0, keepdim=True).values
if return_uncertainty:
uncertainty = torch.median(torch.abs(depth_aligned - prediction), dim=0, keepdim=True).values
else:
raise ValueError(f"Unrecognized reduction method: {reduction}.")
return prediction, uncertainty
def cost_fn(param: np.ndarray, depth: torch.Tensor) -> float:
cost = 0.0
depth_aligned = align(depth, param)
for i, j in torch.combinations(torch.arange(ensemble_size)):
diff = depth_aligned[i] - depth_aligned[j]
cost += (diff**2).mean().sqrt().item()
if regularizer_strength > 0:
prediction, _ = ensemble(depth_aligned, return_uncertainty=False)
err_near = (0.0 - prediction.min()).abs().item()
err_far = (1.0 - prediction.max()).abs().item()
cost += (err_near + err_far) * regularizer_strength
return cost
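# The alignment objective minimized by `compute_param` below can be written as
#
#   cost(p) = sum_{i<j} sqrt(mean((d_i(p) - d_j(p))^2))
#             + regularizer_strength * (|min(e(p))| + |1 - max(e(p))|)
#
# where d_k(p) are the ensemble members aligned with parameters p and e(p) is their
# ensembled prediction (mean or median, per `reduction`).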
def compute_param(depth: torch.Tensor):
import scipy
depth_to_align = depth.to(torch.float32)
if max_res is not None and max(depth_to_align.shape[2:]) > max_res:
depth_to_align = MarigoldImageProcessor.resize_to_max_edge(depth_to_align, max_res, "nearest-exact")
param = init_param(depth_to_align)
res = scipy.optimize.minimize(
partial(cost_fn, depth=depth_to_align),
param,
method="BFGS",
tol=tol,
options={"maxiter": max_iter, "disp": False},
)
return res.x
requires_aligning = scale_invariant or shift_invariant
ensemble_size = depth.shape[0]
if requires_aligning:
param = compute_param(depth)
depth = align(depth, param)
depth, uncertainty = ensemble(depth, return_uncertainty=output_uncertainty)
depth_max = depth.max()
if scale_invariant and shift_invariant:
depth_min = depth.min()
elif scale_invariant:
depth_min = 0
else:
raise ValueError("Unrecognized alignment.")
depth_range = (depth_max - depth_min).clamp(min=1e-6)
depth = (depth - depth_min) / depth_range
if output_uncertainty:
uncertainty /= depth_range
return depth, uncertainty # [1,1,H,W], [1,1,H,W]
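# A minimal sketch of calling the ensembling routine directly (requires scipy; the tensor below is
# random data for illustration only):
#
#   depth = torch.rand(3, 1, 64, 64)                                  # E=3 affine-invariant predictions
#   ensembled, unc = MarigoldDepthPipeline.ensemble_depth(depth, output_uncertainty=True)
#   # ensembled: [1, 1, 64, 64] with values rescaled to [0, 1]; unc: [1, 1, 64, 64]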
| diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/marigold/pipeline_marigold_depth.py",
"repo_id": "diffusers",
"token_count": 17444
} |
# Copyright 2024 PixArt-Sigma Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import html
import inspect
import re
import urllib.parse as ul
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import T5EncoderModel, T5Tokenizer
from ...image_processor import PixArtImageProcessor
from ...models import AutoencoderKL, PixArtTransformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
BACKENDS_MAPPING,
deprecate,
is_bs4_available,
is_ftfy_available,
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from ..pixart_alpha.pipeline_pixart_alpha import (
ASPECT_RATIO_256_BIN,
ASPECT_RATIO_512_BIN,
ASPECT_RATIO_1024_BIN,
)
from ..pixart_alpha.pipeline_pixart_sigma import ASPECT_RATIO_2048_BIN
from .pag_utils import PAGMixin
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
XLA_AVAILABLE = True
else:
XLA_AVAILABLE = False
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
if is_bs4_available():
from bs4 import BeautifulSoup
if is_ftfy_available():
import ftfy
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> from diffusers import AutoPipelineForText2Image
>>> pipe = AutoPipelineForText2Image.from_pretrained(
... "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
... torch_dtype=torch.float16,
... pag_applied_layers=["blocks.14"],
... enable_pag=True,
... )
>>> pipe = pipe.to("cuda")
>>> prompt = "A small cactus with a happy face in the Sahara desert"
>>> image = pipe(prompt, pag_scale=4.0, guidance_scale=1.0).images[0]
```
"""
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
scheduler,
num_inference_steps: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
timesteps: Optional[List[int]] = None,
sigmas: Optional[List[float]] = None,
**kwargs,
):
r"""
Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
Args:
scheduler (`SchedulerMixin`):
The scheduler to get timesteps from.
num_inference_steps (`int`):
The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
must be `None`.
device (`str` or `torch.device`, *optional*):
The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
timesteps (`List[int]`, *optional*):
Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
`num_inference_steps` and `sigmas` must be `None`.
sigmas (`List[float]`, *optional*):
Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
`num_inference_steps` and `timesteps` must be `None`.
Returns:
`Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
second element is the number of inference steps.
"""
if timesteps is not None and sigmas is not None:
raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
if timesteps is not None:
accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accepts_timesteps:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" timestep schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
elif sigmas is not None:
accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
if not accept_sigmas:
raise ValueError(
f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
f" sigmas schedules. Please check whether you are using the correct scheduler."
)
scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
timesteps = scheduler.timesteps
num_inference_steps = len(timesteps)
else:
scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
timesteps = scheduler.timesteps
return timesteps, num_inference_steps
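# A minimal usage sketch (the scheduler is assumed to be any diffusers scheduler whose
# `set_timesteps` accepts the chosen arguments):
#
#   timesteps, n = retrieve_timesteps(pipe.scheduler, num_inference_steps=20, device="cpu")
#   # `timesteps` holds the 20-step schedule; passing `timesteps=[...]` or `sigmas=[...]` instead
#   # overrides the scheduler's spacing strategy, and only one of the three may be provided.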
class PixArtSigmaPAGPipeline(DiffusionPipeline, PAGMixin):
r"""
[PAG pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/pag) for text-to-image generation
using PixArt-Sigma.
"""
bad_punct_regex = re.compile(
r"["
+ "#®•©™&@·º½¾¿¡§~"
+ r"\)"
+ r"\("
+ r"\]"
+ r"\["
+ r"\}"
+ r"\{"
+ r"\|"
+ "\\"
+ r"\/"
+ r"\*"
+ r"]{1,}"
) # noqa
_optional_components = ["tokenizer", "text_encoder"]
model_cpu_offload_seq = "text_encoder->transformer->vae"
def __init__(
self,
tokenizer: T5Tokenizer,
text_encoder: T5EncoderModel,
vae: AutoencoderKL,
transformer: PixArtTransformer2DModel,
scheduler: KarrasDiffusionSchedulers,
pag_applied_layers: Union[str, List[str]] = "blocks.1", # 1st transformer block
):
super().__init__()
self.register_modules(
tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
)
self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor)
self.set_pag_applied_layers(pag_applied_layers)
# Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.encode_prompt with 120->300
def encode_prompt(
self,
prompt: Union[str, List[str]],
do_classifier_free_guidance: bool = True,
negative_prompt: str = "",
num_images_per_prompt: int = 1,
device: Optional[torch.device] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
prompt_attention_mask: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
clean_caption: bool = False,
max_sequence_length: int = 300,
**kwargs,
):
r"""
Encodes the prompt into text encoder hidden states.
Args:
prompt (`str` or `List[str]`, *optional*):
prompt to be encoded
negative_prompt (`str` or `List[str]`, *optional*):
The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`
instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
PixArt-Alpha, this should be "".
do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
whether to use classifier free guidance or not
num_images_per_prompt (`int`, *optional*, defaults to 1):
number of images that should be generated per prompt
device: (`torch.device`, *optional*):
torch device to place the resulting embeddings on
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. For PixArt-Alpha, it should be the embeddings of the ""
string.
clean_caption (`bool`, defaults to `False`):
If `True`, the function will preprocess and clean the provided caption before encoding.
max_sequence_length (`int`, defaults to 300): Maximum sequence length to use for the prompt.
"""
if "mask_feature" in kwargs:
deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version."
deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False)
if device is None:
device = self._execution_device
# See Section 3.1. of the paper.
max_length = max_sequence_length
if prompt_embeds is None:
prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
text_inputs = self.tokenizer(
prompt,
padding="max_length",
max_length=max_length,
truncation=True,
add_special_tokens=True,
return_tensors="pt",
)
text_input_ids = text_inputs.input_ids
untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
text_input_ids, untruncated_ids
):
removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
logger.warning(
"The following part of your input was truncated because T5 can only handle sequences up to"
f" {max_length} tokens: {removed_text}"
)
prompt_attention_mask = text_inputs.attention_mask
prompt_attention_mask = prompt_attention_mask.to(device)
prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)
prompt_embeds = prompt_embeds[0]
if self.text_encoder is not None:
dtype = self.text_encoder.dtype
elif self.transformer is not None:
dtype = self.transformer.dtype
else:
dtype = None
prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
bs_embed, seq_len, _ = prompt_embeds.shape
# duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
prompt_attention_mask = prompt_attention_mask.repeat(1, num_images_per_prompt)
prompt_attention_mask = prompt_attention_mask.view(bs_embed * num_images_per_prompt, -1)
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance and negative_prompt_embeds is None:
uncond_tokens = [negative_prompt] * bs_embed if isinstance(negative_prompt, str) else negative_prompt
uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
max_length = prompt_embeds.shape[1]
uncond_input = self.tokenizer(
uncond_tokens,
padding="max_length",
max_length=max_length,
truncation=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt",
)
negative_prompt_attention_mask = uncond_input.attention_mask
negative_prompt_attention_mask = negative_prompt_attention_mask.to(device)
negative_prompt_embeds = self.text_encoder(
uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask
)
negative_prompt_embeds = negative_prompt_embeds[0]
if do_classifier_free_guidance:
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
seq_len = negative_prompt_embeds.shape[1]
negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
negative_prompt_embeds = negative_prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(1, num_images_per_prompt)
negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed * num_images_per_prompt, -1)
else:
negative_prompt_embeds = None
negative_prompt_attention_mask = None
return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
def prepare_extra_step_kwargs(self, generator, eta):
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
# check if the scheduler accepts generator
accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
extra_step_kwargs["generator"] = generator
return extra_step_kwargs
# Copied from diffusers.pipelines.pixart_alpha.pipeline_pixart_alpha.PixArtAlphaPipeline.check_inputs
def check_inputs(
self,
prompt,
height,
width,
negative_prompt,
callback_steps,
prompt_embeds=None,
negative_prompt_embeds=None,
prompt_attention_mask=None,
negative_prompt_attention_mask=None,
):
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(callback_steps)}."
)
if prompt is not None and prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
" only forward one of the two."
)
elif prompt is None and prompt_embeds is None:
raise ValueError(
"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
)
elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
if prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if negative_prompt is not None and negative_prompt_embeds is not None:
raise ValueError(
f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
)
if prompt_embeds is not None and prompt_attention_mask is None:
raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")
if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
if prompt_embeds is not None and negative_prompt_embeds is not None:
if prompt_embeds.shape != negative_prompt_embeds.shape:
raise ValueError(
"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
f" {negative_prompt_embeds.shape}."
)
if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
raise ValueError(
"`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
f" {negative_prompt_attention_mask.shape}."
)
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
def _text_preprocessing(self, text, clean_caption=False):
if clean_caption and not is_bs4_available():
logger.warning(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
logger.warning("Setting `clean_caption` to False...")
clean_caption = False
if clean_caption and not is_ftfy_available():
logger.warning(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
logger.warning("Setting `clean_caption` to False...")
clean_caption = False
if not isinstance(text, (tuple, list)):
text = [text]
def process(text: str):
if clean_caption:
text = self._clean_caption(text)
text = self._clean_caption(text)
else:
text = text.lower().strip()
return text
return [process(t) for t in text]
# Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
def _clean_caption(self, caption):
caption = str(caption)
caption = ul.unquote_plus(caption)
caption = caption.strip().lower()
caption = re.sub("<person>", "person", caption)
# urls:
caption = re.sub(
r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
"",
caption,
) # regex for urls
caption = re.sub(
r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
"",
caption,
) # regex for urls
# html:
caption = BeautifulSoup(caption, features="html.parser").text
# @<nickname>
caption = re.sub(r"@[\w\d]+\b", "", caption)
# 31C0—31EF CJK Strokes
# 31F0—31FF Katakana Phonetic Extensions
# 3200—32FF Enclosed CJK Letters and Months
# 3300—33FF CJK Compatibility
# 3400—4DBF CJK Unified Ideographs Extension A
# 4DC0—4DFF Yijing Hexagram Symbols
# 4E00—9FFF CJK Unified Ideographs
caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
#######################################################
# все виды тире / all types of dash --> "-"
caption = re.sub(
r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
"-",
caption,
)
# кавычки к одному стандарту
caption = re.sub(r"[`´«»“”¨]", '"', caption)
caption = re.sub(r"[‘’]", "'", caption)
# html entity &quot;
caption = re.sub(r"&quot;?", "", caption)
# html entity &amp
caption = re.sub(r"&amp", "", caption)
# ip addresses:
caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
# article ids:
caption = re.sub(r"\d:\d\d\s+$", "", caption)
# \n
caption = re.sub(r"\\n", " ", caption)
# "#123"
caption = re.sub(r"#\d{1,3}\b", "", caption)
# "#12345.."
caption = re.sub(r"#\d{5,}\b", "", caption)
# "123456.."
caption = re.sub(r"\b\d{6,}\b", "", caption)
# filenames:
caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
#
caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
# this-is-my-cute-cat / this_is_my_cute_cat
regex2 = re.compile(r"(?:\-|\_)")
if len(re.findall(regex2, caption)) > 3:
caption = re.sub(regex2, " ", caption)
caption = ftfy.fix_text(caption)
caption = html.unescape(html.unescape(caption))
caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
caption = re.sub(r"\bpage\s+\d+\b", "", caption)
caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
caption = re.sub(r"\b\s+\:\s+", r": ", caption)
caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
caption = re.sub(r"\s+", " ", caption)
caption.strip()
caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
caption = re.sub(r"^\.\S+$", "", caption)
return caption.strip()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
shape = (
batch_size,
num_channels_latents,
int(height) // self.vae_scale_factor,
int(width) // self.vae_scale_factor,
)
if isinstance(generator, list) and len(generator) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators."
)
if latents is None:
latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
else:
latents = latents.to(device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
return latents
@torch.no_grad()
@replace_example_docstring(EXAMPLE_DOC_STRING)
def __call__(
self,
prompt: Union[str, List[str]] = None,
negative_prompt: str = "",
num_inference_steps: int = 20,
timesteps: List[int] = None,
sigmas: List[float] = None,
guidance_scale: float = 4.5,
num_images_per_prompt: Optional[int] = 1,
height: Optional[int] = None,
width: Optional[int] = None,
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
prompt_attention_mask: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
output_type: Optional[str] = "pil",
return_dict: bool = True,
callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
callback_steps: int = 1,
clean_caption: bool = True,
use_resolution_binning: bool = True,
max_sequence_length: int = 300,
pag_scale: float = 3.0,
pag_adaptive_scale: float = 0.0,
) -> Union[ImagePipelineOutput, Tuple]:
"""
Function invoked when calling the pipeline for generation.
Args:
prompt (`str` or `List[str]`, *optional*):
The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
instead.
negative_prompt (`str` or `List[str]`, *optional*):
The prompt or prompts not to guide the image generation. If not defined, one has to pass
`negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
less than `1`).
num_inference_steps (`int`, *optional*, defaults to 20):
The number of denoising steps. More denoising steps usually lead to a higher quality image at the
expense of slower inference.
timesteps (`List[int]`, *optional*):
Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
passed will be used. Must be in descending order.
sigmas (`List[float]`, *optional*):
Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
will be used.
guidance_scale (`float`, *optional*, defaults to 4.5):
Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
`guidance_scale` is defined as `w` of equation 2. of [Imagen
Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
1`. A higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
usually at the expense of lower image quality.
num_images_per_prompt (`int`, *optional*, defaults to 1):
The number of images to generate per prompt.
height (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
The height in pixels of the generated image.
width (`int`, *optional*, defaults to `self.transformer.config.sample_size * self.vae_scale_factor`):
The width in pixels of the generated image.
eta (`float`, *optional*, defaults to 0.0):
Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
[`schedulers.DDIMScheduler`], will be ignored for others.
generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
to make generation deterministic.
latents (`torch.Tensor`, *optional*):
Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
tensor will be generated by sampling using the supplied random `generator`.
prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
provided, text embeddings will be generated from `prompt` input argument.
prompt_attention_mask (`torch.Tensor`, *optional*): Pre-generated attention mask for text embeddings.
negative_prompt_embeds (`torch.Tensor`, *optional*):
Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
negative_prompt_attention_mask (`torch.Tensor`, *optional*):
Pre-generated attention mask for negative text embeddings.
output_type (`str`, *optional*, defaults to `"pil"`):
The output format of the generated image. Choose between
[PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
callback (`Callable`, *optional*):
A function that will be called every `callback_steps` steps during inference. The function will be
called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
callback_steps (`int`, *optional*, defaults to 1):
The frequency at which the `callback` function will be called. If not specified, the callback will be
called at every step.
clean_caption (`bool`, *optional*, defaults to `True`):
Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
be installed. If the dependencies are not installed, the embeddings will be created from the raw
prompt.
use_resolution_binning (`bool`, *optional*, defaults to `True`):
If set to `True`, the requested height and width are first mapped to the closest resolutions using
`ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to
the requested resolution. Useful for generating non-square images.
max_sequence_length (`int` defaults to 300): Maximum sequence length to use with the `prompt`.
pag_scale (`float`, *optional*, defaults to 3.0):
The scale factor for the perturbed attention guidance. If it is set to 0.0, the perturbed attention
guidance will not be used.
pag_adaptive_scale (`float`, *optional*, defaults to 0.0):
The adaptive scale factor for the perturbed attention guidance. If it is set to 0.0, `pag_scale` is
used.
Examples:
Returns:
[`~pipelines.ImagePipelineOutput`] or `tuple`:
If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
returned where the first element is a list with the generated images
"""
# 1. Check inputs. Raise error if not correct
height = height or self.transformer.config.sample_size * self.vae_scale_factor
width = width or self.transformer.config.sample_size * self.vae_scale_factor
if use_resolution_binning:
if self.transformer.config.sample_size == 256:
aspect_ratio_bin = ASPECT_RATIO_2048_BIN
elif self.transformer.config.sample_size == 128:
aspect_ratio_bin = ASPECT_RATIO_1024_BIN
elif self.transformer.config.sample_size == 64:
aspect_ratio_bin = ASPECT_RATIO_512_BIN
elif self.transformer.config.sample_size == 32:
aspect_ratio_bin = ASPECT_RATIO_256_BIN
else:
raise ValueError("Invalid sample size")
orig_height, orig_width = height, width
height, width = self.image_processor.classify_height_width_bin(height, width, ratios=aspect_ratio_bin)
self.check_inputs(
prompt,
height,
width,
negative_prompt,
callback_steps,
prompt_embeds,
negative_prompt_embeds,
prompt_attention_mask,
negative_prompt_attention_mask,
)
self._pag_scale = pag_scale
self._pag_adaptive_scale = pag_adaptive_scale
# 2. Default height and width to transformer
if prompt is not None and isinstance(prompt, str):
batch_size = 1
elif prompt is not None and isinstance(prompt, list):
batch_size = len(prompt)
else:
batch_size = prompt_embeds.shape[0]
device = self._execution_device
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
do_classifier_free_guidance = guidance_scale > 1.0
# 3. Encode input prompt
(
prompt_embeds,
prompt_attention_mask,
negative_prompt_embeds,
negative_prompt_attention_mask,
) = self.encode_prompt(
prompt,
do_classifier_free_guidance,
negative_prompt=negative_prompt,
num_images_per_prompt=num_images_per_prompt,
device=device,
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
prompt_attention_mask=prompt_attention_mask,
negative_prompt_attention_mask=negative_prompt_attention_mask,
clean_caption=clean_caption,
max_sequence_length=max_sequence_length,
)
if self.do_perturbed_attention_guidance:
prompt_embeds = self._prepare_perturbed_attention_guidance(
prompt_embeds, negative_prompt_embeds, do_classifier_free_guidance
)
prompt_attention_mask = self._prepare_perturbed_attention_guidance(
prompt_attention_mask, negative_prompt_attention_mask, do_classifier_free_guidance
)
elif do_classifier_free_guidance:
prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
# 4. Prepare timesteps
timesteps, num_inference_steps = retrieve_timesteps(
self.scheduler, num_inference_steps, device, timesteps, sigmas
)
# 5. Prepare latents.
latent_channels = self.transformer.config.in_channels
latents = self.prepare_latents(
batch_size * num_images_per_prompt,
latent_channels,
height,
width,
prompt_embeds.dtype,
device,
generator,
latents,
)
if self.do_perturbed_attention_guidance:
original_attn_proc = self.transformer.attn_processors
self._set_pag_attn_processor(
pag_applied_layers=self.pag_applied_layers,
do_classifier_free_guidance=do_classifier_free_guidance,
)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
# 6.1 Prepare micro-conditions.
added_cond_kwargs = {"resolution": None, "aspect_ratio": None}
# 7. Denoising loop
num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
with self.progress_bar(total=num_inference_steps) as progress_bar:
for i, t in enumerate(timesteps):
# expand the latents if we are doing classifier free guidance, perturbed-attention guidance, or both
latent_model_input = torch.cat([latents] * (prompt_embeds.shape[0] // latents.shape[0]))
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
current_timestep = t
if not torch.is_tensor(current_timestep):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = latent_model_input.device.type == "mps"
is_npu = latent_model_input.device.type == "npu"
if isinstance(current_timestep, float):
dtype = torch.float32 if (is_mps or is_npu) else torch.float64
else:
dtype = torch.int32 if (is_mps or is_npu) else torch.int64
current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
elif len(current_timestep.shape) == 0:
current_timestep = current_timestep[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
current_timestep = current_timestep.expand(latent_model_input.shape[0])
# predict noise model_output
noise_pred = self.transformer(
latent_model_input,
encoder_hidden_states=prompt_embeds,
encoder_attention_mask=prompt_attention_mask,
timestep=current_timestep,
added_cond_kwargs=added_cond_kwargs,
return_dict=False,
)[0]
# perform guidance
if self.do_perturbed_attention_guidance:
noise_pred = self._apply_perturbed_attention_guidance(
noise_pred, do_classifier_free_guidance, guidance_scale, current_timestep
)
elif do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
noise_pred = noise_pred.chunk(2, dim=1)[0]
else:
noise_pred = noise_pred
# compute previous image: x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
# call the callback, if provided
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
if callback is not None and i % callback_steps == 0:
step_idx = i // getattr(self.scheduler, "order", 1)
callback(step_idx, t, latents)
if XLA_AVAILABLE:
xm.mark_step()
if not output_type == "latent":
image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
if use_resolution_binning:
image = self.image_processor.resize_and_crop_tensor(image, orig_width, orig_height)
else:
image = latents
if not output_type == "latent":
image = self.image_processor.postprocess(image, output_type=output_type)
# Offload all models
self.maybe_free_model_hooks()
if self.do_perturbed_attention_guidance:
self.transformer.set_attn_processor(original_attn_proc)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=image)
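# Minimal, hedged usage sketch (hypothetical helper): the checkpoint id and the chosen
# `pag_applied_layers` below are assumptions for illustration; substitute the PixArt-Sigma
# checkpoint and transformer blocks you actually want to perturb.
def _example_pag_pixart_sigma():
    import torch

    pipe = PixArtSigmaPAGPipeline.from_pretrained(
        "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",  # assumed checkpoint id
        pag_applied_layers=["blocks.14"],  # assumed choice of perturbed-attention layers
        torch_dtype=torch.float16,
    ).to("cuda")
    image = pipe(
        "a small cactus with a happy face in the Sahara desert",
        guidance_scale=4.5,
        pag_scale=3.0,
    ).images[0]
    return image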
| diffusers/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/pag/pipeline_pag_pixart_sigma.py",
"repo_id": "diffusers",
"token_count": 19124
} |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import inspect
import os
from typing import Any, Dict, List, Optional, Union
import flax
import numpy as np
import PIL.Image
from flax.core.frozen_dict import FrozenDict
from huggingface_hub import create_repo, snapshot_download
from huggingface_hub.utils import validate_hf_hub_args
from PIL import Image
from tqdm.auto import tqdm
from ..configuration_utils import ConfigMixin
from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin
from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin
from ..utils import (
CONFIG_NAME,
BaseOutput,
PushToHubMixin,
http_user_agent,
is_transformers_available,
logging,
)
if is_transformers_available():
from transformers import FlaxPreTrainedModel
INDEX_FILE = "diffusion_flax_model.bin"
logger = logging.get_logger(__name__)
LOADABLE_CLASSES = {
"diffusers": {
"FlaxModelMixin": ["save_pretrained", "from_pretrained"],
"FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"],
"FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"],
},
"transformers": {
"PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
"PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
"FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"],
"FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
"ProcessorMixin": ["save_pretrained", "from_pretrained"],
"ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
},
}
ALL_IMPORTABLE_CLASSES = {}
for library in LOADABLE_CLASSES:
ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])
def import_flax_or_no_model(module, class_name):
try:
# 1. First make sure that if a Flax object is present, import this one
class_obj = getattr(module, "Flax" + class_name)
except AttributeError:
# 2. If this doesn't work, it's not a model and we don't append "Flax"
class_obj = getattr(module, class_name)
except AttributeError:
raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}")
return class_obj
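# Illustrative sketch (hypothetical helper) of the lookup rule above, assuming
# `transformers` with its Flax classes is installed.
def _example_import_flax_or_no_model():
    import transformers

    flax_text_encoder_cls = import_flax_or_no_model(transformers, "CLIPTextModel")  # resolves to FlaxCLIPTextModel
    tokenizer_cls = import_flax_or_no_model(transformers, "CLIPTokenizer")  # no Flax variant, plain class returned
    return flax_text_encoder_cls, tokenizer_cls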
@flax.struct.dataclass
class FlaxImagePipelineOutput(BaseOutput):
"""
Output class for image pipelines.
Args:
images (`List[PIL.Image.Image]` or `np.ndarray`)
List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
num_channels)`.
"""
images: Union[List[PIL.Image.Image], np.ndarray]
class FlaxDiffusionPipeline(ConfigMixin, PushToHubMixin):
r"""
Base class for Flax-based pipelines.
[`FlaxDiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and
provides methods for loading, downloading and saving models. It also includes methods to:
- enable/disable the progress bar for the denoising iteration
Class attributes:
- **config_name** ([`str`]) -- The configuration filename that stores the class and module names of all the
diffusion pipeline's components.
"""
config_name = "model_index.json"
def register_modules(self, **kwargs):
# import it here to avoid circular import
from diffusers import pipelines
for name, module in kwargs.items():
if module is None:
register_dict = {name: (None, None)}
else:
# retrieve library
library = module.__module__.split(".")[0]
# check if the module is a pipeline module
pipeline_dir = module.__module__.split(".")[-2]
path = module.__module__.split(".")
is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)
# if library is not in LOADABLE_CLASSES, then it is a custom module.
# Or if it's a pipeline module, then the module is inside the pipeline
# folder so we set the library to module name.
if library not in LOADABLE_CLASSES or is_pipeline_module:
library = pipeline_dir
# retrieve class_name
class_name = module.__class__.__name__
register_dict = {name: (library, class_name)}
# save model index config
self.register_to_config(**register_dict)
# set models
setattr(self, name, module)
def save_pretrained(
self,
save_directory: Union[str, os.PathLike],
params: Union[Dict, FrozenDict],
push_to_hub: bool = False,
**kwargs,
):
# TODO: handle inference_state
"""
Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its
class implements both a save and loading method. The pipeline is easily reloaded using the
[`~FlaxDiffusionPipeline.from_pretrained`] class method.
Arguments:
save_directory (`str` or `os.PathLike`):
Directory to which to save. Will be created if it doesn't exist.
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`Dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
self.save_config(save_directory)
model_index_dict = dict(self.config)
model_index_dict.pop("_class_name")
model_index_dict.pop("_diffusers_version")
model_index_dict.pop("_module", None)
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
private = kwargs.pop("private", None)
create_pr = kwargs.pop("create_pr", False)
token = kwargs.pop("token", None)
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id
for pipeline_component_name in model_index_dict.keys():
sub_model = getattr(self, pipeline_component_name)
if sub_model is None:
# edge case for saving a pipeline with safety_checker=None
continue
model_cls = sub_model.__class__
save_method_name = None
# search for the model's base class in LOADABLE_CLASSES
for library_name, library_classes in LOADABLE_CLASSES.items():
library = importlib.import_module(library_name)
for base_class, save_load_methods in library_classes.items():
class_candidate = getattr(library, base_class, None)
if class_candidate is not None and issubclass(model_cls, class_candidate):
# if we found a suitable base class in LOADABLE_CLASSES then grab its save method
save_method_name = save_load_methods[0]
break
if save_method_name is not None:
break
save_method = getattr(sub_model, save_method_name)
expects_params = "params" in set(inspect.signature(save_method).parameters.keys())
if expects_params:
save_method(
os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name]
)
else:
save_method(os.path.join(save_directory, pipeline_component_name))
if push_to_hub:
self._upload_folder(
save_directory,
repo_id,
token=token,
commit_message=commit_message,
create_pr=create_pr,
)
@classmethod
@validate_hf_hub_args
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
r"""
Instantiate a Flax-based diffusion pipeline from pretrained pipeline weights.
The pipeline is set in evaluation mode (`model.eval()`) by default and dropout modules are deactivated.
If you get the error message below, you need to finetune the weights for your downstream task:
```
Some weights of FlaxUNet2DConditionModel were not initialized from the model checkpoint at stable-diffusion-v1-5/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
```
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
Can be either:
- A string, the *repo id* (for example `stable-diffusion-v1-5/stable-diffusion-v1-5`) of a
pretrained pipeline hosted on the Hub.
- A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
using [`~FlaxDiffusionPipeline.save_pretrained`].
dtype (`str` or `jnp.dtype`, *optional*):
Override the default `jnp.dtype` and load the model under this dtype. If `"auto"`, the dtype is
automatically derived from the model's weights.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
proxies (`Dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info(`bool`, *optional*, defaults to `False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only (`bool`, *optional*, defaults to `False`):
Whether to only load local model weights and configuration files or not. If set to `True`, the model
won't be downloaded from the Hub.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
`diffusers-cli login` (stored in `~/.huggingface`) is used.
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.
mirror (`str`, *optional*):
Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
information.
kwargs (remaining dictionary of keyword arguments, *optional*):
Can be used to overwrite load and saveable variables (the pipeline components) of the specific pipeline
class. The overwritten components are passed directly to the pipelines `__init__` method.
<Tip>
To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
`huggingface-cli login`.
</Tip>
Examples:
```py
>>> from diffusers import FlaxDiffusionPipeline
>>> # Download pipeline from huggingface.co and cache.
>>> # Requires to be logged in to Hugging Face hub,
>>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens)
>>> pipeline, params = FlaxDiffusionPipeline.from_pretrained(
... "stable-diffusion-v1-5/stable-diffusion-v1-5",
... variant="bf16",
... dtype=jnp.bfloat16,
... )
>>> # Download pipeline, but use a different scheduler
>>> from diffusers import FlaxDPMSolverMultistepScheduler
>>> model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
>>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
... model_id,
... subfolder="scheduler",
... )
>>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained(
... model_id, variant="bf16", dtype=jnp.bfloat16, scheduler=dpmpp
... )
>>> dpm_params["scheduler"] = dpmpp_state
```
"""
cache_dir = kwargs.pop("cache_dir", None)
proxies = kwargs.pop("proxies", None)
local_files_only = kwargs.pop("local_files_only", False)
token = kwargs.pop("token", None)
revision = kwargs.pop("revision", None)
from_pt = kwargs.pop("from_pt", False)
use_memory_efficient_attention = kwargs.pop("use_memory_efficient_attention", False)
split_head_dim = kwargs.pop("split_head_dim", False)
dtype = kwargs.pop("dtype", None)
# 1. Download the checkpoints and configs
# use snapshot download here to get it working from from_pretrained
if not os.path.isdir(pretrained_model_name_or_path):
config_dict = cls.load_config(
pretrained_model_name_or_path,
cache_dir=cache_dir,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
)
# make sure we only download sub-folders and `diffusers` filenames
folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
allow_patterns = [os.path.join(k, "*") for k in folder_names]
allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name]
ignore_patterns = ["*.bin", "*.safetensors"] if not from_pt else []
ignore_patterns += ["*.onnx", "*.onnx_data", "*.xml", "*.pb"]
if cls != FlaxDiffusionPipeline:
requested_pipeline_class = cls.__name__
else:
requested_pipeline_class = config_dict.get("_class_name", cls.__name__)
requested_pipeline_class = (
requested_pipeline_class
if requested_pipeline_class.startswith("Flax")
else "Flax" + requested_pipeline_class
)
user_agent = {"pipeline_class": requested_pipeline_class}
user_agent = http_user_agent(user_agent)
# download all allow_patterns
cached_folder = snapshot_download(
pretrained_model_name_or_path,
cache_dir=cache_dir,
proxies=proxies,
local_files_only=local_files_only,
token=token,
revision=revision,
allow_patterns=allow_patterns,
ignore_patterns=ignore_patterns,
user_agent=user_agent,
)
else:
cached_folder = pretrained_model_name_or_path
config_dict = cls.load_config(cached_folder)
# 2. Load the pipeline class, if using custom module then load it from the hub
# if we load from explicit class, let's use it
if cls != FlaxDiffusionPipeline:
pipeline_class = cls
else:
diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
class_name = (
config_dict["_class_name"]
if config_dict["_class_name"].startswith("Flax")
else "Flax" + config_dict["_class_name"]
)
pipeline_class = getattr(diffusers_module, class_name)
# some modules can be passed directly to the init
# in this case they are already instantiated in `kwargs`
# extract them here
expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)
passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}
init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)
# define init kwargs
init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict}
init_kwargs = {**init_kwargs, **passed_pipe_kwargs}
# remove `null` components
def load_module(name, value):
if value[0] is None:
return False
if name in passed_class_obj and passed_class_obj[name] is None:
return False
return True
init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}
# Throw nice warnings / errors for fast accelerate loading
if len(unused_kwargs) > 0:
logger.warning(
f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored."
)
# inference_params
params = {}
# import it here to avoid circular import
from diffusers import pipelines
# 3. Load each module in the pipeline
for name, (library_name, class_name) in init_dict.items():
if class_name is None:
# edge case for when the pipeline was saved with safety_checker=None
init_kwargs[name] = None
continue
is_pipeline_module = hasattr(pipelines, library_name)
loaded_sub_model = None
sub_model_should_be_defined = True
# if the model is in a pipeline module, then we load it from the pipeline
if name in passed_class_obj:
# 1. check that passed_class_obj has correct parent class
if not is_pipeline_module:
library = importlib.import_module(library_name)
class_obj = getattr(library, class_name)
importable_classes = LOADABLE_CLASSES[library_name]
class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
expected_class_obj = None
for class_name, class_candidate in class_candidates.items():
if class_candidate is not None and issubclass(class_obj, class_candidate):
expected_class_obj = class_candidate
if not issubclass(passed_class_obj[name].__class__, expected_class_obj):
raise ValueError(
f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be"
f" {expected_class_obj}"
)
elif passed_class_obj[name] is None:
logger.warning(
f"You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note"
f" that this might lead to problems when using {pipeline_class} and is not recommended."
)
sub_model_should_be_defined = False
else:
logger.warning(
f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it"
" has the correct type"
)
# set passed class object
loaded_sub_model = passed_class_obj[name]
elif is_pipeline_module:
pipeline_module = getattr(pipelines, library_name)
class_obj = import_flax_or_no_model(pipeline_module, class_name)
importable_classes = ALL_IMPORTABLE_CLASSES
class_candidates = {c: class_obj for c in importable_classes.keys()}
else:
# else we just import it from the library.
library = importlib.import_module(library_name)
class_obj = import_flax_or_no_model(library, class_name)
importable_classes = LOADABLE_CLASSES[library_name]
class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}
if loaded_sub_model is None and sub_model_should_be_defined:
load_method_name = None
for class_name, class_candidate in class_candidates.items():
if class_candidate is not None and issubclass(class_obj, class_candidate):
load_method_name = importable_classes[class_name][1]
load_method = getattr(class_obj, load_method_name)
# check if the module is in a subdirectory
if os.path.isdir(os.path.join(cached_folder, name)):
loadable_folder = os.path.join(cached_folder, name)
else:
loadable_folder = cached_folder
if issubclass(class_obj, FlaxModelMixin):
loaded_sub_model, loaded_params = load_method(
loadable_folder,
from_pt=from_pt,
use_memory_efficient_attention=use_memory_efficient_attention,
split_head_dim=split_head_dim,
dtype=dtype,
)
params[name] = loaded_params
elif is_transformers_available() and issubclass(class_obj, FlaxPreTrainedModel):
if from_pt:
# TODO(Suraj): Fix this in Transformers. We should be able to use `_do_init=False` here
loaded_sub_model = load_method(loadable_folder, from_pt=from_pt)
loaded_params = loaded_sub_model.params
del loaded_sub_model._params
else:
loaded_sub_model, loaded_params = load_method(loadable_folder, _do_init=False)
params[name] = loaded_params
elif issubclass(class_obj, FlaxSchedulerMixin):
loaded_sub_model, scheduler_state = load_method(loadable_folder)
params[name] = scheduler_state
else:
loaded_sub_model = load_method(loadable_folder)
init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)
# 4. Potentially add passed objects if expected
missing_modules = set(expected_modules) - set(init_kwargs.keys())
passed_modules = list(passed_class_obj.keys())
if len(missing_modules) > 0 and missing_modules <= set(passed_modules):
for module in missing_modules:
init_kwargs[module] = passed_class_obj.get(module, None)
elif len(missing_modules) > 0:
passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs
raise ValueError(
f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
)
model = pipeline_class(**init_kwargs, dtype=dtype)
return model, params
@classmethod
def _get_signature_keys(cls, obj):
parameters = inspect.signature(obj.__init__).parameters
required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})
expected_modules = set(required_parameters.keys()) - {"self"}
return expected_modules, optional_parameters
@property
def components(self) -> Dict[str, Any]:
r"""
The `self.components` property can be useful to run different pipelines with the same weights and
configurations without having to re-allocate memory.
Examples:
```py
>>> from diffusers import (
... FlaxStableDiffusionPipeline,
... FlaxStableDiffusionImg2ImgPipeline,
... )
>>> text2img = FlaxStableDiffusionPipeline.from_pretrained(
... "stable-diffusion-v1-5/stable-diffusion-v1-5", variant="bf16", dtype=jnp.bfloat16
... )
>>> img2img = FlaxStableDiffusionImg2ImgPipeline(**text2img.components)
```
Returns:
A dictionary containing all the modules needed to initialize the pipeline.
"""
expected_modules, optional_parameters = self._get_signature_keys(self)
components = {
k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters
}
if set(components.keys()) != expected_modules:
raise ValueError(
f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected"
f" {expected_modules} to be defined, but {components} are defined."
)
return components
@staticmethod
def numpy_to_pil(images):
"""
Convert a NumPy image or a batch of images to a PIL image.
"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8")
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
else:
pil_images = [Image.fromarray(image) for image in images]
return pil_images
# TODO: make it compatible with jax.lax
def progress_bar(self, iterable):
if not hasattr(self, "_progress_bar_config"):
self._progress_bar_config = {}
elif not isinstance(self._progress_bar_config, dict):
raise ValueError(
f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
)
return tqdm(iterable, **self._progress_bar_config)
def set_progress_bar_config(self, **kwargs):
self._progress_bar_config = kwargs
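# Small illustrative sketch (hypothetical helper) for the `numpy_to_pil` static method above.
def _example_numpy_to_pil():
    images = np.random.rand(2, 64, 64, 3)  # batch of float images in [0, 1]
    pil_images = FlaxDiffusionPipeline.numpy_to_pil(images)  # two 64x64 RGB PIL images
    return pil_images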
| diffusers/src/diffusers/pipelines/pipeline_flax_utils.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/pipeline_flax_utils.py",
"repo_id": "diffusers",
"token_count": 12080
} |
# Copyright 2024 Open AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
from ...utils import BaseOutput
from .camera import create_pan_cameras
def sample_pmf(pmf: torch.Tensor, n_samples: int) -> torch.Tensor:
r"""
Sample from the given discrete probability distribution with replacement.
The i-th bin is assumed to have mass pmf[i].
Args:
pmf: [batch_size, *shape, support_size, 1] where (pmf.sum(dim=-2) == 1).all()
n_samples: number of samples
Return:
indices sampled with replacement
"""
*shape, support_size, last_dim = pmf.shape
assert last_dim == 1
cdf = torch.cumsum(pmf.view(-1, support_size), dim=1)
inds = torch.searchsorted(cdf, torch.rand(cdf.shape[0], n_samples, device=cdf.device))
return inds.view(*shape, n_samples, 1).clamp(0, support_size - 1)
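# Illustrative sketch (hypothetical helper): three bins with probabilities 0.1 / 0.2 / 0.7;
# most of the 1000 draws should land in the last bin.
def _example_sample_pmf():
    pmf = torch.tensor([0.1, 0.2, 0.7]).view(1, 3, 1)  # [*shape=(1,), support_size=3, 1]
    inds = sample_pmf(pmf, n_samples=1000)  # [1, 1000, 1], values in {0, 1, 2}
    return torch.bincount(inds.flatten(), minlength=3)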
def posenc_nerf(x: torch.Tensor, min_deg: int = 0, max_deg: int = 15) -> torch.Tensor:
"""
Concatenate x and its positional encodings, following NeRF.
Reference: https://arxiv.org/pdf/2210.04628.pdf
"""
if min_deg == max_deg:
return x
scales = 2.0 ** torch.arange(min_deg, max_deg, dtype=x.dtype, device=x.device)
*shape, dim = x.shape
xb = (x.reshape(-1, 1, dim) * scales.view(1, -1, 1)).reshape(*shape, -1)
assert xb.shape[-1] == dim * (max_deg - min_deg)
emb = torch.cat([xb, xb + math.pi / 2.0], axis=-1).sin()
return torch.cat([x, emb], dim=-1)
def encode_position(position):
return posenc_nerf(position, min_deg=0, max_deg=15)
def encode_direction(position, direction=None):
if direction is None:
return torch.zeros_like(posenc_nerf(position, min_deg=0, max_deg=8))
else:
return posenc_nerf(direction, min_deg=0, max_deg=8)
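# Illustrative sketch (hypothetical helper) of the NeRF positional-encoding output size:
# dim + dim * 2 * (max_deg - min_deg) channels per input point.
def _example_posenc_dims():
    position = torch.randn(4, 3)
    direction = torch.randn(4, 3)
    pos_feat = encode_position(position)  # [4, 3 + 3 * 2 * 15] == [4, 93]
    dir_feat = encode_direction(position, direction)  # [4, 3 + 3 * 2 * 8] == [4, 51]
    return pos_feat.shape, dir_feat.shape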
def _sanitize_name(x: str) -> str:
return x.replace(".", "__")
def integrate_samples(volume_range, ts, density, channels):
r"""
Function integrating the model output.
Args:
volume_range: Specifies the integral range [t0, t1]
ts: timesteps
density: torch.Tensor [batch_size, *shape, n_samples, 1]
channels: torch.Tensor [batch_size, *shape, n_samples, n_channels]
returns:
channels: integrated rgb output
weights: torch.Tensor [batch_size, *shape, n_samples, 1], where (density * transmittance)[i] is the weight of the rgb output at [..., i, :]
transmittance: transmittance of this volume
"""
# 1. Calculate the weights
_, _, dt = volume_range.partition(ts)
ddensity = density * dt
mass = torch.cumsum(ddensity, dim=-2)
transmittance = torch.exp(-mass[..., -1, :])
alphas = 1.0 - torch.exp(-ddensity)
Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2))
# This is the probability of light hitting and reflecting off of
# something at depth [..., i, :].
weights = alphas * Ts
# 2. Integrate channels
channels = torch.sum(channels * weights, dim=-2)
return channels, weights, transmittance
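# Illustrative sketch (hypothetical helper) of the compositing weights computed above, on a
# single ray with hand-picked densities and interval lengths.
def _example_compositing_weights():
    density = torch.tensor([[0.5], [1.0], [2.0]])  # [n_samples=3, 1]
    dt = torch.full_like(density, 0.1)  # interval lengths, normally from VolumeRange.partition
    ddensity = density * dt
    mass = torch.cumsum(ddensity, dim=-2)
    alphas = 1.0 - torch.exp(-ddensity)  # probability of the ray stopping inside each interval
    Ts = torch.exp(torch.cat([torch.zeros_like(mass[..., :1, :]), -mass[..., :-1, :]], dim=-2))
    weights = alphas * Ts  # sums to 1 - exp(-total mass) <= 1
    return weights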
def volume_query_points(volume, grid_size):
indices = torch.arange(grid_size**3, device=volume.bbox_min.device)
zs = indices % grid_size
ys = torch.div(indices, grid_size, rounding_mode="trunc") % grid_size
xs = torch.div(indices, grid_size**2, rounding_mode="trunc") % grid_size
combined = torch.stack([xs, ys, zs], dim=1)
return (combined.float() / (grid_size - 1)) * (volume.bbox_max - volume.bbox_min) + volume.bbox_min
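# Illustrative sketch (hypothetical helper): any object exposing `bbox_min` / `bbox_max`
# tensors works here; the real caller passes a BoundingBoxVolume.
def _example_volume_query_points():
    from types import SimpleNamespace

    volume = SimpleNamespace(bbox_min=torch.tensor([-1.0, -1.0, -1.0]), bbox_max=torch.tensor([1.0, 1.0, 1.0]))
    points = volume_query_points(volume, grid_size=4)  # [4**3, 3] xyz coordinates spanning the box
    return points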
def _convert_srgb_to_linear(u: torch.Tensor):
return torch.where(u <= 0.04045, u / 12.92, ((u + 0.055) / 1.055) ** 2.4)
def _create_flat_edge_indices(
flat_cube_indices: torch.Tensor,
grid_size: Tuple[int, int, int],
):
num_xs = (grid_size[0] - 1) * grid_size[1] * grid_size[2]
y_offset = num_xs
num_ys = grid_size[0] * (grid_size[1] - 1) * grid_size[2]
z_offset = num_xs + num_ys
return torch.stack(
[
# Edges spanning x-axis.
flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
+ flat_cube_indices[:, 1] * grid_size[2]
+ flat_cube_indices[:, 2],
flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
+ (flat_cube_indices[:, 1] + 1) * grid_size[2]
+ flat_cube_indices[:, 2],
flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
+ flat_cube_indices[:, 1] * grid_size[2]
+ flat_cube_indices[:, 2]
+ 1,
flat_cube_indices[:, 0] * grid_size[1] * grid_size[2]
+ (flat_cube_indices[:, 1] + 1) * grid_size[2]
+ flat_cube_indices[:, 2]
+ 1,
# Edges spanning y-axis.
(
y_offset
+ flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2]
+ flat_cube_indices[:, 1] * grid_size[2]
+ flat_cube_indices[:, 2]
),
(
y_offset
+ (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2]
+ flat_cube_indices[:, 1] * grid_size[2]
+ flat_cube_indices[:, 2]
),
(
y_offset
+ flat_cube_indices[:, 0] * (grid_size[1] - 1) * grid_size[2]
+ flat_cube_indices[:, 1] * grid_size[2]
+ flat_cube_indices[:, 2]
+ 1
),
(
y_offset
+ (flat_cube_indices[:, 0] + 1) * (grid_size[1] - 1) * grid_size[2]
+ flat_cube_indices[:, 1] * grid_size[2]
+ flat_cube_indices[:, 2]
+ 1
),
# Edges spanning z-axis.
(
z_offset
+ flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1)
+ flat_cube_indices[:, 1] * (grid_size[2] - 1)
+ flat_cube_indices[:, 2]
),
(
z_offset
+ (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1)
+ flat_cube_indices[:, 1] * (grid_size[2] - 1)
+ flat_cube_indices[:, 2]
),
(
z_offset
+ flat_cube_indices[:, 0] * grid_size[1] * (grid_size[2] - 1)
+ (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1)
+ flat_cube_indices[:, 2]
),
(
z_offset
+ (flat_cube_indices[:, 0] + 1) * grid_size[1] * (grid_size[2] - 1)
+ (flat_cube_indices[:, 1] + 1) * (grid_size[2] - 1)
+ flat_cube_indices[:, 2]
),
],
dim=-1,
)
class VoidNeRFModel(nn.Module):
"""
Implements the default empty space model where all queries are rendered as background.
"""
def __init__(self, background, channel_scale=255.0):
super().__init__()
background = nn.Parameter(torch.from_numpy(np.array(background)).to(dtype=torch.float32) / channel_scale)
self.register_buffer("background", background)
def forward(self, position):
background = self.background[None].to(position.device)
shape = position.shape[:-1]
ones = [1] * (len(shape) - 1)
n_channels = background.shape[-1]
background = torch.broadcast_to(background.view(background.shape[0], *ones, n_channels), [*shape, n_channels])
return background
@dataclass
class VolumeRange:
t0: torch.Tensor
t1: torch.Tensor
intersected: torch.Tensor
def __post_init__(self):
assert self.t0.shape == self.t1.shape == self.intersected.shape
def partition(self, ts):
"""
Partitions t0 and t1 into n_samples intervals.
Args:
ts: [batch_size, *shape, n_samples, 1]
Return:
lower: [batch_size, *shape, n_samples, 1] upper: [batch_size, *shape, n_samples, 1] delta: [batch_size,
*shape, n_samples, 1]
where
ts \\in [lower, upper] deltas = upper - lower
"""
mids = (ts[..., 1:, :] + ts[..., :-1, :]) * 0.5
lower = torch.cat([self.t0[..., None, :], mids], dim=-2)
upper = torch.cat([mids, self.t1[..., None, :]], dim=-2)
delta = upper - lower
assert lower.shape == upper.shape == delta.shape == ts.shape
return lower, upper, delta
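# Illustrative sketch (hypothetical helper) of `partition`: each sample ts[..., i, :] ends up
# inside [lower[..., i, :], upper[..., i, :]], and delta == upper - lower.
def _example_volume_range_partition():
    vrange = VolumeRange(
        t0=torch.zeros(1, 1),
        t1=torch.ones(1, 1),
        intersected=torch.ones(1, 1, dtype=torch.bool),
    )
    ts = torch.linspace(0.1, 0.9, 5).view(1, 5, 1)
    lower, upper, delta = vrange.partition(ts)  # all shaped [1, 5, 1]
    return lower, upper, delta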
class BoundingBoxVolume(nn.Module):
"""
Axis-aligned bounding box defined by the two opposite corners.
"""
def __init__(
self,
*,
bbox_min,
bbox_max,
min_dist: float = 0.0,
min_t_range: float = 1e-3,
):
"""
Args:
bbox_min: the left/bottommost corner of the bounding box
bbox_max: the other corner of the bounding box
min_dist: all rays should start at least this distance away from the origin.
"""
super().__init__()
self.min_dist = min_dist
self.min_t_range = min_t_range
self.bbox_min = torch.tensor(bbox_min)
self.bbox_max = torch.tensor(bbox_max)
self.bbox = torch.stack([self.bbox_min, self.bbox_max])
assert self.bbox.shape == (2, 3)
assert min_dist >= 0.0
assert min_t_range > 0.0
def intersect(
self,
origin: torch.Tensor,
direction: torch.Tensor,
t0_lower: Optional[torch.Tensor] = None,
epsilon=1e-6,
):
"""
Args:
origin: [batch_size, *shape, 3]
direction: [batch_size, *shape, 3]
t0_lower: Optional [batch_size, *shape, 1] lower bound of t0 when intersecting this volume.
params: Optional meta parameters in case Volume is parametric
epsilon: to stabilize calculations
Return:
A tuple of (t0, t1, intersected) where each has a shape [batch_size, *shape, 1]. If a ray intersects with
the volume, `o + td` is in the volume for all t in [t0, t1]. If the volume is bounded, t1 is guaranteed to
be on the boundary of the volume.
"""
batch_size, *shape, _ = origin.shape
ones = [1] * len(shape)
bbox = self.bbox.view(1, *ones, 2, 3).to(origin.device)
def _safe_divide(a, b, epsilon=1e-6):
return a / torch.where(b < 0, b - epsilon, b + epsilon)
ts = _safe_divide(bbox - origin[..., None, :], direction[..., None, :], epsilon=epsilon)
# Cases to think about:
#
# 1. t1 <= t0: the ray does not pass through the AABB.
# 2. t0 < t1 <= 0: the ray intersects but the BB is behind the origin.
# 3. t0 <= 0 <= t1: the ray starts from inside the BB
# 4. 0 <= t0 < t1: the ray is not inside and intersects with the BB twice.
#
# 1 and 4 are clearly handled from t0 < t1 below.
# Making t0 at least min_dist (>= 0) takes care of 2 and 3.
t0 = ts.min(dim=-2).values.max(dim=-1, keepdim=True).values.clamp(self.min_dist)
t1 = ts.max(dim=-2).values.min(dim=-1, keepdim=True).values
assert t0.shape == t1.shape == (batch_size, *shape, 1)
if t0_lower is not None:
assert t0.shape == t0_lower.shape
t0 = torch.maximum(t0, t0_lower)
intersected = t0 + self.min_t_range < t1
t0 = torch.where(intersected, t0, torch.zeros_like(t0))
t1 = torch.where(intersected, t1, torch.ones_like(t1))
return VolumeRange(t0=t0, t1=t1, intersected=intersected)
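# Illustrative check of the case analysis above (values assumed, not from the library):
# for the unit box bbox_min=(-1, -1, -1), bbox_max=(1, 1, 1), a ray with origin
# (0, 0, -3) and direction (0, 0, 1) gives t0 = 2 and t1 = 4 after the min/max
# reductions, and `intersected` is True because t0 + min_t_range < t1 (case 4 above).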
class StratifiedRaySampler(nn.Module):
"""
Instead of fixed intervals, a sample is drawn uniformly at random from each interval.
"""
def __init__(self, depth_mode: str = "linear"):
"""
:param depth_mode: "linear" samples ts linearly in depth, while "geometric" and "harmonic"
bias samples toward the near end of the ray so that closer points are sampled more densely.
"""
self.depth_mode = depth_mode
assert self.depth_mode in ("linear", "geometric", "harmonic")
def sample(
self,
t0: torch.Tensor,
t1: torch.Tensor,
n_samples: int,
epsilon: float = 1e-3,
) -> torch.Tensor:
"""
Args:
t0: start time has shape [batch_size, *shape, 1]
t1: finish time has shape [batch_size, *shape, 1]
n_samples: number of ts to sample
Return:
sampled ts of shape [batch_size, *shape, n_samples, 1]
"""
ones = [1] * (len(t0.shape) - 1)
ts = torch.linspace(0, 1, n_samples).view(*ones, n_samples).to(t0.dtype).to(t0.device)
if self.depth_mode == "linear":
ts = t0 * (1.0 - ts) + t1 * ts
elif self.depth_mode == "geometric":
ts = (t0.clamp(epsilon).log() * (1.0 - ts) + t1.clamp(epsilon).log() * ts).exp()
elif self.depth_mode == "harmonic":
# The original NeRF recommends this interpolation scheme for
# spherical scenes, but there could be some weird edge cases when
# the observer crosses from the inner to outer volume.
ts = 1.0 / (1.0 / t0.clamp(epsilon) * (1.0 - ts) + 1.0 / t1.clamp(epsilon) * ts)
mids = 0.5 * (ts[..., 1:] + ts[..., :-1])
upper = torch.cat([mids, t1], dim=-1)
lower = torch.cat([t0, mids], dim=-1)
# yiyi notes: add a random seed here for testing, don't forget to remove
torch.manual_seed(0)
t_rand = torch.rand_like(ts)
ts = lower + (upper - lower) * t_rand
return ts.unsqueeze(-1)
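# Worked example of the depth modes above (illustrative numbers): with t0 = 1 and t1 = 3,
# the midpoint of the interval falls at 2.0 in "linear" mode, at sqrt(3) ~ 1.73 in
# "geometric" mode, and at 1.5 in "harmonic" mode, i.e. the non-linear modes bias
# samples toward the near end of the ray.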
class ImportanceRaySampler(nn.Module):
"""
Given the initial estimate of densities, this samples more from regions/bins expected to have objects.
"""
def __init__(
self,
volume_range: VolumeRange,
ts: torch.Tensor,
weights: torch.Tensor,
blur_pool: bool = False,
alpha: float = 1e-5,
):
"""
Args:
volume_range: the range in which a ray intersects the given volume.
ts: earlier samples from the coarse rendering step
weights: discretized version of density * transmittance
blur_pool: if true, use 2-tap max + 2-tap blur filter from mip-NeRF.
alpha: small value to add to weights.
"""
self.volume_range = volume_range
self.ts = ts.clone().detach()
self.weights = weights.clone().detach()
self.blur_pool = blur_pool
self.alpha = alpha
@torch.no_grad()
def sample(self, t0: torch.Tensor, t1: torch.Tensor, n_samples: int) -> torch.Tensor:
"""
Args:
t0: start time has shape [batch_size, *shape, 1]
t1: finish time has shape [batch_size, *shape, 1]
n_samples: number of ts to sample
Return:
sampled ts of shape [batch_size, *shape, n_samples, 1]
"""
lower, upper, _ = self.volume_range.partition(self.ts)
batch_size, *shape, n_coarse_samples, _ = self.ts.shape
weights = self.weights
if self.blur_pool:
padded = torch.cat([weights[..., :1, :], weights, weights[..., -1:, :]], dim=-2)
maxes = torch.maximum(padded[..., :-1, :], padded[..., 1:, :])
weights = 0.5 * (maxes[..., :-1, :] + maxes[..., 1:, :])
weights = weights + self.alpha
pmf = weights / weights.sum(dim=-2, keepdim=True)
inds = sample_pmf(pmf, n_samples)
assert inds.shape == (batch_size, *shape, n_samples, 1)
assert (inds >= 0).all() and (inds < n_coarse_samples).all()
t_rand = torch.rand(inds.shape, device=inds.device)
lower_ = torch.gather(lower, -2, inds)
upper_ = torch.gather(upper, -2, inds)
ts = lower_ + (upper_ - lower_) * t_rand
ts = torch.sort(ts, dim=-2).values
return ts
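# Note on blur_pool (illustrative): with per-bin weights (0, 1, 0) along the sample axis,
# the 2-tap max followed by the 2-tap blur from mip-NeRF yields (0.5, 1.0, 0.5), so the
# peak leaks into its neighbours and adjacent bins also receive some fine samples.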
@dataclass
class MeshDecoderOutput(BaseOutput):
"""
A 3D triangle mesh with optional data at the vertices and faces.
Args:
verts (`torch.Tensor` of shape `(N, 3)`):
array of vertex coordinates
faces (`torch.Tensor` of shape `(N, 3)`):
array of triangles, pointing to indices in verts.
vertex_channels (Dict):
per-vertex values for each color channel
"""
verts: torch.Tensor
faces: torch.Tensor
vertex_channels: Dict[str, torch.Tensor]
class MeshDecoder(nn.Module):
"""
Construct meshes from signed distance functions (SDFs) using the marching cubes method
"""
def __init__(self):
super().__init__()
cases = torch.zeros(256, 5, 3, dtype=torch.long)
masks = torch.zeros(256, 5, dtype=torch.bool)
self.register_buffer("cases", cases)
self.register_buffer("masks", masks)
def forward(self, field: torch.Tensor, min_point: torch.Tensor, size: torch.Tensor):
"""
For a signed distance field, produce a mesh using marching cubes.
:param field: a 3D tensor of field values, where negative values correspond
to the outside of the shape. The dimensions correspond to the x, y, and z directions, respectively.
:param min_point: a tensor of shape [3] containing the point corresponding
to (0, 0, 0) in the field.
:param size: a tensor of shape [3] containing the per-axis distance from the
(0, 0, 0) field corner and the (-1, -1, -1) field corner.
"""
assert len(field.shape) == 3, "input must be a 3D scalar field"
dev = field.device
cases = self.cases.to(dev)
masks = self.masks.to(dev)
min_point = min_point.to(dev)
size = size.to(dev)
grid_size = field.shape
grid_size_tensor = torch.tensor(grid_size).to(size)
# Create bitmasks between 0 and 255 (inclusive) indicating the state
# of the eight corners of each cube.
bitmasks = (field > 0).to(torch.uint8)
bitmasks = bitmasks[:-1, :, :] | (bitmasks[1:, :, :] << 1)
bitmasks = bitmasks[:, :-1, :] | (bitmasks[:, 1:, :] << 2)
bitmasks = bitmasks[:, :, :-1] | (bitmasks[:, :, 1:] << 4)
# Compute corner coordinates across the entire grid.
corner_coords = torch.empty(*grid_size, 3, device=dev, dtype=field.dtype)
corner_coords[range(grid_size[0]), :, :, 0] = torch.arange(grid_size[0], device=dev, dtype=field.dtype)[
:, None, None
]
corner_coords[:, range(grid_size[1]), :, 1] = torch.arange(grid_size[1], device=dev, dtype=field.dtype)[
:, None
]
corner_coords[:, :, range(grid_size[2]), 2] = torch.arange(grid_size[2], device=dev, dtype=field.dtype)
# Compute all vertices across all edges in the grid, even though we will
# throw some out later. We have (X-1)*Y*Z + X*(Y-1)*Z + X*Y*(Z-1) vertices.
# These are all midpoints, and don't account for interpolation (which is
# done later based on the used edge midpoints).
edge_midpoints = torch.cat(
[
((corner_coords[:-1] + corner_coords[1:]) / 2).reshape(-1, 3),
((corner_coords[:, :-1] + corner_coords[:, 1:]) / 2).reshape(-1, 3),
((corner_coords[:, :, :-1] + corner_coords[:, :, 1:]) / 2).reshape(-1, 3),
],
dim=0,
)
# Create a flat array of [X, Y, Z] indices for each cube.
cube_indices = torch.zeros(
grid_size[0] - 1, grid_size[1] - 1, grid_size[2] - 1, 3, device=dev, dtype=torch.long
)
cube_indices[range(grid_size[0] - 1), :, :, 0] = torch.arange(grid_size[0] - 1, device=dev)[:, None, None]
cube_indices[:, range(grid_size[1] - 1), :, 1] = torch.arange(grid_size[1] - 1, device=dev)[:, None]
cube_indices[:, :, range(grid_size[2] - 1), 2] = torch.arange(grid_size[2] - 1, device=dev)
flat_cube_indices = cube_indices.reshape(-1, 3)
# Create a flat array mapping each cube to 12 global edge indices.
edge_indices = _create_flat_edge_indices(flat_cube_indices, grid_size)
# Apply the LUT to figure out the triangles.
flat_bitmasks = bitmasks.reshape(-1).long()  # cast to long so indexing treats these as indices rather than a boolean mask
local_tris = cases[flat_bitmasks]
local_masks = masks[flat_bitmasks]
# Compute the global edge indices for the triangles.
global_tris = torch.gather(edge_indices, 1, local_tris.reshape(local_tris.shape[0], -1)).reshape(
local_tris.shape
)
# Select the used triangles for each cube.
selected_tris = global_tris.reshape(-1, 3)[local_masks.reshape(-1)]
# Now we have a bunch of indices into the full list of possible vertices,
# but we want to reduce this list to only the used vertices.
used_vertex_indices = torch.unique(selected_tris.view(-1))
used_edge_midpoints = edge_midpoints[used_vertex_indices]
old_index_to_new_index = torch.zeros(len(edge_midpoints), device=dev, dtype=torch.long)
old_index_to_new_index[used_vertex_indices] = torch.arange(
len(used_vertex_indices), device=dev, dtype=torch.long
)
# Rewrite the triangles to use the new indices
faces = torch.gather(old_index_to_new_index, 0, selected_tris.view(-1)).reshape(selected_tris.shape)
# Compute the actual interpolated coordinates corresponding to edge midpoints.
v1 = torch.floor(used_edge_midpoints).to(torch.long)
v2 = torch.ceil(used_edge_midpoints).to(torch.long)
s1 = field[v1[:, 0], v1[:, 1], v1[:, 2]]
s2 = field[v2[:, 0], v2[:, 1], v2[:, 2]]
p1 = (v1.float() / (grid_size_tensor - 1)) * size + min_point
p2 = (v2.float() / (grid_size_tensor - 1)) * size + min_point
# The signs of s1 and s2 should be different. We want to find
# t such that t*s2 + (1-t)*s1 = 0.
t = (s1 / (s1 - s2))[:, None]
verts = t * p2 + (1 - t) * p1
return MeshDecoderOutput(verts=verts, faces=faces, vertex_channels=None)
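# Interpolation example (values assumed): if the field is 0.5 at v1 and -1.5 at v2,
# then t = 0.5 / (0.5 - (-1.5)) = 0.25, so the emitted vertex sits a quarter of the way
# from p1 to p2, where the linear approximation of the SDF crosses zero.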
@dataclass
class MLPNeRFModelOutput(BaseOutput):
density: torch.Tensor
signed_distance: torch.Tensor
channels: torch.Tensor
ts: torch.Tensor
class MLPNeRSTFModel(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
d_hidden: int = 256,
n_output: int = 12,
n_hidden_layers: int = 6,
act_fn: str = "swish",
insert_direction_at: int = 4,
):
super().__init__()
# Instantiate the MLP
# Find out the dimension of encoded position and direction
dummy = torch.eye(1, 3)
d_posenc_pos = encode_position(position=dummy).shape[-1]
d_posenc_dir = encode_direction(position=dummy).shape[-1]
mlp_widths = [d_hidden] * n_hidden_layers
input_widths = [d_posenc_pos] + mlp_widths
output_widths = mlp_widths + [n_output]
if insert_direction_at is not None:
input_widths[insert_direction_at] += d_posenc_dir
self.mlp = nn.ModuleList([nn.Linear(d_in, d_out) for d_in, d_out in zip(input_widths, output_widths)])
if act_fn == "swish":
# self.activation = swish
# yiyi testing:
self.activation = lambda x: F.silu(x)
else:
raise ValueError(f"Unsupported activation function {act_fn}")
self.sdf_activation = torch.tanh
self.density_activation = torch.nn.functional.relu
self.channel_activation = torch.sigmoid
def map_indices_to_keys(self, output):
h_map = {
"sdf": (0, 1),
"density_coarse": (1, 2),
"density_fine": (2, 3),
"stf": (3, 6),
"nerf_coarse": (6, 9),
"nerf_fine": (9, 12),
}
mapped_output = {k: output[..., start:end] for k, (start, end) in h_map.items()}
return mapped_output
def forward(self, *, position, direction, ts, nerf_level="coarse", rendering_mode="nerf"):
h = encode_position(position)
h_preact = h
h_directionless = None
for i, layer in enumerate(self.mlp):
if i == self.config.insert_direction_at: # 4 in the config
h_directionless = h_preact
h_direction = encode_direction(position, direction=direction)
h = torch.cat([h, h_direction], dim=-1)
h = layer(h)
h_preact = h
if i < len(self.mlp) - 1:
h = self.activation(h)
h_final = h
if h_directionless is None:
h_directionless = h_preact
activation = self.map_indices_to_keys(h_final)
if nerf_level == "coarse":
h_density = activation["density_coarse"]
else:
h_density = activation["density_fine"]
if rendering_mode == "nerf":
if nerf_level == "coarse":
h_channels = activation["nerf_coarse"]
else:
h_channels = activation["nerf_fine"]
elif rendering_mode == "stf":
h_channels = activation["stf"]
density = self.density_activation(h_density)
signed_distance = self.sdf_activation(activation["sdf"])
channels = self.channel_activation(h_channels)
# yiyi notes: I think signed_distance is not used
return MLPNeRFModelOutput(density=density, signed_distance=signed_distance, channels=channels, ts=ts)
class ChannelsProj(nn.Module):
def __init__(
self,
*,
vectors: int,
channels: int,
d_latent: int,
):
super().__init__()
self.proj = nn.Linear(d_latent, vectors * channels)
self.norm = nn.LayerNorm(channels)
self.d_latent = d_latent
self.vectors = vectors
self.channels = channels
def forward(self, x: torch.Tensor) -> torch.Tensor:
x_bvd = x
w_vcd = self.proj.weight.view(self.vectors, self.channels, self.d_latent)
b_vc = self.proj.bias.view(1, self.vectors, self.channels)
h = torch.einsum("bvd,vcd->bvc", x_bvd, w_vcd)
h = self.norm(h)
h = h + b_vc
return h
class ShapEParamsProjModel(ModelMixin, ConfigMixin):
"""
Projects the latent representation of a 3D asset to obtain the weights of a multi-layer perceptron (MLP).
For more details, see the original paper:
"""
@register_to_config
def __init__(
self,
*,
param_names: Tuple[str] = (
"nerstf.mlp.0.weight",
"nerstf.mlp.1.weight",
"nerstf.mlp.2.weight",
"nerstf.mlp.3.weight",
),
param_shapes: Tuple[Tuple[int]] = (
(256, 93),
(256, 256),
(256, 256),
(256, 256),
),
d_latent: int = 1024,
):
super().__init__()
# check inputs
if len(param_names) != len(param_shapes):
raise ValueError("Must provide same number of `param_names` as `param_shapes`")
self.projections = nn.ModuleDict({})
for k, (vectors, channels) in zip(param_names, param_shapes):
self.projections[_sanitize_name(k)] = ChannelsProj(
vectors=vectors,
channels=channels,
d_latent=d_latent,
)
def forward(self, x: torch.Tensor):
out = {}
start = 0
for k, shape in zip(self.config.param_names, self.config.param_shapes):
vectors, _ = shape
end = start + vectors
x_bvd = x[:, start:end]
out[k] = self.projections[_sanitize_name(k)](x_bvd).reshape(len(x), *shape)
start = end
return out
class ShapERenderer(ModelMixin, ConfigMixin):
@register_to_config
def __init__(
self,
*,
param_names: Tuple[str] = (
"nerstf.mlp.0.weight",
"nerstf.mlp.1.weight",
"nerstf.mlp.2.weight",
"nerstf.mlp.3.weight",
),
param_shapes: Tuple[Tuple[int]] = (
(256, 93),
(256, 256),
(256, 256),
(256, 256),
),
d_latent: int = 1024,
d_hidden: int = 256,
n_output: int = 12,
n_hidden_layers: int = 6,
act_fn: str = "swish",
insert_direction_at: int = 4,
background: Tuple[float] = (
255.0,
255.0,
255.0,
),
):
super().__init__()
self.params_proj = ShapEParamsProjModel(
param_names=param_names,
param_shapes=param_shapes,
d_latent=d_latent,
)
self.mlp = MLPNeRSTFModel(d_hidden, n_output, n_hidden_layers, act_fn, insert_direction_at)
self.void = VoidNeRFModel(background=background, channel_scale=255.0)
self.volume = BoundingBoxVolume(bbox_max=[1.0, 1.0, 1.0], bbox_min=[-1.0, -1.0, -1.0])
self.mesh_decoder = MeshDecoder()
@torch.no_grad()
def render_rays(self, rays, sampler, n_samples, prev_model_out=None, render_with_direction=False):
"""
Perform volumetric rendering over a partition of possible t's in the union of rendering volumes (written below
with some abuse of notation)
C(r) := sum(
transmittance(t[i]) * integrate(
lambda t: density(t) * channels(t) * transmittance(t), [t[i], t[i + 1]],
) for i in range(len(parts))
) + transmittance(t[-1]) * void_model(t[-1]).channels
where
1) transmittance(s) := exp(-integrate(density, [t[0], s])) calculates the probability of light passing through
the volume specified by [t[0], s]. (transmittance of 1 means light can pass freely) 2) density and channels are
obtained by evaluating the appropriate part.model at time t. 3) [t[i], t[i + 1]] is defined as the range of t
where the ray intersects (parts[i].volume \\ union(part.volume for part in parts[:i])) at the surface of the
shell (if bounded). If the ray does not intersect, the integral over this segment is evaluated as 0 and
transmittance(t[i + 1]) := transmittance(t[i]). 4) The last term is integration to infinity (e.g. [t[-1],
math.inf]) that is evaluated by the void_model (i.e. we consider this space to be empty).
Args:
rays: [batch_size x ... x 2 x 3] origin and direction.
sampler: the ray sampler used to draw ts along each ray.
n_samples: number of ts to sample.
prev_model_out: model outputs from the previous (coarse) rendering step.
:return: A tuple of
- `channels`
- an importance sampler for additional fine-grained rendering
- raw model output
"""
origin, direction = rays[..., 0, :], rays[..., 1, :]
# Integrate over [t[i], t[i + 1]]
# 1. Intersect the rays with the current volume and sample ts to integrate along.
vrange = self.volume.intersect(origin, direction, t0_lower=None)
ts = sampler.sample(vrange.t0, vrange.t1, n_samples)
ts = ts.to(rays.dtype)
if prev_model_out is not None:
# Append the previous ts now before fprop because previous
# rendering used a different model and we can't reuse the output.
ts = torch.sort(torch.cat([ts, prev_model_out.ts], dim=-2), dim=-2).values
batch_size, *_shape, _t0_dim = vrange.t0.shape
_, *ts_shape, _ts_dim = ts.shape
# 2. Get the points along the ray and query the model
directions = torch.broadcast_to(direction.unsqueeze(-2), [batch_size, *ts_shape, 3])
positions = origin.unsqueeze(-2) + ts * directions
directions = directions.to(self.mlp.dtype)
positions = positions.to(self.mlp.dtype)
optional_directions = directions if render_with_direction else None
model_out = self.mlp(
position=positions,
direction=optional_directions,
ts=ts,
nerf_level="coarse" if prev_model_out is None else "fine",
)
# 3. Integrate the model results
channels, weights, transmittance = integrate_samples(
vrange, model_out.ts, model_out.density, model_out.channels
)
# 4. Clean up results that do not intersect with the volume.
transmittance = torch.where(vrange.intersected, transmittance, torch.ones_like(transmittance))
channels = torch.where(vrange.intersected, channels, torch.zeros_like(channels))
# 5. Add the integration to infinity (e.g. [t[-1], math.inf]), which is evaluated by the void_model (i.e. we consider this space to be empty).
channels = channels + transmittance * self.void(origin)
weighted_sampler = ImportanceRaySampler(vrange, ts=model_out.ts, weights=weights)
return channels, weighted_sampler, model_out
@torch.no_grad()
def decode_to_image(
self,
latents,
device,
size: int = 64,
ray_batch_size: int = 4096,
n_coarse_samples=64,
n_fine_samples=128,
):
# project the parameters from the generated latents
projected_params = self.params_proj(latents)
# update the mlp layers of the renderer
for name, param in self.mlp.state_dict().items():
if f"nerstf.{name}" in projected_params.keys():
param.copy_(projected_params[f"nerstf.{name}"].squeeze(0))
# create cameras object
camera = create_pan_cameras(size)
rays = camera.camera_rays
rays = rays.to(device)
n_batches = rays.shape[1] // ray_batch_size
coarse_sampler = StratifiedRaySampler()
images = []
for idx in range(n_batches):
rays_batch = rays[:, idx * ray_batch_size : (idx + 1) * ray_batch_size]
# render rays with coarse, stratified samples.
_, fine_sampler, coarse_model_out = self.render_rays(rays_batch, coarse_sampler, n_coarse_samples)
# Then, render with additional importance-weighted ray samples.
channels, _, _ = self.render_rays(
rays_batch, fine_sampler, n_fine_samples, prev_model_out=coarse_model_out
)
images.append(channels)
images = torch.cat(images, dim=1)
images = images.view(*camera.shape, camera.height, camera.width, -1).squeeze(0)
return images
@torch.no_grad()
def decode_to_mesh(
self,
latents,
device,
grid_size: int = 128,
query_batch_size: int = 4096,
texture_channels: Tuple = ("R", "G", "B"),
):
# 1. project the parameters from the generated latents
projected_params = self.params_proj(latents)
# 2. update the mlp layers of the renderer
for name, param in self.mlp.state_dict().items():
if f"nerstf.{name}" in projected_params.keys():
param.copy_(projected_params[f"nerstf.{name}"].squeeze(0))
# 3. decoding with STF rendering
# 3.1 query the SDF values at vertices along a regular 128**3 grid
query_points = volume_query_points(self.volume, grid_size)
query_positions = query_points[None].repeat(1, 1, 1).to(device=device, dtype=self.mlp.dtype)
fields = []
for idx in range(0, query_positions.shape[1], query_batch_size):
query_batch = query_positions[:, idx : idx + query_batch_size]
model_out = self.mlp(
position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf"
)
fields.append(model_out.signed_distance)
# predicted SDF values
fields = torch.cat(fields, dim=1)
fields = fields.float()
assert (
len(fields.shape) == 3 and fields.shape[-1] == 1
), f"expected [meta_batch x inner_batch] SDF results, but got {fields.shape}"
fields = fields.reshape(1, *([grid_size] * 3))
# create a padded grid of size (grid_size + 2)^3
# - force a negative border around the SDFs to close off all the models.
full_grid = torch.zeros(
1,
grid_size + 2,
grid_size + 2,
grid_size + 2,
device=fields.device,
dtype=fields.dtype,
)
full_grid.fill_(-1.0)
full_grid[:, 1:-1, 1:-1, 1:-1] = fields
fields = full_grid
# apply a differentiable implementation of Marching Cubes to construct meshes
raw_meshes = []
mesh_mask = []
for field in fields:
raw_mesh = self.mesh_decoder(field, self.volume.bbox_min, self.volume.bbox_max - self.volume.bbox_min)
mesh_mask.append(True)
raw_meshes.append(raw_mesh)
mesh_mask = torch.tensor(mesh_mask, device=fields.device)
max_vertices = max(len(m.verts) for m in raw_meshes)
# 3.2. query the texture color head at each vertex of the resulting mesh.
texture_query_positions = torch.stack(
[m.verts[torch.arange(0, max_vertices) % len(m.verts)] for m in raw_meshes],
dim=0,
)
texture_query_positions = texture_query_positions.to(device=device, dtype=self.mlp.dtype)
textures = []
for idx in range(0, texture_query_positions.shape[1], query_batch_size):
query_batch = texture_query_positions[:, idx : idx + query_batch_size]
texture_model_out = self.mlp(
position=query_batch, direction=None, ts=None, nerf_level="fine", rendering_mode="stf"
)
textures.append(texture_model_out.channels)
# predict texture color
textures = torch.cat(textures, dim=1)
textures = _convert_srgb_to_linear(textures)
textures = textures.float()
# 3.3 augment the mesh with texture data
assert len(textures.shape) == 3 and textures.shape[-1] == len(
texture_channels
), f"expected [meta_batch x inner_batch x texture_channels] field results, but got {textures.shape}"
for m, texture in zip(raw_meshes, textures):
texture = texture[: len(m.verts)]
m.vertex_channels = dict(zip(texture_channels, texture.unbind(-1)))
return raw_meshes[0]
| diffusers/src/diffusers/pipelines/shap_e/renderer.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/shap_e/renderer.py",
"repo_id": "diffusers",
"token_count": 18167
} |
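The `integrate_samples` helper called in `render_rays` above is defined elsewhere in this file and is not shown in this excerpt. As a hedged, standalone sketch of the quantities it returns (`channels`, `weights`, `transmittance`), the snippet below discretizes the volume-rendering integral for a single ray; the function name `toy_integrate_samples` and all numeric values are illustrative assumptions, not the library's implementation.
import torch
def toy_integrate_samples(density, channels, delta):
    # density: [n_samples, 1], channels: [n_samples, 3], delta: [n_samples, 1]
    alpha = 1.0 - torch.exp(-density * delta)  # per-segment opacity
    # transmittance before each segment = product of (1 - alpha) over earlier segments
    trans = torch.cumprod(torch.cat([torch.ones(1, 1), 1.0 - alpha[:-1]], dim=0), dim=0)
    weights = alpha * trans  # discretized density * transmittance
    rgb = (weights * channels).sum(dim=0)  # accumulated color along the ray
    residual_trans = trans[-1] * (1.0 - alpha[-1])  # light that makes it through everything
    return rgb, weights, residual_trans
density = torch.tensor([[0.0], [2.0], [0.5]])
channels = torch.tensor([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
delta = torch.full((3, 1), 0.1)
rgb, weights, residual_trans = toy_integrate_samples(density, channels, delta)
# the residual transmittance is what multiplies the void model's background color above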
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
"""
This class is used to hold the mean and standard deviation of the CLIP embedder used in stable unCLIP.
It is used to normalize the image embeddings before the noise is applied and un-normalize the noised image
embeddings.
"""
@register_to_config
def __init__(
self,
embedding_dim: int = 768,
):
super().__init__()
self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
self.std = nn.Parameter(torch.ones(1, embedding_dim))
def to(
self,
torch_device: Optional[Union[str, torch.device]] = None,
torch_dtype: Optional[torch.dtype] = None,
):
self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
return self
def scale(self, embeds):
embeds = (embeds - self.mean) * 1.0 / self.std
return embeds
def unscale(self, embeds):
embeds = (embeds * self.std) + self.mean
return embeds
| diffusers/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py/0 | {
"file_path": "diffusers/src/diffusers/pipelines/stable_diffusion/stable_unclip_image_normalizer.py",
"repo_id": "diffusers",
"token_count": 674
} |
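A minimal round-trip sketch of the normalizer defined above, assuming `diffusers` and `torch` are installed. In the actual stable unCLIP pipelines the `mean` and `std` parameters come from a pretrained checkpoint; here they keep their zero/one initialization, so `scale` followed by `unscale` simply recovers the input.
import torch
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
image_embeds = torch.randn(2, 768)
scaled = normalizer.scale(image_embeds)  # (embeds - mean) / std
recovered = normalizer.unscale(scaled)  # embeds * std + mean
assert torch.allclose(recovered, image_embeds, atol=1e-5)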
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Adapted from
https://github.com/huggingface/transformers/blob/c409cd81777fb27aadc043ed3d8339dbc020fb3b/src/transformers/quantizers/auto.py
"""
import warnings
from typing import Dict, Optional, Union
from .bitsandbytes import BnB4BitDiffusersQuantizer, BnB8BitDiffusersQuantizer
from .gguf import GGUFQuantizer
from .quantization_config import (
BitsAndBytesConfig,
GGUFQuantizationConfig,
QuantizationConfigMixin,
QuantizationMethod,
TorchAoConfig,
)
from .torchao import TorchAoHfQuantizer
AUTO_QUANTIZER_MAPPING = {
"bitsandbytes_4bit": BnB4BitDiffusersQuantizer,
"bitsandbytes_8bit": BnB8BitDiffusersQuantizer,
"gguf": GGUFQuantizer,
"torchao": TorchAoHfQuantizer,
}
AUTO_QUANTIZATION_CONFIG_MAPPING = {
"bitsandbytes_4bit": BitsAndBytesConfig,
"bitsandbytes_8bit": BitsAndBytesConfig,
"gguf": GGUFQuantizationConfig,
"torchao": TorchAoConfig,
}
class DiffusersAutoQuantizer:
"""
The auto diffusers quantizer class that takes care of automatically instantiating the correct
`DiffusersQuantizer` given the `QuantizationConfig`.
"""
@classmethod
def from_dict(cls, quantization_config_dict: Dict):
quant_method = quantization_config_dict.get("quant_method", None)
# We need special care for bnb models to make sure everything stays backward compatible.
if quantization_config_dict.get("load_in_8bit", False) or quantization_config_dict.get("load_in_4bit", False):
suffix = "_4bit" if quantization_config_dict.get("load_in_4bit", False) else "_8bit"
quant_method = QuantizationMethod.BITS_AND_BYTES + suffix
elif quant_method is None:
raise ValueError(
"The model's quantization config from the arguments has no `quant_method` attribute. Make sure that the model has been correctly quantized"
)
if quant_method not in AUTO_QUANTIZATION_CONFIG_MAPPING.keys():
raise ValueError(
f"Unknown quantization type, got {quant_method} - supported types are:"
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
)
target_cls = AUTO_QUANTIZATION_CONFIG_MAPPING[quant_method]
return target_cls.from_dict(quantization_config_dict)
@classmethod
def from_config(cls, quantization_config: Union[QuantizationConfigMixin, Dict], **kwargs):
# Convert it to a QuantizationConfig if the q_config is a dict
if isinstance(quantization_config, dict):
quantization_config = cls.from_dict(quantization_config)
quant_method = quantization_config.quant_method
# Again, we need special care for bnb as we have a single quantization config
# class for both 4-bit and 8-bit quantization
if quant_method == QuantizationMethod.BITS_AND_BYTES:
if quantization_config.load_in_8bit:
quant_method += "_8bit"
else:
quant_method += "_4bit"
if quant_method not in AUTO_QUANTIZER_MAPPING.keys():
raise ValueError(
f"Unknown quantization type, got {quant_method} - supported types are:"
f" {list(AUTO_QUANTIZER_MAPPING.keys())}"
)
target_cls = AUTO_QUANTIZER_MAPPING[quant_method]
return target_cls(quantization_config, **kwargs)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
model_config = cls.load_config(pretrained_model_name_or_path, **kwargs)
if getattr(model_config, "quantization_config", None) is None:
raise ValueError(
f"Did not found a `quantization_config` in {pretrained_model_name_or_path}. Make sure that the model is correctly quantized."
)
quantization_config_dict = model_config.quantization_config
quantization_config = cls.from_dict(quantization_config_dict)
# Update with potential kwargs that are passed through from_pretrained.
quantization_config.update(kwargs)
return cls.from_config(quantization_config)
@classmethod
def merge_quantization_configs(
cls,
quantization_config: Union[dict, QuantizationConfigMixin],
quantization_config_from_args: Optional[QuantizationConfigMixin],
):
"""
Handles situations where both a quantization_config from the arguments and a quantization_config from the model
config are present.
"""
if quantization_config_from_args is not None:
warning_msg = (
"You passed `quantization_config` or equivalent parameters to `from_pretrained` but the model you're loading"
" already has a `quantization_config` attribute. The `quantization_config` from the model will be used."
)
else:
warning_msg = ""
if isinstance(quantization_config, dict):
quantization_config = cls.from_dict(quantization_config)
if warning_msg != "":
warnings.warn(warning_msg)
return quantization_config
| diffusers/src/diffusers/quantizers/auto.py/0 | {
"file_path": "diffusers/src/diffusers/quantizers/auto.py",
"repo_id": "diffusers",
"token_count": 2206
} |
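A hedged sketch of how the mapping above resolves a config into a quantizer class. Constructing the quantizer for real requires `bitsandbytes` and `accelerate` to be installed, so treat this purely as an illustration of the dispatch logic (`load_in_4bit=True` makes `from_config` append the `_4bit` suffix to the bitsandbytes method name).
from diffusers import BitsAndBytesConfig
from diffusers.quantizers.auto import DiffusersAutoQuantizer
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
quantizer = DiffusersAutoQuantizer.from_config(bnb_config)
print(type(quantizer).__name__)  # expected: BnB4BitDiffusersQuantizer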
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils import SchedulerMixin
def gumbel_noise(t, generator=None):
device = generator.device if generator is not None else t.device
noise = torch.zeros_like(t, device=device).uniform_(0, 1, generator=generator).to(t.device)
return -torch.log((-torch.log(noise.clamp(1e-20))).clamp(1e-20))
def mask_by_random_topk(mask_len, probs, temperature=1.0, generator=None):
confidence = torch.log(probs.clamp(1e-20)) + temperature * gumbel_noise(probs, generator=generator)
sorted_confidence = torch.sort(confidence, dim=-1).values
cut_off = torch.gather(sorted_confidence, 1, mask_len.long())
masking = confidence < cut_off
return masking
@dataclass
class AmusedSchedulerOutput(BaseOutput):
"""
Output class for the scheduler's `step` function output.
Args:
prev_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.Tensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
"""
prev_sample: torch.Tensor
pred_original_sample: torch.Tensor = None
class AmusedScheduler(SchedulerMixin, ConfigMixin):
order = 1
temperatures: torch.Tensor
@register_to_config
def __init__(
self,
mask_token_id: int,
masking_schedule: str = "cosine",
):
self.temperatures = None
self.timesteps = None
def set_timesteps(
self,
num_inference_steps: int,
temperature: Union[int, Tuple[int, int], List[int]] = (2, 0),
device: Union[str, torch.device] = None,
):
self.timesteps = torch.arange(num_inference_steps, device=device).flip(0)
if isinstance(temperature, (tuple, list)):
self.temperatures = torch.linspace(temperature[0], temperature[1], num_inference_steps, device=device)
else:
self.temperatures = torch.linspace(temperature, 0.01, num_inference_steps, device=device)
def step(
self,
model_output: torch.Tensor,
timestep: torch.long,
sample: torch.LongTensor,
starting_mask_ratio: int = 1,
generator: Optional[torch.Generator] = None,
return_dict: bool = True,
) -> Union[AmusedSchedulerOutput, Tuple]:
two_dim_input = sample.ndim == 3 and model_output.ndim == 4
if two_dim_input:
batch_size, codebook_size, height, width = model_output.shape
sample = sample.reshape(batch_size, height * width)
model_output = model_output.reshape(batch_size, codebook_size, height * width).permute(0, 2, 1)
unknown_map = sample == self.config.mask_token_id
probs = model_output.softmax(dim=-1)
device = probs.device
probs_ = probs.to(generator.device) if generator is not None else probs # handles when generator is on CPU
if probs_.device.type == "cpu" and probs_.dtype != torch.float32:
probs_ = probs_.float() # multinomial is not implemented for cpu half precision
probs_ = probs_.reshape(-1, probs.size(-1))
pred_original_sample = torch.multinomial(probs_, 1, generator=generator).to(device=device)
pred_original_sample = pred_original_sample[:, 0].view(*probs.shape[:-1])
pred_original_sample = torch.where(unknown_map, pred_original_sample, sample)
if timestep == 0:
prev_sample = pred_original_sample
else:
seq_len = sample.shape[1]
step_idx = (self.timesteps == timestep).nonzero()
ratio = (step_idx + 1) / len(self.timesteps)
if self.config.masking_schedule == "cosine":
mask_ratio = torch.cos(ratio * math.pi / 2)
elif self.config.masking_schedule == "linear":
mask_ratio = 1 - ratio
else:
raise ValueError(f"unknown masking schedule {self.config.masking_schedule}")
mask_ratio = starting_mask_ratio * mask_ratio
mask_len = (seq_len * mask_ratio).floor()
# do not mask more than amount previously masked
mask_len = torch.min(unknown_map.sum(dim=-1, keepdim=True) - 1, mask_len)
# mask at least one
mask_len = torch.max(torch.tensor([1], device=model_output.device), mask_len)
selected_probs = torch.gather(probs, -1, pred_original_sample[:, :, None])[:, :, 0]
# Ignores the tokens given in the input by overwriting their confidence.
selected_probs = torch.where(unknown_map, selected_probs, torch.finfo(selected_probs.dtype).max)
masking = mask_by_random_topk(mask_len, selected_probs, self.temperatures[step_idx], generator)
# Masks tokens with lower confidence.
prev_sample = torch.where(masking, self.config.mask_token_id, pred_original_sample)
if two_dim_input:
prev_sample = prev_sample.reshape(batch_size, height, width)
pred_original_sample = pred_original_sample.reshape(batch_size, height, width)
if not return_dict:
return (prev_sample, pred_original_sample)
return AmusedSchedulerOutput(prev_sample, pred_original_sample)
def add_noise(self, sample, timesteps, generator=None):
step_idx = (self.timesteps == timesteps).nonzero()
ratio = (step_idx + 1) / len(self.timesteps)
if self.config.masking_schedule == "cosine":
mask_ratio = torch.cos(ratio * math.pi / 2)
elif self.config.masking_schedule == "linear":
mask_ratio = 1 - ratio
else:
raise ValueError(f"unknown masking schedule {self.config.masking_schedule}")
mask_indices = (
torch.rand(
sample.shape, device=generator.device if generator is not None else sample.device, generator=generator
).to(sample.device)
< mask_ratio
)
masked_sample = sample.clone()
masked_sample[mask_indices] = self.config.mask_token_id
return masked_sample
| diffusers/src/diffusers/schedulers/scheduling_amused.py/0 | {
"file_path": "diffusers/src/diffusers/schedulers/scheduling_amused.py",
"repo_id": "diffusers",
"token_count": 2775
} |
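To make the cosine masking schedule in `step` and `add_noise` concrete, the standalone sketch below (plain Python, no scheduler state) traces how the fraction of still-masked tokens shrinks over 12 inference steps; the step count and sequence length are made-up illustration values.
import math
num_inference_steps = 12
seq_len = 256
for step_idx in range(num_inference_steps):
    ratio = (step_idx + 1) / num_inference_steps
    mask_ratio = math.cos(ratio * math.pi / 2)  # cosine masking schedule
    mask_len = math.floor(seq_len * mask_ratio)  # tokens left masked after this step
    print(step_idx, round(mask_ratio, 3), mask_len)
# mask_ratio decays from ~0.991 at the first step to 0.0 at the last step,
# so every token has been unmasked by the time the loop finishes.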
# Copyright 2024 TSAIL Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DISCLAIMER: This file is strongly influenced by https://github.com/LuChengTHU/dpm-solver
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
)
@flax.struct.dataclass
class DPMSolverMultistepSchedulerState:
common: CommonSchedulerState
alpha_t: jnp.ndarray
sigma_t: jnp.ndarray
lambda_t: jnp.ndarray
# setable values
init_noise_sigma: jnp.ndarray
timesteps: jnp.ndarray
num_inference_steps: Optional[int] = None
# running values
model_outputs: Optional[jnp.ndarray] = None
lower_order_nums: Optional[jnp.int32] = None
prev_timestep: Optional[jnp.int32] = None
cur_sample: Optional[jnp.ndarray] = None
@classmethod
def create(
cls,
common: CommonSchedulerState,
alpha_t: jnp.ndarray,
sigma_t: jnp.ndarray,
lambda_t: jnp.ndarray,
init_noise_sigma: jnp.ndarray,
timesteps: jnp.ndarray,
):
return cls(
common=common,
alpha_t=alpha_t,
sigma_t=sigma_t,
lambda_t=lambda_t,
init_noise_sigma=init_noise_sigma,
timesteps=timesteps,
)
@dataclass
class FlaxDPMSolverMultistepSchedulerOutput(FlaxSchedulerOutput):
state: DPMSolverMultistepSchedulerState
class FlaxDPMSolverMultistepScheduler(FlaxSchedulerMixin, ConfigMixin):
"""
DPM-Solver (and the improved version DPM-Solver++) is a fast, dedicated high-order solver for diffusion ODEs with
a convergence order guarantee. Empirically, sampling by DPM-Solver with only 20 steps can generate high-quality
samples, and it can generate quite good samples even in only 10 steps.
For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095
Currently, we support the multistep DPM-Solver for both noise prediction models and data prediction models. We
recommend using `solver_order=2` for guided sampling, and `solver_order=3` for unconditional sampling.
We also support the "dynamic thresholding" method in Imagen (https://arxiv.org/abs/2205.11487). For pixel-space
diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic
thresholding. Note that the thresholding method is unsuitable for latent-space diffusion models (such as
stable-diffusion).
[`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`
function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.
[`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and
[`~SchedulerMixin.from_pretrained`] functions.
For more details, see the original paper: https://arxiv.org/abs/2206.00927 and https://arxiv.org/abs/2211.01095
Args:
num_train_timesteps (`int`): number of diffusion steps used to train the model.
beta_start (`float`): the starting `beta` value of inference.
beta_end (`float`): the final `beta` value.
beta_schedule (`str`):
the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
`linear`, `scaled_linear`, or `squaredcos_cap_v2`.
trained_betas (`np.ndarray`, optional):
option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.
solver_order (`int`, default `2`):
the order of DPM-Solver; can be `1`, `2` or `3`. We recommend using `solver_order=2` for guided
sampling, and `solver_order=3` for unconditional sampling.
prediction_type (`str`, default `epsilon`):
indicates whether the model predicts the noise (epsilon), or the data / `x0`. One of `epsilon`, `sample`,
or `v-prediction`.
thresholding (`bool`, default `False`):
whether to use the "dynamic thresholding" method (introduced by Imagen, https://arxiv.org/abs/2205.11487).
For pixel-space diffusion models, you can set both `algorithm_type=dpmsolver++` and `thresholding=True` to
use the dynamic thresholding. Note that the thresholding method is unsuitable for latent-space diffusion
models (such as stable-diffusion).
dynamic_thresholding_ratio (`float`, default `0.995`):
the ratio for the dynamic thresholding method. Default is `0.995`, the same as Imagen
(https://arxiv.org/abs/2205.11487).
sample_max_value (`float`, default `1.0`):
the threshold value for dynamic thresholding. Valid only when `thresholding=True` and
`algorithm_type="dpmsolver++`.
algorithm_type (`str`, default `dpmsolver++`):
the algorithm type for the solver. Either `dpmsolver` or `dpmsolver++`. The `dpmsolver` type implements the
algorithms in https://arxiv.org/abs/2206.00927, and the `dpmsolver++` type implements the algorithms in
https://arxiv.org/abs/2211.01095. We recommend using `dpmsolver++` with `solver_order=2` for guided
sampling (e.g. stable-diffusion).
solver_type (`str`, default `midpoint`):
the solver type for the second-order solver. Either `midpoint` or `heun`. The solver type slightly affects
the sample quality, especially for a small number of steps. We empirically find that `midpoint` solvers are
slightly better, so we recommend using the `midpoint` type.
lower_order_final (`bool`, default `True`):
whether to use lower-order solvers in the final steps. Only valid for < 15 inference steps. We empirically
find this trick can stabilize the sampling of DPM-Solver for steps < 15, especially for steps <= 10.
timestep_spacing (`str`, defaults to `"linspace"`):
The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
dtype (`jnp.dtype`, *optional*, defaults to `jnp.float32`):
the `dtype` used for params and computation.
"""
_compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]
dtype: jnp.dtype
@property
def has_state(self):
return True
@register_to_config
def __init__(
self,
num_train_timesteps: int = 1000,
beta_start: float = 0.0001,
beta_end: float = 0.02,
beta_schedule: str = "linear",
trained_betas: Optional[jnp.ndarray] = None,
solver_order: int = 2,
prediction_type: str = "epsilon",
thresholding: bool = False,
dynamic_thresholding_ratio: float = 0.995,
sample_max_value: float = 1.0,
algorithm_type: str = "dpmsolver++",
solver_type: str = "midpoint",
lower_order_final: bool = True,
timestep_spacing: str = "linspace",
dtype: jnp.dtype = jnp.float32,
):
self.dtype = dtype
def create_state(self, common: Optional[CommonSchedulerState] = None) -> DPMSolverMultistepSchedulerState:
if common is None:
common = CommonSchedulerState.create(self)
# Currently we only support VP-type noise schedule
alpha_t = jnp.sqrt(common.alphas_cumprod)
sigma_t = jnp.sqrt(1 - common.alphas_cumprod)
lambda_t = jnp.log(alpha_t) - jnp.log(sigma_t)
# settings for DPM-Solver
if self.config.algorithm_type not in ["dpmsolver", "dpmsolver++"]:
raise NotImplementedError(f"{self.config.algorithm_type} is not implemented for {self.__class__}")
if self.config.solver_type not in ["midpoint", "heun"]:
raise NotImplementedError(f"{self.config.solver_type} is not implemented for {self.__class__}")
# standard deviation of the initial noise distribution
init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
return DPMSolverMultistepSchedulerState.create(
common=common,
alpha_t=alpha_t,
sigma_t=sigma_t,
lambda_t=lambda_t,
init_noise_sigma=init_noise_sigma,
timesteps=timesteps,
)
def set_timesteps(
self, state: DPMSolverMultistepSchedulerState, num_inference_steps: int, shape: Tuple
) -> DPMSolverMultistepSchedulerState:
"""
Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
Args:
state (`DPMSolverMultistepSchedulerState`):
the `FlaxDPMSolverMultistepScheduler` state data class instance.
num_inference_steps (`int`):
the number of diffusion steps used when generating samples with a pre-trained model.
shape (`Tuple`):
the shape of the samples to be generated.
"""
last_timestep = self.config.num_train_timesteps
if self.config.timestep_spacing == "linspace":
timesteps = (
jnp.linspace(0, last_timestep - 1, num_inference_steps + 1).round()[::-1][:-1].astype(jnp.int32)
)
elif self.config.timestep_spacing == "leading":
step_ratio = last_timestep // (num_inference_steps + 1)
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (
(jnp.arange(0, num_inference_steps + 1) * step_ratio).round()[::-1][:-1].copy().astype(jnp.int32)
)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = self.config.num_train_timesteps / num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = jnp.arange(last_timestep, 0, -step_ratio).round().copy().astype(jnp.int32)
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
)
# initial running values
model_outputs = jnp.zeros((self.config.solver_order,) + shape, dtype=self.dtype)
lower_order_nums = jnp.int32(0)
prev_timestep = jnp.int32(-1)
cur_sample = jnp.zeros(shape, dtype=self.dtype)
return state.replace(
num_inference_steps=num_inference_steps,
timesteps=timesteps,
model_outputs=model_outputs,
lower_order_nums=lower_order_nums,
prev_timestep=prev_timestep,
cur_sample=cur_sample,
)
def convert_model_output(
self,
state: DPMSolverMultistepSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
) -> jnp.ndarray:
"""
Convert the model output to the corresponding type that the algorithm (DPM-Solver / DPM-Solver++) needs.
DPM-Solver is designed to discretize an integral of the noise prediction model, and DPM-Solver++ is designed to
discretize an integral of the data prediction model. So we need to first convert the model output to the
corresponding type to match the algorithm.
Note that the algorithm type and the model type are decoupled. That is to say, we can use either DPM-Solver or
DPM-Solver++ for both noise prediction models and data prediction models.
Args:
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
Returns:
`jnp.ndarray`: the converted model output.
"""
# DPM-Solver++ needs to solve an integral of the data prediction model.
if self.config.algorithm_type == "dpmsolver++":
if self.config.prediction_type == "epsilon":
alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
x0_pred = (sample - sigma_t * model_output) / alpha_t
elif self.config.prediction_type == "sample":
x0_pred = model_output
elif self.config.prediction_type == "v_prediction":
alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
x0_pred = alpha_t * sample - sigma_t * model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
" or `v_prediction` for the FlaxDPMSolverMultistepScheduler."
)
if self.config.thresholding:
# Dynamic thresholding in https://arxiv.org/abs/2205.11487
dynamic_max_val = jnp.percentile(
jnp.abs(x0_pred), self.config.dynamic_thresholding_ratio, axis=tuple(range(1, x0_pred.ndim))
)
dynamic_max_val = jnp.maximum(
dynamic_max_val, self.config.sample_max_value * jnp.ones_like(dynamic_max_val)
)
x0_pred = jnp.clip(x0_pred, -dynamic_max_val, dynamic_max_val) / dynamic_max_val
return x0_pred
# DPM-Solver needs to solve an integral of the noise prediction model.
elif self.config.algorithm_type == "dpmsolver":
if self.config.prediction_type == "epsilon":
return model_output
elif self.config.prediction_type == "sample":
alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
epsilon = (sample - alpha_t * model_output) / sigma_t
return epsilon
elif self.config.prediction_type == "v_prediction":
alpha_t, sigma_t = state.alpha_t[timestep], state.sigma_t[timestep]
epsilon = alpha_t * model_output + sigma_t * sample
return epsilon
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, "
" or `v_prediction` for the FlaxDPMSolverMultistepScheduler."
)
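# Worked example of the epsilon -> x0 conversion above (made-up numbers): with
# alpha_t = 0.8, sigma_t = 0.6, sample = 1.0 and model_output (epsilon) = 0.5,
# the "dpmsolver++" branch gives x0_pred = (1.0 - 0.6 * 0.5) / 0.8 = 0.875.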
def dpm_solver_first_order_update(
self,
state: DPMSolverMultistepSchedulerState,
model_output: jnp.ndarray,
timestep: int,
prev_timestep: int,
sample: jnp.ndarray,
) -> jnp.ndarray:
"""
One step for the first-order DPM-Solver (equivalent to DDIM).
See https://arxiv.org/abs/2206.00927 for the detailed derivation.
Args:
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
Returns:
`jnp.ndarray`: the sample tensor at the previous timestep.
"""
t, s0 = prev_timestep, timestep
m0 = model_output
lambda_t, lambda_s = state.lambda_t[t], state.lambda_t[s0]
alpha_t, alpha_s = state.alpha_t[t], state.alpha_t[s0]
sigma_t, sigma_s = state.sigma_t[t], state.sigma_t[s0]
h = lambda_t - lambda_s
if self.config.algorithm_type == "dpmsolver++":
x_t = (sigma_t / sigma_s) * sample - (alpha_t * (jnp.exp(-h) - 1.0)) * m0
elif self.config.algorithm_type == "dpmsolver":
x_t = (alpha_t / alpha_s) * sample - (sigma_t * (jnp.exp(h) - 1.0)) * m0
return x_t
def multistep_dpm_solver_second_order_update(
self,
state: DPMSolverMultistepSchedulerState,
model_output_list: jnp.ndarray,
timestep_list: List[int],
prev_timestep: int,
sample: jnp.ndarray,
) -> jnp.ndarray:
"""
One step for the second-order multistep DPM-Solver.
Args:
model_output_list (`List[jnp.ndarray]`):
direct outputs from learned diffusion model at current and latter timesteps.
timestep_list (`List[int]`): current and latter discrete timesteps in the diffusion chain.
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
Returns:
`jnp.ndarray`: the sample tensor at the previous timestep.
"""
t, s0, s1 = prev_timestep, timestep_list[-1], timestep_list[-2]
m0, m1 = model_output_list[-1], model_output_list[-2]
lambda_t, lambda_s0, lambda_s1 = state.lambda_t[t], state.lambda_t[s0], state.lambda_t[s1]
alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0]
sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0]
h, h_0 = lambda_t - lambda_s0, lambda_s0 - lambda_s1
r0 = h_0 / h
D0, D1 = m0, (1.0 / r0) * (m0 - m1)
if self.config.algorithm_type == "dpmsolver++":
# See https://arxiv.org/abs/2211.01095 for detailed derivations
if self.config.solver_type == "midpoint":
x_t = (
(sigma_t / sigma_s0) * sample
- (alpha_t * (jnp.exp(-h) - 1.0)) * D0
- 0.5 * (alpha_t * (jnp.exp(-h) - 1.0)) * D1
)
elif self.config.solver_type == "heun":
x_t = (
(sigma_t / sigma_s0) * sample
- (alpha_t * (jnp.exp(-h) - 1.0)) * D0
+ (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1
)
elif self.config.algorithm_type == "dpmsolver":
# See https://arxiv.org/abs/2206.00927 for detailed derivations
if self.config.solver_type == "midpoint":
x_t = (
(alpha_t / alpha_s0) * sample
- (sigma_t * (jnp.exp(h) - 1.0)) * D0
- 0.5 * (sigma_t * (jnp.exp(h) - 1.0)) * D1
)
elif self.config.solver_type == "heun":
x_t = (
(alpha_t / alpha_s0) * sample
- (sigma_t * (jnp.exp(h) - 1.0)) * D0
- (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1
)
return x_t
def multistep_dpm_solver_third_order_update(
self,
state: DPMSolverMultistepSchedulerState,
model_output_list: jnp.ndarray,
timestep_list: List[int],
prev_timestep: int,
sample: jnp.ndarray,
) -> jnp.ndarray:
"""
One step for the third-order multistep DPM-Solver.
Args:
model_output_list (`List[jnp.ndarray]`):
direct outputs from learned diffusion model at current and latter timesteps.
timestep (`int`): current and latter discrete timestep in the diffusion chain.
prev_timestep (`int`): previous discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
Returns:
`jnp.ndarray`: the sample tensor at the previous timestep.
"""
t, s0, s1, s2 = prev_timestep, timestep_list[-1], timestep_list[-2], timestep_list[-3]
m0, m1, m2 = model_output_list[-1], model_output_list[-2], model_output_list[-3]
lambda_t, lambda_s0, lambda_s1, lambda_s2 = (
state.lambda_t[t],
state.lambda_t[s0],
state.lambda_t[s1],
state.lambda_t[s2],
)
alpha_t, alpha_s0 = state.alpha_t[t], state.alpha_t[s0]
sigma_t, sigma_s0 = state.sigma_t[t], state.sigma_t[s0]
h, h_0, h_1 = lambda_t - lambda_s0, lambda_s0 - lambda_s1, lambda_s1 - lambda_s2
r0, r1 = h_0 / h, h_1 / h
D0 = m0
D1_0, D1_1 = (1.0 / r0) * (m0 - m1), (1.0 / r1) * (m1 - m2)
D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)
D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)
if self.config.algorithm_type == "dpmsolver++":
# See https://arxiv.org/abs/2206.00927 for detailed derivations
x_t = (
(sigma_t / sigma_s0) * sample
- (alpha_t * (jnp.exp(-h) - 1.0)) * D0
+ (alpha_t * ((jnp.exp(-h) - 1.0) / h + 1.0)) * D1
- (alpha_t * ((jnp.exp(-h) - 1.0 + h) / h**2 - 0.5)) * D2
)
elif self.config.algorithm_type == "dpmsolver":
# See https://arxiv.org/abs/2206.00927 for detailed derivations
x_t = (
(alpha_t / alpha_s0) * sample
- (sigma_t * (jnp.exp(h) - 1.0)) * D0
- (sigma_t * ((jnp.exp(h) - 1.0) / h - 1.0)) * D1
- (sigma_t * ((jnp.exp(h) - 1.0 - h) / h**2 - 0.5)) * D2
)
return x_t
def step(
self,
state: DPMSolverMultistepSchedulerState,
model_output: jnp.ndarray,
timestep: int,
sample: jnp.ndarray,
return_dict: bool = True,
) -> Union[FlaxDPMSolverMultistepSchedulerOutput, Tuple]:
"""
Predict the sample at the previous timestep by DPM-Solver. Core function to propagate the diffusion process
from the learned model outputs (most often the predicted noise).
Args:
state (`DPMSolverMultistepSchedulerState`):
the `FlaxDPMSolverMultistepScheduler` state data class instance.
model_output (`jnp.ndarray`): direct output from learned diffusion model.
timestep (`int`): current discrete timestep in the diffusion chain.
sample (`jnp.ndarray`):
current instance of sample being created by diffusion process.
return_dict (`bool`): option for returning tuple rather than FlaxDPMSolverMultistepSchedulerOutput class
Returns:
[`FlaxDPMSolverMultistepSchedulerOutput`] or `tuple`: [`FlaxDPMSolverMultistepSchedulerOutput`] if
`return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
"""
if state.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
)
(step_index,) = jnp.where(state.timesteps == timestep, size=1)
step_index = step_index[0]
prev_timestep = jax.lax.select(step_index == len(state.timesteps) - 1, 0, state.timesteps[step_index + 1])
model_output = self.convert_model_output(state, model_output, timestep, sample)
model_outputs_new = jnp.roll(state.model_outputs, -1, axis=0)
model_outputs_new = model_outputs_new.at[-1].set(model_output)
state = state.replace(
model_outputs=model_outputs_new,
prev_timestep=prev_timestep,
cur_sample=sample,
)
def step_1(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
return self.dpm_solver_first_order_update(
state,
state.model_outputs[-1],
state.timesteps[step_index],
state.prev_timestep,
state.cur_sample,
)
def step_23(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
def step_2(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
timestep_list = jnp.array([state.timesteps[step_index - 1], state.timesteps[step_index]])
return self.multistep_dpm_solver_second_order_update(
state,
state.model_outputs,
timestep_list,
state.prev_timestep,
state.cur_sample,
)
def step_3(state: DPMSolverMultistepSchedulerState) -> jnp.ndarray:
timestep_list = jnp.array(
[
state.timesteps[step_index - 2],
state.timesteps[step_index - 1],
state.timesteps[step_index],
]
)
return self.multistep_dpm_solver_third_order_update(
state,
state.model_outputs,
timestep_list,
state.prev_timestep,
state.cur_sample,
)
step_2_output = step_2(state)
step_3_output = step_3(state)
if self.config.solver_order == 2:
return step_2_output
elif self.config.lower_order_final and len(state.timesteps) < 15:
return jax.lax.select(
state.lower_order_nums < 2,
step_2_output,
jax.lax.select(
step_index == len(state.timesteps) - 2,
step_2_output,
step_3_output,
),
)
else:
return jax.lax.select(
state.lower_order_nums < 2,
step_2_output,
step_3_output,
)
step_1_output = step_1(state)
step_23_output = step_23(state)
if self.config.solver_order == 1:
prev_sample = step_1_output
elif self.config.lower_order_final and len(state.timesteps) < 15:
prev_sample = jax.lax.select(
state.lower_order_nums < 1,
step_1_output,
jax.lax.select(
step_index == len(state.timesteps) - 1,
step_1_output,
step_23_output,
),
)
else:
prev_sample = jax.lax.select(
state.lower_order_nums < 1,
step_1_output,
step_23_output,
)
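# Warm-up bookkeeping: `lower_order_nums` counts how many steps have been taken so
# far (capped at `solver_order`), so the first one or two steps fall back to
# lower-order updates while the multistep history fills up.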
state = state.replace(
lower_order_nums=jnp.minimum(state.lower_order_nums + 1, self.config.solver_order),
)
if not return_dict:
return (prev_sample, state)
return FlaxDPMSolverMultistepSchedulerOutput(prev_sample=prev_sample, state=state)
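# A rough usage sketch for this scheduler (comment only; `denoiser`, `params` and
# `latents` are placeholders and the denoiser call is schematic):
#
#     scheduler = FlaxDPMSolverMultistepScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=25, shape=latents.shape)
#     for t in state.timesteps:
#         noise_pred = denoiser(params, latents, t)
#         latents, state = scheduler.step(state, noise_pred, t, latents, return_dict=False)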
def scale_model_input(
self, state: DPMSolverMultistepSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
) -> jnp.ndarray:
"""
Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
current timestep.
Args:
state (`DPMSolverMultistepSchedulerState`):
the `FlaxDPMSolverMultistepScheduler` state data class instance.
sample (`jnp.ndarray`): input sample
timestep (`int`, optional): current timestep
Returns:
`jnp.ndarray`: scaled input sample
"""
return sample
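# `add_noise` delegates to the shared helper, which applies the closed-form forward
# process x_t = alpha_t * x_0 + sigma_t * noise using the schedule precomputed in
# `state.common`.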
def add_noise(
self,
state: DPMSolverMultistepSchedulerState,
original_samples: jnp.ndarray,
noise: jnp.ndarray,
timesteps: jnp.ndarray,
) -> jnp.ndarray:
return add_noise_common(state.common, original_samples, noise, timesteps)
def __len__(self):
return self.config.num_train_timesteps
| diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py/0 | {
"file_path": "diffusers/src/diffusers/schedulers/scheduling_dpmsolver_multistep_flax.py",
"repo_id": "diffusers",
"token_count": 13613
} |
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from packaging import version
from .. import __version__
from .constants import (
CONFIG_NAME,
DEPRECATED_REVISION_ARGS,
DIFFUSERS_DYNAMIC_MODULE_NAME,
FLAX_WEIGHTS_NAME,
GGUF_FILE_EXTENSION,
HF_MODULES_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MIN_PEFT_VERSION,
ONNX_EXTERNAL_WEIGHTS_NAME,
ONNX_WEIGHTS_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFETENSORS_FILE_EXTENSION,
SAFETENSORS_WEIGHTS_NAME,
USE_PEFT_BACKEND,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .deprecation_utils import deprecate
from .doc_utils import replace_example_docstring
from .dynamic_modules_utils import get_class_from_dynamic_module
from .export_utils import export_to_gif, export_to_obj, export_to_ply, export_to_video
from .hub_utils import (
PushToHubMixin,
_add_variant,
_get_checkpoint_shard_files,
_get_model_file,
extract_commit_hash,
http_user_agent,
)
from .import_utils import (
BACKENDS_MAPPING,
DIFFUSERS_SLOW_IMPORT,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
get_objects_from_module,
is_accelerate_available,
is_accelerate_version,
is_bitsandbytes_available,
is_bitsandbytes_version,
is_bs4_available,
is_flax_available,
is_ftfy_available,
is_gguf_available,
is_gguf_version,
is_google_colab,
is_hf_hub_version,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_matplotlib_available,
is_note_seq_available,
is_onnx_available,
is_peft_available,
is_peft_version,
is_safetensors_available,
is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available,
is_timm_available,
is_torch_available,
is_torch_npu_available,
is_torch_version,
is_torch_xla_available,
is_torch_xla_version,
is_torchao_available,
is_torchsde_available,
is_torchvision_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
is_wandb_available,
is_xformers_available,
requires_backends,
)
from .loading_utils import get_module_from_name, get_submodule_by_name, load_image, load_video
from .logging import get_logger
from .outputs import BaseOutput
from .peft_utils import (
check_peft_version,
delete_adapter_layers,
get_adapter_name,
get_peft_kwargs,
recurse_remove_peft_layers,
scale_lora_layers,
set_adapter_layers,
set_weights_and_activate_adapters,
unscale_lora_layers,
)
from .pil_utils import PIL_INTERPOLATION, make_image_grid, numpy_to_pil, pt_to_pil
from .state_dict_utils import (
convert_all_state_dict_to_peft,
convert_state_dict_to_diffusers,
convert_state_dict_to_kohya,
convert_state_dict_to_peft,
convert_unet_state_dict_to_peft,
)
logger = get_logger(__name__)
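# Guard used by the example scripts: fail fast with a clear message when the
# installed diffusers version is older than the one an example was written against.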
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace diffusers (see "
"`https://huggingface.co/docs/diffusers/installation#install-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(error_message)
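# Illustrative call (comment only; the exact version string depends on the example):
#
#     check_min_version("0.32.0.dev0")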
| diffusers/src/diffusers/utils/__init__.py/0 | {
"file_path": "diffusers/src/diffusers/utils/__init__.py",
"repo_id": "diffusers",
"token_count": 1727
} |
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
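# Each class below is a placeholder with the same name as a real pipeline/model.
# Instantiating one (or calling `from_config` / `from_pretrained`) goes through
# `requires_backends`, which raises an ImportError explaining that the "torch" and
# "transformers" backends must be installed to use the real implementation.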
class AllegroPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AltDiffusionImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AltDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AmusedImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AmusedInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AmusedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffSDXLPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffSparseControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffVideoToVideoControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AnimateDiffVideoToVideoPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AudioLDM2Pipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AudioLDM2ProjectionModel(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AudioLDM2UNet2DConditionModel(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AudioLDMPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class AuraFlowPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CLIPImageProjection(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CogVideoXFunControlPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CogVideoXImageToVideoPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CogVideoXPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CogVideoXVideoToVideoPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CogView3PlusPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ConsisIDPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CycleDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxControlImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxControlInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxControlNetImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxControlNetInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxControlPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxFillPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class FluxPriorReduxPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class HunyuanDiTControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class HunyuanDiTPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class HunyuanDiTPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class HunyuanVideoPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class I2VGenXLPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class IFImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class IFImg2ImgSuperResolutionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class IFInpaintingPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class IFInpaintingSuperResolutionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class IFPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class IFSuperResolutionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ImageTextPipelineOutput(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class Kandinsky3Img2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class Kandinsky3Pipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyImg2ImgCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyInpaintCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyPriorPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22CombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22ControlnetImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22ControlnetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22Img2ImgCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22Img2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22InpaintCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22InpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22Pipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22PriorEmb2EmbPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KandinskyV22PriorPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LatentConsistencyModelPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LattePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LDMTextToImagePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LEditsPPPipelineStableDiffusion(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LEditsPPPipelineStableDiffusionXL(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LTXImageToVideoPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LTXPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LuminaText2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class MarigoldDepthPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class MarigoldNormalsPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class MochiPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class MusicLDMPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class PaintByExamplePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class PIAPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class PixArtAlphaPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class PixArtSigmaPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class PixArtSigmaPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ReduxImageEncoder(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class SanaPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class SanaPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class SemanticStableDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ShapEImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ShapEPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableAudioPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableAudioProjectionModel(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableCascadeCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableCascadeDecoderPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableCascadePriorPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusion3ControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusion3Img2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusion3InpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusion3PAGImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusion3PAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusion3Pipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionAdapterPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionAttendAndExcitePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionControlNetImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionControlNetInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionControlNetPAGInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionControlNetPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionControlNetXSPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionDepth2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionDiffEditPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionGLIGENPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionGLIGENTextImagePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionImageVariationPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionInstructPix2PixPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionLatentUpscalePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionLDM3DPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionModelEditingPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPAGImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPAGInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPanoramaPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionParadigmsPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPipelineSafe(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionPix2PixZeroPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionSAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionUpscalePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLAdapterPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetPAGImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetUnionImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetUnionInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetUnionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLControlNetXSPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLInstructPix2PixPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLPAGImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLPAGInpaintPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLPAGPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableDiffusionXLPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableUnCLIPImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableUnCLIPPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class StableVideoDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class TextToVideoSDPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class TextToVideoZeroPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class TextToVideoZeroSDXLPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class UnCLIPImageVariationPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class UnCLIPPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class UniDiffuserModel(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class UniDiffuserPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class UniDiffuserTextDecoder(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class VersatileDiffusionDualGuidedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class VersatileDiffusionImageVariationPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class VersatileDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class VersatileDiffusionTextToImagePipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class VideoToVideoSDPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class VQDiffusionPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class WuerstchenCombinedPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class WuerstchenDecoderPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class WuerstchenPriorPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
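# Illustration (added note, not part of the generated file): every class above follows the same
# placeholder pattern. A minimal sketch of that pattern, assuming `DummyObject` and
# `requires_backends` are the helpers imported at the top of this module:
#
#     class MyPipeline(metaclass=DummyObject):
#         _backends = ["torch", "transformers"]
#
#         def __init__(self, *args, **kwargs):
#             requires_backends(self, ["torch", "transformers"])
#
#     MyPipeline()  # raises ImportError naming the missing backends instead of failing at import time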
| diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py/0 | {
"file_path": "diffusers/src/diffusers/utils/dummy_torch_and_transformers_objects.py",
"repo_id": "diffusers",
"token_count": 30978
} |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from .image_processor import VaeImageProcessor, is_valid_image, is_valid_image_imagelist
class VideoProcessor(VaeImageProcessor):
r"""Simple video processor."""
def preprocess_video(self, video, height: Optional[int] = None, width: Optional[int] = None) -> torch.Tensor:
r"""
Preprocesses input video(s).
Args:
video (`List[PIL.Image]`, `List[List[PIL.Image]]`, `torch.Tensor`, `np.array`, `List[torch.Tensor]`, `List[np.array]`):
The input video. It can be one of the following:
* List of the PIL images.
* List of list of PIL images.
* 4D Torch tensors (expected shape for each tensor `(num_frames, num_channels, height, width)`).
* 4D NumPy arrays (expected shape for each array `(num_frames, height, width, num_channels)`).
* List of 4D Torch tensors (expected shape for each tensor `(num_frames, num_channels, height,
width)`).
* List of 4D NumPy arrays (expected shape for each array `(num_frames, height, width, num_channels)`).
* 5D NumPy arrays: expected shape for each array `(batch_size, num_frames, height, width,
num_channels)`.
* 5D Torch tensors: expected shape for each array `(batch_size, num_frames, num_channels, height,
width)`.
height (`int`, *optional*, defaults to `None`):
The height in preprocessed frames of the video. If `None`, `get_default_height_width()` is used to
get the default height.
width (`int`, *optional*, defaults to `None`):
The width in preprocessed frames of the video. If `None`, `get_default_height_width()` is used to
get the default width.
"""
if isinstance(video, list) and isinstance(video[0], np.ndarray) and video[0].ndim == 5:
warnings.warn(
"Passing `video` as a list of 5d np.ndarray is deprecated."
"Please concatenate the list along the batch dimension and pass it as a single 5d np.ndarray",
FutureWarning,
)
video = np.concatenate(video, axis=0)
if isinstance(video, list) and isinstance(video[0], torch.Tensor) and video[0].ndim == 5:
warnings.warn(
"Passing `video` as a list of 5d torch.Tensor is deprecated."
"Please concatenate the list along the batch dimension and pass it as a single 5d torch.Tensor",
FutureWarning,
)
video = torch.cat(video, dim=0)
# ensure the input is a list of videos:
# - if it is a batch of videos (5d torch.Tensor or np.ndarray), it is converted to a list of videos (a list of 4d torch.Tensor or np.ndarray)
# - if it is a single video, it is converted to a list of one video.
if isinstance(video, (np.ndarray, torch.Tensor)) and video.ndim == 5:
video = list(video)
elif isinstance(video, list) and is_valid_image(video[0]) or is_valid_image_imagelist(video):
video = [video]
elif isinstance(video, list) and is_valid_image_imagelist(video[0]):
video = video
else:
raise ValueError(
"Input is in incorrect format. Currently, we only support numpy.ndarray, torch.Tensor, PIL.Image.Image"
)
video = torch.stack([self.preprocess(img, height=height, width=width) for img in video], dim=0)
# move the number of channels before the number of frames.
video = video.permute(0, 2, 1, 3, 4)
return video
def postprocess_video(
self, video: torch.Tensor, output_type: str = "np"
) -> Union[np.ndarray, torch.Tensor, List[PIL.Image.Image]]:
r"""
Converts a video tensor to a list of frames for export.
Args:
video (`torch.Tensor`): The video as a tensor.
output_type (`str`, defaults to `"np"`): Output type of the postprocessed `video` tensor.
"""
batch_size = video.shape[0]
outputs = []
for batch_idx in range(batch_size):
batch_vid = video[batch_idx].permute(1, 0, 2, 3)
batch_output = self.postprocess(batch_vid, output_type)
outputs.append(batch_output)
if output_type == "np":
outputs = np.stack(outputs)
elif output_type == "pt":
outputs = torch.stack(outputs)
elif not output_type == "pil":
raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
return outputs
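# Usage sketch (added for illustration; the shapes and `vae_scale_factor` value are assumptions,
# not taken from this module):
#
#     import torch
#
#     processor = VideoProcessor(vae_scale_factor=8)
#     frames = torch.rand(1, 16, 3, 256, 256)           # (batch, num_frames, channels, height, width)
#     model_input = processor.preprocess_video(frames)  # -> (batch, channels, num_frames, height, width)
#     frames_out = processor.postprocess_video(model_input, output_type="np")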
| diffusers/src/diffusers/video_processor.py/0 | {
"file_path": "diffusers/src/diffusers/video_processor.py",
"repo_id": "diffusers",
"token_count": 2251
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import gc
import importlib
import sys
import time
import unittest
import numpy as np
import torch
from packaging import version
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
ControlNetModel,
EulerDiscreteScheduler,
LCMScheduler,
StableDiffusionXLAdapterPipeline,
StableDiffusionXLControlNetPipeline,
StableDiffusionXLPipeline,
T2IAdapter,
)
from diffusers.utils import logging
from diffusers.utils.import_utils import is_accelerate_available
from diffusers.utils.testing_utils import (
CaptureLogger,
load_image,
nightly,
numpy_cosine_similarity_distance,
require_peft_backend,
require_torch_gpu,
slow,
torch_device,
)
sys.path.append(".")
from utils import PeftLoraLoaderMixinTests, check_if_lora_correctly_set, state_dicts_almost_equal # noqa: E402
if is_accelerate_available():
from accelerate.utils import release_memory
class StableDiffusionXLLoRATests(PeftLoraLoaderMixinTests, unittest.TestCase):
has_two_text_encoders = True
pipeline_class = StableDiffusionXLPipeline
scheduler_cls = EulerDiscreteScheduler
scheduler_kwargs = {
"beta_start": 0.00085,
"beta_end": 0.012,
"beta_schedule": "scaled_linear",
"timestep_spacing": "leading",
"steps_offset": 1,
}
unet_kwargs = {
"block_out_channels": (32, 64),
"layers_per_block": 2,
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"down_block_types": ("DownBlock2D", "CrossAttnDownBlock2D"),
"up_block_types": ("CrossAttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": (2, 4),
"use_linear_projection": True,
"addition_embed_type": "text_time",
"addition_time_embed_dim": 8,
"transformer_layers_per_block": (1, 2),
"projection_class_embeddings_input_dim": 80, # 6 * 8 + 32
"cross_attention_dim": 64,
}
vae_kwargs = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
"sample_size": 128,
}
text_encoder_cls, text_encoder_id = CLIPTextModel, "peft-internal-testing/tiny-clip-text-2"
tokenizer_cls, tokenizer_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
text_encoder_2_cls, text_encoder_2_id = CLIPTextModelWithProjection, "peft-internal-testing/tiny-clip-text-2"
tokenizer_2_cls, tokenizer_2_id = CLIPTokenizer, "peft-internal-testing/tiny-clip-text-2"
@property
def output_shape(self):
return (1, 64, 64, 3)
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@slow
@nightly
@require_torch_gpu
@require_peft_backend
class LoraSDXLIntegrationTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_sdxl_1_0_lora(self):
generator = torch.Generator("cpu").manual_seed(0)
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipe.enable_model_cpu_offload()
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
images = images[0, -3:, -3:, -1].flatten()
expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535])
max_diff = numpy_cosine_similarity_distance(expected, images)
assert max_diff < 1e-4
pipe.unload_lora_weights()
release_memory(pipe)
def test_sdxl_1_0_blockwise_lora(self):
generator = torch.Generator("cpu").manual_seed(0)
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipe.enable_model_cpu_offload()
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, adapter_name="offset")
scales = {
"unet": {
"down": {"block_1": [1.0, 1.0], "block_2": [1.0, 1.0]},
"mid": 1.0,
"up": {"block_0": [1.0, 1.0, 1.0], "block_1": [1.0, 1.0, 1.0]},
},
}
pipe.set_adapters(["offset"], [scales])
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
images = images[0, -3:, -3:, -1].flatten()
expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535])
max_diff = numpy_cosine_similarity_distance(expected, images)
assert max_diff < 1e-4
pipe.unload_lora_weights()
release_memory(pipe)
def test_sdxl_lcm_lora(self):
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
generator = torch.Generator("cpu").manual_seed(0)
lora_model_id = "latent-consistency/lcm-lora-sdxl"
pipe.load_lora_weights(lora_model_id)
image = pipe(
"masterpiece, best quality, mountain", generator=generator, num_inference_steps=4, guidance_scale=0.5
).images[0]
expected_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_lora/sdxl_lcm_lora.png"
)
image_np = pipe.image_processor.pil_to_numpy(image)
expected_image_np = pipe.image_processor.pil_to_numpy(expected_image)
max_diff = numpy_cosine_similarity_distance(image_np.flatten(), expected_image_np.flatten())
assert max_diff < 1e-4
pipe.unload_lora_weights()
release_memory(pipe)
def test_sdxl_1_0_lora_fusion(self):
generator = torch.Generator().manual_seed(0)
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
pipe.fuse_lora()
# We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being
# silently deleted - otherwise this will CPU OOM
pipe.unload_lora_weights()
pipe.enable_model_cpu_offload()
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
images = images[0, -3:, -3:, -1].flatten()
# This way we also test equivalence between LoRA fusion and the non-fusion behaviour.
expected = np.array([0.4468, 0.4061, 0.4134, 0.3637, 0.3202, 0.365, 0.3786, 0.3725, 0.3535])
max_diff = numpy_cosine_similarity_distance(expected, images)
assert max_diff < 1e-4
release_memory(pipe)
def test_sdxl_1_0_lora_unfusion(self):
generator = torch.Generator("cpu").manual_seed(0)
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
pipe.fuse_lora()
pipe.enable_model_cpu_offload()
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3
).images
images_with_fusion = images.flatten()
pipe.unfuse_lora()
generator = torch.Generator("cpu").manual_seed(0)
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=3
).images
images_without_fusion = images.flatten()
max_diff = numpy_cosine_similarity_distance(images_with_fusion, images_without_fusion)
assert max_diff < 1e-4
release_memory(pipe)
def test_sdxl_1_0_lora_unfusion_effectivity(self):
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipe.enable_model_cpu_offload()
generator = torch.Generator().manual_seed(0)
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
original_image_slice = images[0, -3:, -3:, -1].flatten()
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
pipe.fuse_lora()
generator = torch.Generator().manual_seed(0)
_ = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
pipe.unfuse_lora()
# We need to unload the lora weights - in the old API unfuse led to unloading the adapter weights
pipe.unload_lora_weights()
generator = torch.Generator().manual_seed(0)
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
images_without_fusion_slice = images[0, -3:, -3:, -1].flatten()
max_diff = numpy_cosine_similarity_distance(images_without_fusion_slice, original_image_slice)
assert max_diff < 1e-3
release_memory(pipe)
def test_sdxl_1_0_lora_fusion_efficiency(self):
generator = torch.Generator().manual_seed(0)
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
start_time = time.time()
for _ in range(3):
pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
end_time = time.time()
elapsed_time_non_fusion = end_time - start_time
del pipe
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename, torch_dtype=torch.float16)
pipe.fuse_lora()
# We need to unload the lora weights since in the previous API `fuse_lora` led to lora weights being
# silently deleted - otherwise this will CPU OOM
pipe.unload_lora_weights()
pipe.enable_model_cpu_offload()
generator = torch.Generator().manual_seed(0)
start_time = time.time()
for _ in range(3):
pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
end_time = time.time()
elapsed_time_fusion = end_time - start_time
self.assertTrue(elapsed_time_fusion < elapsed_time_non_fusion)
release_memory(pipe)
def test_sdxl_1_0_last_ben(self):
generator = torch.Generator().manual_seed(0)
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipe.enable_model_cpu_offload()
lora_model_id = "TheLastBen/Papercut_SDXL"
lora_filename = "papercut.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
images = pipe("papercut.safetensors", output_type="np", generator=generator, num_inference_steps=2).images
images = images[0, -3:, -3:, -1].flatten()
expected = np.array([0.5244, 0.4347, 0.4312, 0.4246, 0.4398, 0.4409, 0.4884, 0.4938, 0.4094])
max_diff = numpy_cosine_similarity_distance(expected, images)
assert max_diff < 1e-3
pipe.unload_lora_weights()
release_memory(pipe)
def test_sdxl_1_0_fuse_unfuse_all(self):
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
text_encoder_1_sd = copy.deepcopy(pipe.text_encoder.state_dict())
text_encoder_2_sd = copy.deepcopy(pipe.text_encoder_2.state_dict())
unet_sd = copy.deepcopy(pipe.unet.state_dict())
pipe.load_lora_weights(
"davizca87/sun-flower", weight_name="snfw3rXL-000004.safetensors", torch_dtype=torch.float16
)
fused_te_state_dict = pipe.text_encoder.state_dict()
fused_te_2_state_dict = pipe.text_encoder_2.state_dict()
unet_state_dict = pipe.unet.state_dict()
peft_ge_070 = version.parse(importlib.metadata.version("peft")) >= version.parse("0.7.0")
def remap_key(key, sd):
# some keys have moved around for PEFT >= 0.7.0, but they should still be loaded correctly
if (key in sd) or (not peft_ge_070):
return key
# instead of linear.weight, we now have linear.base_layer.weight, etc.
if key.endswith(".weight"):
key = key[:-7] + ".base_layer.weight"
elif key.endswith(".bias"):
key = key[:-5] + ".base_layer.bias"
return key
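# Example (illustration only): under PEFT >= 0.7.0 a fused key such as
#   "text_model.encoder.layers.0.self_attn.q_proj.weight"
# is looked up in the fused state dict as
#   "text_model.encoder.layers.0.self_attn.q_proj.base_layer.weight"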
for key, value in text_encoder_1_sd.items():
key = remap_key(key, fused_te_state_dict)
self.assertTrue(torch.allclose(fused_te_state_dict[key], value))
for key, value in text_encoder_2_sd.items():
key = remap_key(key, fused_te_2_state_dict)
self.assertTrue(torch.allclose(fused_te_2_state_dict[key], value))
for key, value in unet_sd.items():
key = remap_key(key, unet_state_dict)
self.assertTrue(torch.allclose(unet_state_dict[key], value))
pipe.fuse_lora()
pipe.unload_lora_weights()
assert not state_dicts_almost_equal(text_encoder_1_sd, pipe.text_encoder.state_dict())
assert not state_dicts_almost_equal(text_encoder_2_sd, pipe.text_encoder_2.state_dict())
assert not state_dicts_almost_equal(unet_sd, pipe.unet.state_dict())
release_memory(pipe)
del unet_sd, text_encoder_1_sd, text_encoder_2_sd
def test_sdxl_1_0_lora_with_sequential_cpu_offloading(self):
generator = torch.Generator().manual_seed(0)
pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
pipe.enable_sequential_cpu_offload()
lora_model_id = "hf-internal-testing/sdxl-1.0-lora"
lora_filename = "sd_xl_offset_example-lora_1.0.safetensors"
pipe.load_lora_weights(lora_model_id, weight_name=lora_filename)
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
images = images[0, -3:, -3:, -1].flatten()
expected = np.array([0.4468, 0.4087, 0.4134, 0.366, 0.3202, 0.3505, 0.3786, 0.387, 0.3535])
max_diff = numpy_cosine_similarity_distance(expected, images)
assert max_diff < 1e-3
pipe.unload_lora_weights()
release_memory(pipe)
def test_controlnet_canny_lora(self):
controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0")
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet
)
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors")
pipe.enable_sequential_cpu_offload()
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "corgi"
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images
assert images[0].shape == (768, 512, 3)
original_image = images[0, -3:, -3:, -1].flatten()
expected_image = np.array([0.4574, 0.4487, 0.4435, 0.5163, 0.4396, 0.4411, 0.518, 0.4465, 0.4333])
max_diff = numpy_cosine_similarity_distance(expected_image, original_image)
assert max_diff < 1e-4
pipe.unload_lora_weights()
release_memory(pipe)
def test_sdxl_t2i_adapter_canny_lora(self):
adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16).to(
"cpu"
)
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0",
adapter=adapter,
torch_dtype=torch.float16,
variant="fp16",
)
pipe.load_lora_weights("CiroN2022/toy-face", weight_name="toy_face_sdxl.safetensors")
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
prompt = "toy"
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png"
)
images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images
assert images[0].shape == (768, 512, 3)
image_slice = images[0, -3:, -3:, -1].flatten()
expected_slice = np.array([0.4284, 0.4337, 0.4319, 0.4255, 0.4329, 0.4280, 0.4338, 0.4420, 0.4226])
assert numpy_cosine_similarity_distance(image_slice, expected_slice) < 1e-4
@nightly
def test_sequential_fuse_unfuse(self):
pipe = StableDiffusionXLPipeline.from_pretrained(
"stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
# 1. round
pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.fuse_lora()
generator = torch.Generator().manual_seed(0)
images = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
image_slice = images[0, -3:, -3:, -1].flatten()
pipe.unfuse_lora()
# 2. round
pipe.load_lora_weights("ProomptEngineer/pe-balloon-diffusion-style", torch_dtype=torch.float16)
pipe.fuse_lora()
pipe.unfuse_lora()
# 3. round
pipe.load_lora_weights("ostris/crayon_style_lora_sdxl", torch_dtype=torch.float16)
pipe.fuse_lora()
pipe.unfuse_lora()
# 4. back to 1st round
pipe.load_lora_weights("Pclanglais/TintinIA", torch_dtype=torch.float16)
pipe.fuse_lora()
generator = torch.Generator().manual_seed(0)
images_2 = pipe(
"masterpiece, best quality, mountain", output_type="np", generator=generator, num_inference_steps=2
).images
image_slice_2 = images_2[0, -3:, -3:, -1].flatten()
max_diff = numpy_cosine_similarity_distance(image_slice, image_slice_2)
assert max_diff < 1e-3
pipe.unload_lora_weights()
release_memory(pipe)
@nightly
def test_integration_logits_multi_adapter(self):
path = "stabilityai/stable-diffusion-xl-base-1.0"
lora_id = "CiroN2022/toy-face"
pipe = StableDiffusionXLPipeline.from_pretrained(path, torch_dtype=torch.float16)
pipe.load_lora_weights(lora_id, weight_name="toy_face_sdxl.safetensors", adapter_name="toy")
pipe = pipe.to(torch_device)
self.assertTrue(check_if_lora_correctly_set(pipe.unet), "Lora not correctly set in Unet")
prompt = "toy_face of a hacker with a hoodie"
lora_scale = 0.9
images = pipe(
prompt=prompt,
num_inference_steps=30,
generator=torch.manual_seed(0),
cross_attention_kwargs={"scale": lora_scale},
output_type="np",
).images
expected_slice_scale = np.array([0.538, 0.539, 0.540, 0.540, 0.542, 0.539, 0.538, 0.541, 0.539])
predicted_slice = images[0, -3:, -3:, -1].flatten()
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
assert max_diff < 1e-3
pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
pipe.set_adapters("pixel")
prompt = "pixel art, a hacker with a hoodie, simple, flat colors"
images = pipe(
prompt,
num_inference_steps=30,
guidance_scale=7.5,
cross_attention_kwargs={"scale": lora_scale},
generator=torch.manual_seed(0),
output_type="np",
).images
predicted_slice = images[0, -3:, -3:, -1].flatten()
expected_slice_scale = np.array(
[0.61973065, 0.62018543, 0.62181497, 0.61933696, 0.6208608, 0.620576, 0.6200281, 0.62258327, 0.6259889]
)
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
assert max_diff < 1e-3
# multi-adapter inference
pipe.set_adapters(["pixel", "toy"], adapter_weights=[0.5, 1.0])
images = pipe(
prompt,
num_inference_steps=30,
guidance_scale=7.5,
cross_attention_kwargs={"scale": 1.0},
generator=torch.manual_seed(0),
output_type="np",
).images
predicted_slice = images[0, -3:, -3:, -1].flatten()
expected_slice_scale = np.array([0.5888, 0.5897, 0.5946, 0.5888, 0.5935, 0.5946, 0.5857, 0.5891, 0.5909])
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
assert max_diff < 1e-3
# Lora disabled
pipe.disable_lora()
images = pipe(
prompt,
num_inference_steps=30,
guidance_scale=7.5,
cross_attention_kwargs={"scale": lora_scale},
generator=torch.manual_seed(0),
output_type="np",
).images
predicted_slice = images[0, -3:, -3:, -1].flatten()
expected_slice_scale = np.array([0.5456, 0.5466, 0.5487, 0.5458, 0.5469, 0.5454, 0.5446, 0.5479, 0.5487])
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
assert max_diff < 1e-3
@nightly
def test_integration_logits_for_dora_lora(self):
pipeline = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
logger = logging.get_logger("diffusers.loaders.lora_pipeline")
logger.setLevel(30)
with CaptureLogger(logger) as cap_logger:
pipeline.load_lora_weights("hf-internal-testing/dora-trained-on-kohya")
pipeline.enable_model_cpu_offload()
images = pipeline(
"photo of ohwx dog",
num_inference_steps=10,
generator=torch.manual_seed(0),
output_type="np",
).images
assert "It seems like you are using a DoRA checkpoint" in cap_logger.out
predicted_slice = images[0, -3:, -3:, -1].flatten()
expected_slice_scale = np.array([0.1817, 0.0697, 0.2346, 0.0900, 0.1261, 0.2279, 0.1767, 0.1991, 0.2886])
max_diff = numpy_cosine_similarity_distance(expected_slice_scale, predicted_slice)
assert max_diff < 1e-3
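# Usage pattern exercised by the tests above (sketch; the repository ID and file name are the
# ones already used in this module):
#
#     pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0")
#     pipe.load_lora_weights(
#         "hf-internal-testing/sdxl-1.0-lora", weight_name="sd_xl_offset_example-lora_1.0.safetensors"
#     )
#     pipe.fuse_lora()            # bake the LoRA weights into the base model
#     # ... run inference ...
#     pipe.unfuse_lora()          # restore the unfused weights
#     pipe.unload_lora_weights()  # drop the LoRA adapter entirely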
| diffusers/tests/lora/test_lora_layers_sdxl.py/0 | {
"file_path": "diffusers/tests/lora/test_lora_layers_sdxl.py",
"repo_id": "diffusers",
"token_count": 11643
} |
def get_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None):
block_out_channels = block_out_channels or [2, 4]
norm_num_groups = norm_num_groups or 2
init_dict = {
"block_out_channels": block_out_channels,
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
"up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
"latent_channels": 4,
"norm_num_groups": norm_num_groups,
}
return init_dict
def get_asym_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None):
block_out_channels = block_out_channels or [2, 4]
norm_num_groups = norm_num_groups or 2
init_dict = {
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
"down_block_out_channels": block_out_channels,
"layers_per_down_block": 1,
"up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels),
"up_block_out_channels": block_out_channels,
"layers_per_up_block": 1,
"act_fn": "silu",
"latent_channels": 4,
"norm_num_groups": norm_num_groups,
"sample_size": 32,
"scaling_factor": 0.18215,
}
return init_dict
def get_autoencoder_tiny_config(block_out_channels=None):
block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32]
init_dict = {
"in_channels": 3,
"out_channels": 3,
"encoder_block_out_channels": block_out_channels,
"decoder_block_out_channels": block_out_channels,
"num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels],
"num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)],
}
return init_dict
def get_consistency_vae_config(block_out_channels=None, norm_num_groups=None):
block_out_channels = block_out_channels or [2, 4]
norm_num_groups = norm_num_groups or 2
return {
"encoder_block_out_channels": block_out_channels,
"encoder_in_channels": 3,
"encoder_out_channels": 4,
"encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels),
"decoder_add_attention": False,
"decoder_block_out_channels": block_out_channels,
"decoder_down_block_types": ["ResnetDownsampleBlock2D"] * len(block_out_channels),
"decoder_downsample_padding": 1,
"decoder_in_channels": 7,
"decoder_layers_per_block": 1,
"decoder_norm_eps": 1e-05,
"decoder_norm_num_groups": norm_num_groups,
"encoder_norm_num_groups": norm_num_groups,
"decoder_num_train_timesteps": 1024,
"decoder_out_channels": 6,
"decoder_resnet_time_scale_shift": "scale_shift",
"decoder_time_embedding_type": "learned",
"decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels),
"scaling_factor": 1,
"latent_channels": 4,
}
def get_autoencoder_oobleck_config(block_out_channels=None):
init_dict = {
"encoder_hidden_size": 12,
"decoder_channels": 12,
"decoder_input_channels": 6,
"audio_channels": 2,
"downsampling_ratios": [2, 4],
"channel_multiples": [1, 2],
}
return init_dict
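# Illustrative use of these helpers (added sketch, not part of the original file):
#
#     from diffusers import AutoencoderKL
#
#     init_dict = get_autoencoder_kl_config()
#     vae = AutoencoderKL(**init_dict)  # tiny VAE configuration intended for fast unit tests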
| diffusers/tests/models/autoencoders/vae.py/0 | {
"file_path": "diffusers/tests/models/autoencoders/vae.py",
"repo_id": "diffusers",
"token_count": 1582
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from diffusers import HunyuanDiT2DModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin
enable_full_determinism()
class HunyuanDiTTests(ModelTesterMixin, unittest.TestCase):
model_class = HunyuanDiT2DModel
main_input_name = "hidden_states"
@property
def dummy_input(self):
batch_size = 2
num_channels = 4
height = width = 8
embedding_dim = 8
sequence_length = 4
sequence_length_t5 = 4
hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
text_embedding_mask = torch.ones(size=(batch_size, sequence_length)).to(torch_device)
encoder_hidden_states_t5 = torch.randn((batch_size, sequence_length_t5, embedding_dim)).to(torch_device)
text_embedding_mask_t5 = torch.ones(size=(batch_size, sequence_length_t5)).to(torch_device)
timestep = torch.randint(0, 1000, size=(batch_size,), dtype=encoder_hidden_states.dtype).to(torch_device)
original_size = [1024, 1024]
target_size = [16, 16]
crops_coords_top_left = [0, 0]
add_time_ids = list(original_size + target_size + crops_coords_top_left)
add_time_ids = torch.tensor([add_time_ids, add_time_ids], dtype=encoder_hidden_states.dtype).to(torch_device)
style = torch.zeros(size=(batch_size,), dtype=int).to(torch_device)
image_rotary_emb = [
torch.ones(size=(1, 8), dtype=encoder_hidden_states.dtype),
torch.zeros(size=(1, 8), dtype=encoder_hidden_states.dtype),
]
return {
"hidden_states": hidden_states,
"encoder_hidden_states": encoder_hidden_states,
"text_embedding_mask": text_embedding_mask,
"encoder_hidden_states_t5": encoder_hidden_states_t5,
"text_embedding_mask_t5": text_embedding_mask_t5,
"timestep": timestep,
"image_meta_size": add_time_ids,
"style": style,
"image_rotary_emb": image_rotary_emb,
}
@property
def input_shape(self):
return (4, 8, 8)
@property
def output_shape(self):
return (8, 8, 8)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"sample_size": 8,
"patch_size": 2,
"in_channels": 4,
"num_layers": 1,
"attention_head_dim": 8,
"num_attention_heads": 2,
"cross_attention_dim": 8,
"cross_attention_dim_t5": 8,
"pooled_projection_dim": 4,
"hidden_size": 16,
"text_len": 4,
"text_len_t5": 4,
"activation_fn": "gelu-approximate",
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_output(self):
super().test_output(
expected_output_shape=(self.dummy_input[self.main_input_name].shape[0],) + self.output_shape
)
@unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0")
def test_set_xformers_attn_processor_for_determinism(self):
pass
@unittest.skip("HunyuanDIT use a custom processor HunyuanAttnProcessor2_0")
def test_set_attn_processor_for_determinism(self):
pass
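# Sketch (mirrors `prepare_init_args_and_inputs_for_common` above): the tiny model these tests
# exercise can be built directly as
#
#     model = HunyuanDiT2DModel(
#         sample_size=8, patch_size=2, in_channels=4, num_layers=1,
#         attention_head_dim=8, num_attention_heads=2, cross_attention_dim=8,
#         cross_attention_dim_t5=8, pooled_projection_dim=4, hidden_size=16,
#         text_len=4, text_len_t5=4, activation_fn="gelu-approximate",
#     )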
| diffusers/tests/models/transformers/test_models_transformer_hunyuan_dit.py/0 | {
"file_path": "diffusers/tests/models/transformers/test_models_transformer_hunyuan_dit.py",
"repo_id": "diffusers",
"token_count": 1790
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import tempfile
import unittest
import numpy as np
import torch
from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel
from diffusers.utils import logging
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
torch_device,
)
from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class UNetMotionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
model_class = UNetMotionModel
main_input_name = "sample"
@property
def dummy_input(self):
batch_size = 4
num_channels = 4
num_frames = 4
sizes = (16, 16)
noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device)
time_step = torch.tensor([10]).to(torch_device)
encoder_hidden_states = floats_tensor((batch_size * num_frames, 4, 16)).to(torch_device)
return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}
@property
def input_shape(self):
return (4, 4, 16, 16)
@property
def output_shape(self):
return (4, 4, 16, 16)
def prepare_init_args_and_inputs_for_common(self):
init_dict = {
"block_out_channels": (16, 32),
"norm_num_groups": 16,
"down_block_types": ("CrossAttnDownBlockMotion", "DownBlockMotion"),
"up_block_types": ("UpBlockMotion", "CrossAttnUpBlockMotion"),
"cross_attention_dim": 16,
"num_attention_heads": 2,
"out_channels": 4,
"in_channels": 4,
"layers_per_block": 1,
"sample_size": 16,
}
inputs_dict = self.dummy_input
return init_dict, inputs_dict
def test_from_unet2d(self):
torch.manual_seed(0)
unet2d = UNet2DConditionModel()
torch.manual_seed(1)
model = self.model_class.from_unet2d(unet2d)
model_state_dict = model.state_dict()
for param_name, param_value in unet2d.named_parameters():
self.assertTrue(torch.equal(model_state_dict[param_name], param_value))
def test_freeze_unet2d(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict)
model.freeze_unet2d_params()
for param_name, param_value in model.named_parameters():
if "motion_modules" not in param_name:
self.assertFalse(param_value.requires_grad)
else:
self.assertTrue(param_value.requires_grad)
def test_loading_motion_adapter(self):
model = self.model_class()
adapter = MotionAdapter()
model.load_motion_modules(adapter)
for idx, down_block in enumerate(model.down_blocks):
adapter_state_dict = adapter.down_blocks[idx].motion_modules.state_dict()
for param_name, param_value in down_block.motion_modules.named_parameters():
self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value))
for idx, up_block in enumerate(model.up_blocks):
adapter_state_dict = adapter.up_blocks[idx].motion_modules.state_dict()
for param_name, param_value in up_block.motion_modules.named_parameters():
self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value))
mid_block_adapter_state_dict = adapter.mid_block.motion_modules.state_dict()
for param_name, param_value in model.mid_block.motion_modules.named_parameters():
self.assertTrue(torch.equal(mid_block_adapter_state_dict[param_name], param_value))
def test_saving_motion_modules(self):
torch.manual_seed(0)
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict)
model.to(torch_device)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_motion_modules(tmpdirname)
self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors")))
adapter_loaded = MotionAdapter.from_pretrained(tmpdirname)
torch.manual_seed(0)
model_loaded = self.model_class(**init_dict)
model_loaded.load_motion_modules(adapter_loaded)
model_loaded.to(torch_device)
with torch.no_grad():
output = model(**inputs_dict)[0]
output_loaded = model_loaded(**inputs_dict)[0]
max_diff = (output - output_loaded).abs().max().item()
self.assertLessEqual(max_diff, 1e-4, "Models give different forward passes")
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_enable_works(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict)
model.enable_xformers_memory_efficient_attention()
assert (
model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__
== "XFormersAttnProcessor"
), "xformers is not enabled"
def test_gradient_checkpointing_is_applied(self):
expected_set = {
"CrossAttnUpBlockMotion",
"CrossAttnDownBlockMotion",
"UNetMidBlockCrossAttnMotion",
"UpBlockMotion",
"Transformer2DModel",
"DownBlockMotion",
}
super().test_gradient_checkpointing_is_applied(expected_set=expected_set)
def test_feed_forward_chunking(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
init_dict["block_out_channels"] = (32, 64)
init_dict["norm_num_groups"] = 32
model = self.model_class(**init_dict)
model.to(torch_device)
model.eval()
with torch.no_grad():
output = model(**inputs_dict)[0]
model.enable_forward_chunking()
with torch.no_grad():
output_2 = model(**inputs_dict)[0]
self.assertEqual(output.shape, output_2.shape, "Shape doesn't match")
assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2
def test_pickle(self):
# check that the model output can be copied without changing its values
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
model = self.model_class(**init_dict)
model.to(torch_device)
with torch.no_grad():
sample = model(**inputs_dict).sample
sample_copy = copy.copy(sample)
assert (sample - sample_copy).abs().max() < 1e-4
def test_from_save_pretrained(self, expected_max_diff=5e-5):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
torch.manual_seed(0)
model = self.model_class(**init_dict)
model.to(torch_device)
model.eval()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, safe_serialization=False)
torch.manual_seed(0)
new_model = self.model_class.from_pretrained(tmpdirname)
new_model.to(torch_device)
with torch.no_grad():
image = model(**inputs_dict)
if isinstance(image, dict):
image = image.to_tuple()[0]
new_image = new_model(**inputs_dict)
if isinstance(new_image, dict):
new_image = new_image.to_tuple()[0]
max_diff = (image - new_image).abs().max().item()
self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")
def test_from_save_pretrained_variant(self, expected_max_diff=5e-5):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
torch.manual_seed(0)
model = self.model_class(**init_dict)
model.to(torch_device)
model.eval()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False)
torch.manual_seed(0)
new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16")
# non-variant cannot be loaded
with self.assertRaises(OSError) as error_context:
self.model_class.from_pretrained(tmpdirname)
# make sure that error message states what keys are missing
assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception)
new_model.to(torch_device)
with torch.no_grad():
image = model(**inputs_dict)
if isinstance(image, dict):
image = image.to_tuple()[0]
new_image = new_model(**inputs_dict)
if isinstance(new_image, dict):
new_image = new_image.to_tuple()[0]
max_diff = (image - new_image).abs().max().item()
self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")
def test_forward_with_norm_groups(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
init_dict["norm_num_groups"] = 16
init_dict["block_out_channels"] = (16, 32)
model = self.model_class(**init_dict)
model.to(torch_device)
model.eval()
with torch.no_grad():
output = model(**inputs_dict)
if isinstance(output, dict):
output = output.to_tuple()[0]
self.assertIsNotNone(output)
expected_shape = inputs_dict["sample"].shape
self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
def test_asymmetric_motion_model(self):
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
init_dict["layers_per_block"] = (2, 3)
init_dict["transformer_layers_per_block"] = ((1, 2), (3, 4, 5))
init_dict["reverse_transformer_layers_per_block"] = ((7, 6, 7, 4), (4, 2, 2))
init_dict["temporal_transformer_layers_per_block"] = ((2, 5), (2, 3, 5))
init_dict["reverse_temporal_transformer_layers_per_block"] = ((5, 4, 3, 4), (3, 2, 2))
init_dict["num_attention_heads"] = (2, 4)
init_dict["motion_num_attention_heads"] = (4, 4)
init_dict["reverse_motion_num_attention_heads"] = (2, 2)
init_dict["use_motion_mid_block"] = True
init_dict["mid_block_layers"] = 2
init_dict["transformer_layers_per_mid_block"] = (1, 5)
init_dict["temporal_transformer_layers_per_mid_block"] = (2, 4)
model = self.model_class(**init_dict)
model.to(torch_device)
model.eval()
with torch.no_grad():
output = model(**inputs_dict)
if isinstance(output, dict):
output = output.to_tuple()[0]
self.assertIsNotNone(output)
expected_shape = inputs_dict["sample"].shape
self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
| diffusers/tests/models/unets/test_models_unet_motion.py/0 | {
"file_path": "diffusers/tests/models/unets/test_models_unet_motion.py",
"repo_id": "diffusers",
"token_count": 5224
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import unittest
import numpy as np
import torch
from transformers import (
ClapAudioConfig,
ClapConfig,
ClapFeatureExtractor,
ClapModel,
ClapTextConfig,
GPT2Config,
GPT2Model,
RobertaTokenizer,
SpeechT5HifiGan,
SpeechT5HifiGanConfig,
T5Config,
T5EncoderModel,
T5Tokenizer,
)
from diffusers import (
AudioLDM2Pipeline,
AudioLDM2ProjectionModel,
AudioLDM2UNet2DConditionModel,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = AudioLDM2Pipeline
params = TEXT_TO_AUDIO_PARAMS
batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
required_optional_params = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
]
)
supports_dduf = False
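# The dummy components below assemble a miniature AudioLDM2 stack (UNet, VAE, CLAP text/audio
# encoder, T5 encoder, GPT-2 language model, projection model and HiFi-GAN vocoder) so the fast
# tests run in seconds on CPU.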
def get_dummy_components(self):
torch.manual_seed(0)
unet = AudioLDM2UNet2DConditionModel(
block_out_channels=(8, 16),
layers_per_block=1,
norm_num_groups=8,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=(8, 16),
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[8, 16],
in_channels=1,
out_channels=1,
norm_num_groups=8,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_branch_config = ClapTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=8,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=1,
num_hidden_layers=1,
pad_token_id=1,
vocab_size=1000,
projection_dim=8,
)
audio_branch_config = ClapAudioConfig(
spec_size=8,
window_size=4,
num_mel_bins=8,
intermediate_size=37,
layer_norm_eps=1e-05,
depths=[1, 1],
num_attention_heads=[1, 1],
num_hidden_layers=1,
hidden_size=192,
projection_dim=8,
patch_size=2,
patch_stride=2,
patch_embed_input_channels=4,
)
text_encoder_config = ClapConfig.from_text_audio_configs(
text_config=text_branch_config,
audio_config=audio_branch_config,
projection_dim=16,
)
text_encoder = ClapModel(text_encoder_config)
tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
feature_extractor = ClapFeatureExtractor.from_pretrained(
"hf-internal-testing/tiny-random-ClapModel", hop_length=7900
)
torch.manual_seed(0)
text_encoder_2_config = T5Config(
vocab_size=32100,
d_model=32,
d_ff=37,
d_kv=8,
num_heads=1,
num_layers=1,
)
text_encoder_2 = T5EncoderModel(text_encoder_2_config)
tokenizer_2 = T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77)
torch.manual_seed(0)
language_model_config = GPT2Config(
n_embd=16,
n_head=1,
n_layer=1,
vocab_size=1000,
n_ctx=99,
n_positions=99,
)
language_model = GPT2Model(language_model_config)
language_model.config.max_new_tokens = 8
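# The projection model bridges the CLAP and T5 embeddings into the GPT-2 language model's hidden
# size. Note that "langauge_model_dim" mirrors the parameter name as it is spelled in
# AudioLDM2ProjectionModel, so it is kept as-is here.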
torch.manual_seed(0)
projection_model = AudioLDM2ProjectionModel(
text_encoder_dim=16,
text_encoder_1_dim=32,
langauge_model_dim=16,
)
vocoder_config = SpeechT5HifiGanConfig(
model_in_dim=8,
sampling_rate=16000,
upsample_initial_channel=16,
upsample_rates=[2, 2],
upsample_kernel_sizes=[4, 4],
resblock_kernel_sizes=[3, 7],
resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
normalize_before=False,
)
vocoder = SpeechT5HifiGan(vocoder_config)
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"text_encoder_2": text_encoder_2,
"tokenizer": tokenizer,
"tokenizer_2": tokenizer_2,
"feature_extractor": feature_extractor,
"language_model": language_model,
"projection_model": projection_model,
"vocoder": vocoder,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def test_audioldm2_ddim(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
output = audioldm_pipe(**inputs)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) == 256
audio_slice = audio[:10]
expected_slice = np.array(
[
2.602e-03,
1.729e-03,
1.863e-03,
-2.219e-03,
-2.656e-03,
-2.017e-03,
-2.648e-03,
-2.115e-03,
-2.502e-03,
-2.081e-03,
]
)
assert np.abs(audio_slice - expected_slice).max() < 1e-4
def test_audioldm2_prompt_embeds(self):
components = self.get_dummy_components()
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
inputs["prompt"] = 3 * [inputs["prompt"]]
# forward
output = audioldm_pipe(**inputs)
audio_1 = output.audios[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = 3 * [inputs.pop("prompt")]
text_inputs = audioldm_pipe.tokenizer(
prompt,
padding="max_length",
max_length=audioldm_pipe.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_inputs = text_inputs["input_ids"].to(torch_device)
clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs)
clap_prompt_embeds = clap_prompt_embeds[:, None, :]
text_inputs = audioldm_pipe.tokenizer_2(
prompt,
padding="max_length",
max_length=True,
truncation=True,
return_tensors="pt",
)
text_inputs = text_inputs["input_ids"].to(torch_device)
t5_prompt_embeds = audioldm_pipe.text_encoder_2(
text_inputs,
)
t5_prompt_embeds = t5_prompt_embeds[0]
projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0]
generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8)
inputs["prompt_embeds"] = t5_prompt_embeds
inputs["generated_prompt_embeds"] = generated_prompt_embeds
# forward
output = audioldm_pipe(**inputs)
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2).max() < 1e-2
def test_audioldm2_negative_prompt_embeds(self):
components = self.get_dummy_components()
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
negative_prompt = 3 * ["this is a negative prompt"]
inputs["negative_prompt"] = negative_prompt
inputs["prompt"] = 3 * [inputs["prompt"]]
# forward
output = audioldm_pipe(**inputs)
audio_1 = output.audios[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = 3 * [inputs.pop("prompt")]
embeds = []
generated_embeds = []
for p in [prompt, negative_prompt]:
text_inputs = audioldm_pipe.tokenizer(
p,
padding="max_length",
max_length=audioldm_pipe.tokenizer.model_max_length,
truncation=True,
return_tensors="pt",
)
text_inputs = text_inputs["input_ids"].to(torch_device)
clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs)
clap_prompt_embeds = clap_prompt_embeds[:, None, :]
text_inputs = audioldm_pipe.tokenizer_2(
prompt,
padding="max_length",
max_length=True if len(embeds) == 0 else embeds[0].shape[1],
truncation=True,
return_tensors="pt",
)
text_inputs = text_inputs["input_ids"].to(torch_device)
t5_prompt_embeds = audioldm_pipe.text_encoder_2(
text_inputs,
)
t5_prompt_embeds = t5_prompt_embeds[0]
projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0]
generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8)
embeds.append(t5_prompt_embeds)
generated_embeds.append(generated_prompt_embeds)
inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds
# forward
output = audioldm_pipe(**inputs)
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2).max() < 1e-2
def test_audioldm2_negative_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
negative_prompt = "egg cracking"
output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) == 256
audio_slice = audio[:10]
expected_slice = np.array(
[0.0026, 0.0017, 0.0018, -0.0022, -0.0026, -0.002, -0.0026, -0.0021, -0.0025, -0.0021]
)
assert np.abs(audio_slice - expected_slice).max() < 1e-4
def test_audioldm2_num_waveforms_per_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(device)
audioldm_pipe.set_progress_bar_config(disable=None)
prompt = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
audios = audioldm_pipe(prompt, num_inference_steps=2).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
batch_size = 2
audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
num_waveforms_per_prompt = 1
audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
batch_size = 2
audios = audioldm_pipe(
[prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def test_audioldm2_audio_length_in_s(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
inputs = self.get_dummy_inputs(device)
output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) / vocoder_sampling_rate == 0.016
output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
audio = output.audios[0]
assert audio.ndim == 1
assert len(audio) / vocoder_sampling_rate == 0.032
def test_audioldm2_vocoder_model_in_dim(self):
components = self.get_dummy_components()
audioldm_pipe = AudioLDM2Pipeline(**components)
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
prompt = ["hey"]
output = audioldm_pipe(prompt, num_inference_steps=1)
audio_shape = output.audios.shape
assert audio_shape == (1, 256)
config = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
output = audioldm_pipe(prompt, num_inference_steps=1)
audio_shape = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def test_attention_slicing_forward_pass(self):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)
@unittest.skip("Raises a not implemented error in AudioLDM2")
def test_xformers_attention_forwardGenerator_pass(self):
pass
def test_dict_tuple_outputs_equivalent(self):
# increase tolerance from 1e-4 -> 3e-4 to account for large composite model
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-4)
def test_inference_batch_single_identical(self):
# increase tolerance from 1e-4 -> 2e-4 to account for large composite model
self._test_inference_batch_single_identical(expected_max_diff=2e-4)
def test_save_load_local(self):
# increase tolerance from 1e-4 -> 2e-4 to account for large composite model
super().test_save_load_local(expected_max_difference=2e-4)
def test_save_load_optional_components(self):
# increase tolerance from 1e-4 -> 2e-4 to account for large composite model
super().test_save_load_optional_components(expected_max_difference=2e-4)
def test_to_dtype(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.set_progress_bar_config(disable=None)
# The method component.dtype returns the dtype of the first parameter registered in the model, not the
# dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale)
model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")}
# Without the logit scale parameters, everything is float32
model_dtypes.pop("text_encoder")
self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values()))
# the CLAP sub-models are float32
model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype
self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values()))
# Once we send to fp16, all params are in half-precision, including the logit scale
pipe.to(dtype=torch.float16)
model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")}
self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values()))
def test_sequential_cpu_offload_forward_pass(self):
pass
@nightly
class AudioLDM2PipelineSlowTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
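# Seeded NumPy latents keep the initial noise identical across devices and dtypes, so only the
# denoising loop contributes to differences between runs.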
latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
inputs = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def get_inputs_tts(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
inputs = {
"prompt": "A men saying",
"transcription": "hello my name is John",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def test_audioldm2(self):
audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2")
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 25
audio = audioldm_pipe(**inputs).audios[0]
assert audio.ndim == 1
assert len(audio) == 81952
# check the portion of the generated audio with the largest dynamic range (reduces flakiness)
audio_slice = audio[17275:17285]
expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127])
max_diff = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-3
def test_audioldm2_lms(self):
audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2")
audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
audio = audioldm_pipe(**inputs).audios[0]
assert audio.ndim == 1
assert len(audio) == 81952
# check the portion of the generated audio with the largest dynamic range (reduces flakiness)
audio_slice = audio[31390:31400]
expected_slice = np.array(
[-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301]
)
max_diff = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-3
def test_audioldm2_large(self):
audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large")
audioldm_pipe = audioldm_pipe.to(torch_device)
audioldm_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
audio = audioldm_pipe(**inputs).audios[0]
assert audio.ndim == 1
assert len(audio) == 81952
# check the portion of the generated audio with the largest dynamic range (reduces flakiness)
audio_slice = audio[8825:8835]
expected_slice = np.array(
[-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244]
)
max_diff = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-3
def test_audioldm2_tts(self):
audioldm_tts_pipe = AudioLDM2Pipeline.from_pretrained("anhnct/audioldm2_gigaspeech")
audioldm_tts_pipe = audioldm_tts_pipe.to(torch_device)
audioldm_tts_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs_tts(torch_device)
audio = audioldm_tts_pipe(**inputs).audios[0]
assert audio.ndim == 1
assert len(audio) == 81952
# check the portion of the generated audio with the largest dynamic range (reduces flakiness)
audio_slice = audio[8825:8835]
expected_slice = np.array(
[-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244]
)
max_diff = np.abs(expected_slice - audio_slice).max()
assert max_diff < 1e-3
| diffusers/tests/pipelines/audioldm2/test_audioldm2.py/0 | {
"file_path": "diffusers/tests/pipelines/audioldm2/test_audioldm2.py",
"repo_id": "diffusers",
"token_count": 11126
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
SD3Transformer2DModel,
StableDiffusion3ControlNetInpaintingPipeline,
)
from diffusers.models import SD3ControlNetModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class StableDiffusion3ControlInpaintNetPipelineFastTests(unittest.TestCase, PipelineTesterMixin):
pipeline_class = StableDiffusion3ControlNetInpaintingPipeline
params = frozenset(
[
"prompt",
"height",
"width",
"guidance_scale",
"negative_prompt",
"prompt_embeds",
"negative_prompt_embeds",
]
)
batch_params = frozenset(["prompt", "negative_prompt"])
def get_dummy_components(self):
torch.manual_seed(0)
transformer = SD3Transformer2DModel(
sample_size=32,
patch_size=1,
in_channels=8,
num_layers=4,
attention_head_dim=8,
num_attention_heads=4,
joint_attention_dim=32,
caption_projection_dim=32,
pooled_projection_dim=64,
out_channels=8,
)
torch.manual_seed(0)
controlnet = SD3ControlNetModel(
sample_size=32,
patch_size=1,
in_channels=8,
num_layers=1,
attention_head_dim=8,
num_attention_heads=4,
joint_attention_dim=32,
caption_projection_dim=32,
pooled_projection_dim=64,
out_channels=8,
extra_conditioning_channels=1,
)
clip_text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=32,
)
torch.manual_seed(0)
text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)
torch.manual_seed(0)
text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)
torch.manual_seed(0)
text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=8,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
return {
"scheduler": scheduler,
"text_encoder": text_encoder,
"text_encoder_2": text_encoder_2,
"text_encoder_3": text_encoder_3,
"tokenizer": tokenizer,
"tokenizer_2": tokenizer_2,
"tokenizer_3": tokenizer_3,
"transformer": transformer,
"vae": vae,
"controlnet": controlnet,
"image_encoder": None,
"feature_extractor": None,
}
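# Random control image and inpainting mask tensors stand in for real conditioning inputs; fp16 is
# used because the fast test runs the pipeline in half precision.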
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
control_image = randn_tensor(
(1, 3, 32, 32),
generator=generator,
device=torch.device(device),
dtype=torch.float16,
)
control_mask = randn_tensor(
(1, 1, 32, 32),
generator=generator,
device=torch.device(device),
dtype=torch.float16,
)
controlnet_conditioning_scale = 0.95
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.0,
"output_type": "np",
"control_image": control_image,
"control_mask": control_mask,
"controlnet_conditioning_scale": controlnet_conditioning_scale,
}
return inputs
def test_controlnet_inpaint_sd3(self):
components = self.get_dummy_components()
sd_pipe = StableDiffusion3ControlNetInpaintingPipeline(**components)
sd_pipe = sd_pipe.to(torch_device, dtype=torch.float16)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
output = sd_pipe(**inputs)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array(
[0.51708984, 0.7421875, 0.4580078, 0.6435547, 0.65625, 0.43603516, 0.5151367, 0.65722656, 0.60839844]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f"Expected: {expected_slice}, got: {image_slice.flatten()}"
@unittest.skip("xFormersAttnProcessor does not work with SD3 Joint Attention")
def test_xformers_attention_forwardGenerator_pass(self):
pass
| diffusers/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py/0 | {
"file_path": "diffusers/tests/pipelines/controlnet_sd3/test_controlnet_inpaint_sd3.py",
"repo_id": "diffusers",
"token_count": 3184
} |
# Copyright 2024 The HuggingFace Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer, LlamaConfig, LlamaModel, LlamaTokenizer
from diffusers import (
AutoencoderKLHunyuanVideo,
FlowMatchEulerDiscreteScheduler,
HunyuanVideoPipeline,
HunyuanVideoTransformer3DModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np
enable_full_determinism()
class HunyuanVideoPipelineFastTests(PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase):
pipeline_class = HunyuanVideoPipeline
params = frozenset(["prompt", "height", "width", "guidance_scale", "prompt_embeds", "pooled_prompt_embeds"])
batch_params = frozenset(["prompt"])
required_optional_params = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback_on_step_end",
"callback_on_step_end_tensor_inputs",
]
)
# no xformers attention processor is available for this transformer
test_xformers_attention = False
test_layerwise_casting = True
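# Build a tiny HunyuanVideo stack (3D transformer, video VAE, flow-matching scheduler, Llama and
# CLIP text encoders) so the fast tests stay lightweight.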
def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1):
torch.manual_seed(0)
transformer = HunyuanVideoTransformer3DModel(
in_channels=4,
out_channels=4,
num_attention_heads=2,
attention_head_dim=10,
num_layers=num_layers,
num_single_layers=num_single_layers,
num_refiner_layers=1,
patch_size=1,
patch_size_t=1,
guidance_embeds=True,
text_embed_dim=16,
pooled_projection_dim=8,
rope_axes_dim=(2, 4, 4),
)
torch.manual_seed(0)
vae = AutoencoderKLHunyuanVideo(
in_channels=3,
out_channels=3,
latent_channels=4,
down_block_types=(
"HunyuanVideoDownBlock3D",
"HunyuanVideoDownBlock3D",
"HunyuanVideoDownBlock3D",
"HunyuanVideoDownBlock3D",
),
up_block_types=(
"HunyuanVideoUpBlock3D",
"HunyuanVideoUpBlock3D",
"HunyuanVideoUpBlock3D",
"HunyuanVideoUpBlock3D",
),
block_out_channels=(8, 8, 8, 8),
layers_per_block=1,
act_fn="silu",
norm_num_groups=4,
scaling_factor=0.476986,
spatial_compression_ratio=8,
temporal_compression_ratio=4,
mid_block_add_attention=True,
)
torch.manual_seed(0)
scheduler = FlowMatchEulerDiscreteScheduler(shift=7.0)
llama_text_encoder_config = LlamaConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=16,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=2,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=32,
)
clip_text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=8,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=2,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=32,
)
torch.manual_seed(0)
text_encoder = LlamaModel(llama_text_encoder_config)
tokenizer = LlamaTokenizer.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer")
torch.manual_seed(0)
text_encoder_2 = CLIPTextModel(clip_text_encoder_config)
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"transformer": transformer,
"vae": vae,
"scheduler": scheduler,
"text_encoder": text_encoder,
"text_encoder_2": text_encoder_2,
"tokenizer": tokenizer,
"tokenizer_2": tokenizer_2,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "dance monkey",
"prompt_template": {
"template": "{}",
"crop_start": 0,
},
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 4.5,
"height": 16,
"width": 16,
# num_frames of the form 4 * k + 1 is recommended to match the VAE's temporal compression ratio of 4
"num_frames": 9,
"max_sequence_length": 16,
"output_type": "pt",
}
return inputs
def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
video = pipe(**inputs).frames
generated_video = video[0]
self.assertEqual(generated_video.shape, (9, 3, 16, 16))
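# The comparison below is only a smoke check on the generated tensor; the 1e10 tolerance is
# effectively unbounded, so the test asserts shape and successful execution rather than values.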
expected_video = torch.randn(9, 3, 16, 16)
max_diff = np.abs(generated_video - expected_video).max()
self.assertLessEqual(max_diff, 1e10)
def test_callback_inputs(self):
sig = inspect.signature(self.pipeline_class.__call__)
has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
has_callback_step_end = "callback_on_step_end" in sig.parameters
if not (has_callback_tensor_inputs and has_callback_step_end):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_subset(pipe, i, t, callback_kwargs):
# iterate over callback args
for tensor_name, tensor_value in callback_kwargs.items():
# check that we're only passing in allowed tensor inputs
assert tensor_name in pipe._callback_tensor_inputs
return callback_kwargs
def callback_inputs_all(pipe, i, t, callback_kwargs):
for tensor_name in pipe._callback_tensor_inputs:
assert tensor_name in callback_kwargs
# iterate over callback args
for tensor_name, tensor_value in callback_kwargs.items():
# check that we're only passing in allowed tensor inputs
assert tensor_name in pipe._callback_tensor_inputs
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
# Test passing in a subset
inputs["callback_on_step_end"] = callback_inputs_subset
inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
output = pipe(**inputs)[0]
# Test passing in everything
inputs["callback_on_step_end"] = callback_inputs_all
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
output = pipe(**inputs)[0]
def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
is_last = i == (pipe.num_timesteps - 1)
if is_last:
callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
return callback_kwargs
inputs["callback_on_step_end"] = callback_inputs_change_tensor
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
output = pipe(**inputs)[0]
assert output.abs().sum() < 1e10
def test_attention_slicing_forward_pass(
self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3
):
if not self.test_attention_slicing:
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
for component in pipe.components.values():
if hasattr(component, "set_default_attn_processor"):
component.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
generator_device = "cpu"
inputs = self.get_dummy_inputs(generator_device)
output_without_slicing = pipe(**inputs)[0]
pipe.enable_attention_slicing(slice_size=1)
inputs = self.get_dummy_inputs(generator_device)
output_with_slicing1 = pipe(**inputs)[0]
pipe.enable_attention_slicing(slice_size=2)
inputs = self.get_dummy_inputs(generator_device)
output_with_slicing2 = pipe(**inputs)[0]
if test_max_difference:
max_diff1 = np.abs(to_np(output_with_slicing1) - to_np(output_without_slicing)).max()
max_diff2 = np.abs(to_np(output_with_slicing2) - to_np(output_without_slicing)).max()
self.assertLess(
max(max_diff1, max_diff2),
expected_max_diff,
"Attention slicing should not affect the inference results",
)
def test_vae_tiling(self, expected_diff_max: float = 0.2):
# Seems to require higher tolerance than the other tests
expected_diff_max = 0.6
generator_device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to("cpu")
pipe.set_progress_bar_config(disable=None)
# Without tiling
inputs = self.get_dummy_inputs(generator_device)
inputs["height"] = inputs["width"] = 128
output_without_tiling = pipe(**inputs)[0]
# With tiling
pipe.vae.enable_tiling(
tile_sample_min_height=96,
tile_sample_min_width=96,
tile_sample_stride_height=64,
tile_sample_stride_width=64,
)
inputs = self.get_dummy_inputs(generator_device)
inputs["height"] = inputs["width"] = 128
output_with_tiling = pipe(**inputs)[0]
self.assertLess(
(to_np(output_without_tiling) - to_np(output_with_tiling)).max(),
expected_diff_max,
"VAE tiling should not affect the inference results",
)
# TODO(aryan): Create a dummy gemma model with smol vocab size
@unittest.skip(
"A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error."
)
def test_inference_batch_consistent(self):
pass
@unittest.skip(
"A very small vocab size is used for fast tests. So, any kind of prompt other than the empty default used in other tests will lead to a embedding lookup error. This test uses a long prompt that causes the error."
)
def test_inference_batch_single_identical(self):
pass
| diffusers/tests/pipelines/hunyuan_video/test_hunyuan_video.py/0 | {
"file_path": "diffusers/tests/pipelines/hunyuan_video/test_hunyuan_video.py",
"repo_id": "diffusers",
"token_count": 5678
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyV22InpaintPipeline,
KandinskyV22PriorPipeline,
UNet2DConditionModel,
VQModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
is_flaky,
load_image,
load_numpy,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class Dummies:
@property
def text_embedder_hidden_size(self):
return 32
@property
def time_input_dim(self):
return 32
@property
def block_out_channels_0(self):
return self.time_input_dim
@property
def time_embed_dim(self):
return self.time_input_dim * 4
@property
def cross_attention_dim(self):
return 32
@property
def dummy_unet(self):
torch.manual_seed(0)
model_kwargs = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
model = UNet2DConditionModel(**model_kwargs)
return model
@property
def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq(self):
torch.manual_seed(0)
model = VQModel(**self.dummy_movq_kwargs)
return model
def get_dummy_components(self):
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_schedule="linear",
beta_start=0.00085,
beta_end=0.012,
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1,
prediction_type="epsilon",
thresholding=False,
)
components = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def get_dummy_inputs(self, device, seed=0):
image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
device
)
# create init_image
image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
# create mask
mask = np.zeros((64, 64), dtype=np.float32)
mask[:32, :32] = 1
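# binary mask: the top-left 32x32 quadrant is set to 1, the rest to 0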
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
pipeline_class = KandinskyV22InpaintPipeline
params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"num_images_per_prompt",
"output_type",
"return_dict",
]
test_xformers_attention = False
callback_cfg_params = ["image_embeds", "masked_image", "mask_image"]
def get_dummy_components(self):
dummies = Dummies()
return dummies.get_dummy_components()
def get_dummy_inputs(self, device, seed=0):
dummies = Dummies()
return dummies.get_dummy_inputs(device=device, seed=seed)
def test_kandinsky_inpaint(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(device)
pipe.set_progress_bar_config(disable=None)
output = pipe(**self.get_dummy_inputs(device))
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device),
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
)
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
@is_flaky()
def test_model_cpu_offload_forward_pass(self):
super().test_inference_batch_single_identical(expected_max_diff=8e-4)
def test_save_load_optional_components(self):
super().test_save_load_optional_components(expected_max_difference=5e-4)
def test_sequential_cpu_offload_forward_pass(self):
super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4)
# override default test because we need to zero out mask too in order to make sure final latent is all zero
def test_callback_inputs(self):
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_test(pipe, i, t, callback_kwargs):
missing_callback_inputs = set()
for v in pipe._callback_tensor_inputs:
if v not in callback_kwargs:
missing_callback_inputs.add(v)
self.assertTrue(
len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}"
)
last_i = pipe.num_timesteps - 1
if i == last_i:
callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"])
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
inputs["callback_on_step_end"] = callback_inputs_test
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
inputs["output_type"] = "latent"
output = pipe(**inputs)[0]
assert output.abs().sum() == 0
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_inpaint(self):
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
)
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
)
mask = np.zeros((768, 768), dtype=np.float32)
mask[:250, 250:-250] = 1
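# binary mask: ones over the top-center band (rows < 250, columns 250:-250), presumably where the hat should be generated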
prompt = "a hat"
pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to(torch_device)
pipeline = KandinskyV22InpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
)
pipeline = pipeline.to(torch_device)
pipeline.set_progress_bar_config(disable=None)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
prompt,
generator=generator,
num_inference_steps=2,
negative_prompt="",
).to_tuple()
generator = torch.Generator(device="cpu").manual_seed(0)
output = pipeline(
image=init_image,
mask_image=mask,
image_embeds=image_emb,
negative_image_embeds=zero_image_emb,
generator=generator,
num_inference_steps=2,
height=768,
width=768,
output_type="np",
)
image = output.images[0]
assert image.shape == (768, 768, 3)
max_diff = numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten())
assert max_diff < 1e-4
| diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py/0 | {
"file_path": "diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py",
"repo_id": "diffusers",
"token_count": 5553
} |
# coding=utf-8
# Copyright 2024 Latte Team and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import inspect
import tempfile
import unittest
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKL,
DDIMScheduler,
LattePipeline,
LatteTransformer3DModel,
PyramidAttentionBroadcastConfig,
)
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
numpy_cosine_similarity_distance,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, to_np
enable_full_determinism()
class LattePipelineFastTests(PipelineTesterMixin, PyramidAttentionBroadcastTesterMixin, unittest.TestCase):
pipeline_class = LattePipeline
params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"}
batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
required_optional_params = PipelineTesterMixin.required_optional_params
test_layerwise_casting = True
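# Configuration consumed by PyramidAttentionBroadcastTesterMixin: skip ranges for spatial,
# temporal and cross attention blocks plus the timestep windows in which skipping is allowed.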
pab_config = PyramidAttentionBroadcastConfig(
spatial_attention_block_skip_range=2,
temporal_attention_block_skip_range=2,
cross_attention_block_skip_range=2,
spatial_attention_timestep_skip_range=(100, 700),
temporal_attention_timestep_skip_range=(100, 800),
cross_attention_timestep_skip_range=(100, 800),
spatial_attention_block_identifiers=["transformer_blocks"],
temporal_attention_block_identifiers=["temporal_transformer_blocks"],
cross_attention_block_identifiers=["transformer_blocks"],
)
def get_dummy_components(self, num_layers: int = 1):
torch.manual_seed(0)
transformer = LatteTransformer3DModel(
sample_size=8,
num_layers=num_layers,
patch_size=2,
attention_head_dim=8,
num_attention_heads=3,
caption_channels=32,
in_channels=4,
cross_attention_dim=24,
out_channels=8,
attention_bias=True,
activation_fn="gelu-approximate",
num_embeds_ada_norm=1000,
norm_type="ada_norm_single",
norm_elementwise_affine=False,
norm_eps=1e-6,
)
torch.manual_seed(0)
vae = AutoencoderKL()
scheduler = DDIMScheduler()
text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
components = {
"transformer": transformer.eval(),
"vae": vae.eval(),
"scheduler": scheduler,
"text_encoder": text_encoder.eval(),
"tokenizer": tokenizer,
}
return components
def get_dummy_inputs(self, device, seed=0):
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"negative_prompt": "low quality",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 8,
"width": 8,
"video_length": 1,
"output_type": "pt",
"clean_caption": False,
}
return inputs
def test_inference(self):
device = "cpu"
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe.to(device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
video = pipe(**inputs).frames
generated_video = video[0]
self.assertEqual(generated_video.shape, (1, 3, 8, 8))
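# Smoke check only: the 1e10 tolerance is effectively unbounded, so the test verifies shape and
# successful execution rather than output values.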
expected_video = torch.randn(1, 3, 8, 8)
max_diff = np.abs(generated_video - expected_video).max()
self.assertLessEqual(max_diff, 1e10)
def test_callback_inputs(self):
sig = inspect.signature(self.pipeline_class.__call__)
has_callback_tensor_inputs = "callback_on_step_end_tensor_inputs" in sig.parameters
has_callback_step_end = "callback_on_step_end" in sig.parameters
if not (has_callback_tensor_inputs and has_callback_step_end):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
self.assertTrue(
hasattr(pipe, "_callback_tensor_inputs"),
f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs",
)
def callback_inputs_subset(pipe, i, t, callback_kwargs):
# iterate over callback args
for tensor_name, tensor_value in callback_kwargs.items():
# check that we're only passing in allowed tensor inputs
assert tensor_name in pipe._callback_tensor_inputs
return callback_kwargs
def callback_inputs_all(pipe, i, t, callback_kwargs):
for tensor_name in pipe._callback_tensor_inputs:
assert tensor_name in callback_kwargs
# iterate over callback args
for tensor_name, tensor_value in callback_kwargs.items():
# check that we're only passing in allowed tensor inputs
assert tensor_name in pipe._callback_tensor_inputs
return callback_kwargs
inputs = self.get_dummy_inputs(torch_device)
# Test passing in a subset
inputs["callback_on_step_end"] = callback_inputs_subset
inputs["callback_on_step_end_tensor_inputs"] = ["latents"]
output = pipe(**inputs)[0]
# Test passing in everything
inputs["callback_on_step_end"] = callback_inputs_all
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
output = pipe(**inputs)[0]
def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):
is_last = i == (pipe.num_timesteps - 1)
if is_last:
callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"])
return callback_kwargs
inputs["callback_on_step_end"] = callback_inputs_change_tensor
inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs
output = pipe(**inputs)[0]
assert output.abs().sum() < 1e10
def test_inference_batch_single_identical(self):
self._test_inference_batch_single_identical(batch_size=3, expected_max_diff=1e-3)
def test_attention_slicing_forward_pass(self):
pass
def test_save_load_optional_components(self):
if not hasattr(self.pipeline_class, "_optional_components"):
return
components = self.get_dummy_components()
pipe = self.pipeline_class(**components)
for component in pipe.components.values():
if hasattr(component, "set_default_attn_processor"):
component.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
prompt = inputs["prompt"]
generator = inputs["generator"]
(
prompt_embeds,
negative_prompt_embeds,
) = pipe.encode_prompt(prompt)
# inputs with prompt converted to embeddings
inputs = {
"prompt_embeds": prompt_embeds,
"negative_prompt": None,
"negative_prompt_embeds": negative_prompt_embeds,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"height": 8,
"width": 8,
"video_length": 1,
"mask_feature": False,
"output_type": "pt",
"clean_caption": False,
}
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(pipe, optional_component, None)
output = pipe(**inputs)[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(tmpdir, safe_serialization=False)
pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
pipe_loaded.to(torch_device)
for component in pipe_loaded.components.values():
if hasattr(component, "set_default_attn_processor"):
component.set_default_attn_processor()
pipe_loaded.set_progress_bar_config(disable=None)
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(pipe_loaded, optional_component) is None,
f"`{optional_component}` did not stay set to None after loading.",
)
output_loaded = pipe_loaded(**inputs)[0]
max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
self.assertLess(max_diff, 1.0)
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(),
reason="XFormers attention is only available with CUDA and `xformers` installed",
)
def test_xformers_attention_forwardGenerator_pass(self):
super()._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
@require_torch_gpu
class LattePipelineIntegrationTests(unittest.TestCase):
prompt = "A painting of a squirrel eating a burger."
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_latte(self):
generator = torch.Generator("cpu").manual_seed(0)
pipe = LattePipeline.from_pretrained("maxin-cn/Latte-1", torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
prompt = self.prompt
videos = pipe(
prompt=prompt,
height=512,
width=512,
generator=generator,
num_inference_steps=2,
clean_caption=False,
).frames
video = videos[0]
expected_video = torch.randn(1, 512, 512, 3).numpy()
max_diff = numpy_cosine_similarity_distance(video.flatten(), expected_video)
assert max_diff < 1e-3, f"Max diff is too high. got {video.flatten()}"
| diffusers/tests/pipelines/latte/test_latte.py/0 | {
"file_path": "diffusers/tests/pipelines/latte/test_latte.py",
"repo_id": "diffusers",
"token_count": 5024
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.semantic_stable_diffusion import SemanticStableDiffusionPipeline as StableDiffusionPipeline
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
nightly,
require_accelerator,
require_torch_gpu,
torch_device,
)
enable_full_determinism()
class SafeDiffusionPipelineFastTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image(self):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def dummy_cond_unet(self):
torch.manual_seed(0)
model = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
return model
@property
def dummy_vae(self):
torch.manual_seed(0)
model = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
return model
@property
def dummy_text_encoder(self):
torch.manual_seed(0)
config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
return CLIPTextModel(config)
@property
def dummy_extractor(self):
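# stub feature extractor: returns an object whose .pixel_values is an empty tensor, which is
# enough for pipelines constructed without a safety checker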
def extract(*args, **kwargs):
class Out:
def __init__(self):
self.pixel_values = torch.ones([0])
def to(self, device):
self.pixel_values.to(device)
return self
return Out()
return extract
def test_semantic_diffusion_ddim(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unet = self.dummy_cond_unet
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
# assemble the pipeline around the DDIM scheduler configured above
sd_pipe = StableDiffusionPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=device).manual_seed(0)
output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
image = output.images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = sd_pipe(
[prompt],
generator=generator,
guidance_scale=6.0,
num_inference_steps=2,
output_type="np",
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5753, 0.6114, 0.5001, 0.5034, 0.5470, 0.4729, 0.4971, 0.4867, 0.4867])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_semantic_diffusion_pndm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
# make sure here that pndm scheduler skips prk
sd_pipe = StableDiffusionPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
generator = torch.Generator(device=device).manual_seed(0)
output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
image = output.images
generator = torch.Generator(device=device).manual_seed(0)
image_from_tuple = sd_pipe(
[prompt],
generator=generator,
guidance_scale=6.0,
num_inference_steps=2,
output_type="np",
return_dict=False,
)[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array([0.5122, 0.5712, 0.4825, 0.5053, 0.5646, 0.4769, 0.5179, 0.4894, 0.4994])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
def test_semantic_diffusion_no_safety_checker(self):
pipe = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
)
assert isinstance(pipe, StableDiffusionPipeline)
assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
assert pipe.safety_checker is None
image = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(tmpdirname)
pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)
# sanity check that the pipeline still works
assert pipe.safety_checker is None
image = pipe("example prompt", num_inference_steps=2).images[0]
assert image is not None
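# The round trip above (save_pretrained followed by from_pretrained on the same
# directory) guards against regressions where a None-valued component such as the
# safety checker breaks pipeline serialization or reloading.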
@require_accelerator
def test_semantic_diffusion_fp16(self):
"""Test that stable diffusion works with fp16"""
unet = self.dummy_cond_unet
scheduler = PNDMScheduler(skip_prk_steps=True)
vae = self.dummy_vae
bert = self.dummy_text_encoder
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
# put models in fp16
unet = unet.half()
vae = vae.half()
bert = bert.half()
# make sure here that pndm scheduler skips prk
sd_pipe = StableDiffusionPipeline(
unet=unet,
scheduler=scheduler,
vae=vae,
text_encoder=bert,
tokenizer=tokenizer,
safety_checker=None,
feature_extractor=self.dummy_extractor,
)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
prompt = "A painting of a squirrel eating a burger"
image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase):
def setUp(self):
# clean up the VRAM before each test
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_positive_guidance(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
prompt = "a photo of a cat"
edit = {
"editing_prompt": ["sunglasses"],
"reverse_editing_direction": [False],
"edit_warmup_steps": 10,
"edit_guidance_scale": 6,
"edit_threshold": 0.95,
"edit_momentum_scale": 0.5,
"edit_mom_beta": 0.6,
}
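# Rough meaning of the SEGA kwargs above (hedged summary; see the semantic guidance
# pipeline docs for the authoritative definitions): `editing_prompt` names the concept
# to steer towards, `reverse_editing_direction` flips it to removal, `edit_warmup_steps`
# delays the edit guidance for the first steps, `edit_guidance_scale` and `edit_threshold`
# control its strength and the latent mask quantile, and the momentum terms smooth the
# edit direction across steps.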
seed = 3
guidance_scale = 7
# no sega enabled
generator = torch.Generator(torch_device)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.34673113,
0.38492733,
0.37597352,
0.34086335,
0.35650748,
0.35579205,
0.3384763,
0.34340236,
0.3573271,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with sega enabled
# generator = torch.manual_seed(seed)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
**edit,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.41887826,
0.37728766,
0.30138272,
0.41416335,
0.41664985,
0.36283392,
0.36191246,
0.43364465,
0.43001732,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_negative_guidance(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
prompt = "an image of a crowded boulevard, realistic, 4k"
edit = {
"editing_prompt": "crowd, crowded, people",
"reverse_editing_direction": True,
"edit_warmup_steps": 10,
"edit_guidance_scale": 8.3,
"edit_threshold": 0.9,
"edit_momentum_scale": 0.5,
"edit_mom_beta": 0.6,
}
seed = 9
guidance_scale = 7
# no sega enabled
generator = torch.Generator(torch_device)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.43497998,
0.91814065,
0.7540739,
0.55580205,
0.8467265,
0.5389691,
0.62574506,
0.58897763,
0.50926757,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with sega enabled
# generator = torch.manual_seed(seed)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
**edit,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.3089719,
0.30500144,
0.29016042,
0.30630964,
0.325687,
0.29419225,
0.2908091,
0.28723598,
0.27696294,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_multi_cond_guidance(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
prompt = "a castle next to a river"
edit = {
"editing_prompt": ["boat on a river, boat", "monet, impression, sunrise"],
"reverse_editing_direction": False,
"edit_warmup_steps": [15, 18],
"edit_guidance_scale": 6,
"edit_threshold": [0.9, 0.8],
"edit_momentum_scale": 0.5,
"edit_mom_beta": 0.6,
}
seed = 48
guidance_scale = 7
# no sega enabled
generator = torch.Generator(torch_device)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.75163555,
0.76037145,
0.61785,
0.9189673,
0.8627701,
0.85189694,
0.8512813,
0.87012076,
0.8312857,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with sega enabled
# generator = torch.manual_seed(seed)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
**edit,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.73553365,
0.7537271,
0.74341905,
0.66480356,
0.6472925,
0.63039416,
0.64812905,
0.6749717,
0.6517102,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def test_guidance_fp16(self):
torch_device = "cuda"
pipe = StableDiffusionPipeline.from_pretrained(
"stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
prompt = "a photo of a cat"
edit = {
"editing_prompt": ["sunglasses"],
"reverse_editing_direction": [False],
"edit_warmup_steps": 10,
"edit_guidance_scale": 6,
"edit_threshold": 0.95,
"edit_momentum_scale": 0.5,
"edit_mom_beta": 0.6,
}
seed = 3
guidance_scale = 7
# no sega enabled
generator = torch.Generator(torch_device)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.34887695,
0.3876953,
0.375,
0.34423828,
0.3581543,
0.35717773,
0.3383789,
0.34570312,
0.359375,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
# with sega enabled
# generator = torch.manual_seed(seed)
generator.manual_seed(seed)
output = pipe(
[prompt],
generator=generator,
guidance_scale=guidance_scale,
num_inference_steps=50,
output_type="np",
width=512,
height=512,
**edit,
)
image = output.images
image_slice = image[0, -3:, -3:, -1]
expected_slice = [
0.42285156,
0.36914062,
0.29077148,
0.42041016,
0.41918945,
0.35498047,
0.3618164,
0.4423828,
0.43115234,
]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| diffusers/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py/0 | {
"file_path": "diffusers/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py",
"repo_id": "diffusers",
"token_count": 9781
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import traceback
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
AutoencoderTiny,
DDIMScheduler,
DPMSolverMultistepScheduler,
HeunDiscreteScheduler,
LCMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionImg2ImgPipeline,
UNet2DConditionModel,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
is_torch_compile,
load_image,
load_numpy,
nightly,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS,
)
from ..test_pipelines_common import (
IPAdapterTesterMixin,
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
# Will be run via run_test_in_subprocess
def _test_img2img_compile(in_queue, out_queue, timeout):
error = None
try:
inputs = in_queue.get(timeout=timeout)
torch_device = inputs.pop("torch_device")
seed = inputs.pop("seed")
inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.unet.set_default_attn_processor()
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.unet.to(memory_format=torch.channels_last)
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 768, 3)
expected_slice = np.array([0.0606, 0.0570, 0.0805, 0.0579, 0.0628, 0.0623, 0.0843, 0.1115, 0.0806])
assert np.abs(expected_slice - image_slice).max() < 1e-3
except Exception:
error = f"{traceback.format_exc()}"
results = {"error": error}
out_queue.put(results, timeout=timeout)
out_queue.join()
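# Worker protocol (as exercised by `run_test_in_subprocess` further below): the parent
# process is expected to put the pickled `inputs` dict on `in_queue`; the worker rebuilds
# the generator from the seed, runs the compiled pipeline, and reports either None or a
# formatted traceback through `out_queue` so the parent can fail the test with context.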
class StableDiffusionImg2ImgPipelineFastTests(
IPAdapterTesterMixin,
PipelineLatentTesterMixin,
PipelineKarrasSchedulerTesterMixin,
PipelineTesterMixin,
unittest.TestCase,
):
pipeline_class = StableDiffusionImg2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS
def get_dummy_components(self, time_cond_proj_dim=None):
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(32, 64),
layers_per_block=2,
time_cond_proj_dim=time_cond_proj_dim,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=32,
)
scheduler = PNDMScheduler(skip_prk_steps=True)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[32, 64],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_tiny_autoencoder(self):
return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4)
def get_dummy_inputs(self, device, seed=0):
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image / 2 + 0.5
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
}
return inputs
def test_stable_diffusion_img2img_default_case(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4555, 0.3216, 0.4049, 0.4620, 0.4618, 0.4126, 0.4122, 0.4629, 0.4579])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_img2img_default_case_lcm(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_img2img_default_case_lcm_custom_timesteps(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components(time_cond_proj_dim=256)
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
del inputs["num_inference_steps"]
inputs["timesteps"] = [999, 499]
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_img2img_negative_prompt(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
negative_prompt = "french fries"
output = sd_pipe(**inputs, negative_prompt=negative_prompt)
image = output.images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4593, 0.3408, 0.4232, 0.4749, 0.4476, 0.4115, 0.4357, 0.4733, 0.4663])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_ip_adapter(self):
expected_pipe_slice = None
if torch_device == "cpu":
expected_pipe_slice = np.array([0.4932, 0.5092, 0.5135, 0.5517, 0.5626, 0.6621, 0.6490, 0.5021, 0.5441])
return super().test_ip_adapter(expected_pipe_slice=expected_pipe_slice)
def test_stable_diffusion_img2img_multiple_init_images(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
inputs["prompt"] = [inputs["prompt"]] * 2
inputs["image"] = inputs["image"].repeat(2, 1, 1, 1)
image = sd_pipe(**inputs).images
image_slice = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
expected_slice = np.array([0.4241, 0.5576, 0.5711, 0.4792, 0.4311, 0.5952, 0.5827, 0.5138, 0.5109])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_img2img_k_lms(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
components["scheduler"] = LMSDiscreteScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
)
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4398, 0.4949, 0.4337, 0.6580, 0.5555, 0.4338, 0.5769, 0.5955, 0.5175])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_stable_diffusion_img2img_tiny_autoencoder(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe.vae = self.get_dummy_tiny_autoencoder()
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.00669, 0.00669, 0.0, 0.00693, 0.00858, 0.0, 0.00567, 0.00515, 0.00125])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@skip_mps
def test_save_load_local(self):
return super().test_save_load_local()
@skip_mps
def test_dict_tuple_outputs_equivalent(self):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def test_save_load_optional_components(self):
return super().test_save_load_optional_components()
@skip_mps
def test_attention_slicing_forward_pass(self):
return super().test_attention_slicing_forward_pass(expected_max_diff=5e-3)
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
def test_float16_inference(self):
super().test_float16_inference(expected_max_diff=5e-1)
def test_pipeline_interrupt(self):
components = self.get_dummy_components()
sd_pipe = StableDiffusionImg2ImgPipeline(**components)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(torch_device)
prompt = "hey"
num_inference_steps = 3
# store intermediate latents from the generation process
class PipelineState:
def __init__(self):
self.state = []
def apply(self, pipe, i, t, callback_kwargs):
self.state.append(callback_kwargs["latents"])
return callback_kwargs
pipe_state = PipelineState()
sd_pipe(
prompt,
image=inputs["image"],
num_inference_steps=num_inference_steps,
output_type="np",
generator=torch.Generator("cpu").manual_seed(0),
callback_on_step_end=pipe_state.apply,
).images
# interrupt generation at step index
interrupt_step_idx = 1
def callback_on_step_end(pipe, i, t, callback_kwargs):
if i == interrupt_step_idx:
pipe._interrupt = True
return callback_kwargs
output_interrupted = sd_pipe(
prompt,
image=inputs["image"],
num_inference_steps=num_inference_steps,
output_type="latent",
generator=torch.Generator("cpu").manual_seed(0),
callback_on_step_end=callback_on_step_end,
).images
# fetch intermediate latents at the interrupted step
# from the completed generation process
intermediate_latent = pipe_state.state[interrupt_step_idx]
# compare the intermediate latent to the output of the interrupted process
# they should be the same
assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4)
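# A minimal sketch of the interrupt pattern tested above, assuming only the public
# `callback_on_step_end` hook and the `_interrupt` flag used in this test:
#
#   def stop_after(step_idx):
#       def _cb(pipe, i, t, callback_kwargs):
#           if i == step_idx:
#               pipe._interrupt = True
#           return callback_kwargs
#       return _cb
#
# Requesting `output_type="latent"` then yields the latents frozen at that step, which is
# what the `torch.allclose` comparison above relies on.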
@slow
@require_torch_gpu
class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_stable_diffusion_img2img_default(self):
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 768, 3)
expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def test_stable_diffusion_img2img_k_lms(self):
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 768, 3)
expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def test_stable_diffusion_img2img_ddim(self):
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 768, 3)
expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781])
assert np.abs(expected_slice - image_slice).max() < 1e-3
def test_stable_diffusion_img2img_intermediate_state(self):
number_of_steps = 0
def callback_fn(step: int, timestep: int, latents: torch.Tensor) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 96)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
elif step == 2:
latents = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 96)
latents_slice = latents[0, -3:, -3:, -1]
expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
callback_fn.has_been_called = False
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe = pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(torch_device, dtype=torch.float16)
pipe(**inputs, callback=callback_fn, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 2
def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
inputs = self.get_inputs(torch_device, dtype=torch.float16)
_ = pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def test_stable_diffusion_pipeline_with_model_offloading(self):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
inputs = self.get_inputs(torch_device, dtype=torch.float16)
# Normal inference
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
safety_checker=None,
torch_dtype=torch.float16,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe(**inputs)
mem_bytes = torch.cuda.max_memory_allocated()
# With model offloading
# Reload but don't move to cuda
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4",
safety_checker=None,
torch_dtype=torch.float16,
)
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=None)
_ = pipe(**inputs)
mem_bytes_offloaded = torch.cuda.max_memory_allocated()
assert mem_bytes_offloaded < mem_bytes
for module in pipe.text_encoder, pipe.unet, pipe.vae:
assert module.device == torch.device("cpu")
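# Pattern shared by the two offloading tests above: reset the CUDA peak-memory counters,
# run one inference, then read `torch.cuda.max_memory_allocated()`. Comparing the
# offloaded peak against the fully on-device peak (or against an absolute budget, as in
# the sequential-offload test) keeps the assertion robust across GPUs.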
def test_img2img_2nd_order(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 10
inputs["strength"] = 0.75
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 5e-2
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 11
inputs["strength"] = 0.75
image_other = sd_pipe(**inputs).images[0]
mean_diff = np.abs(image - image_other).mean()
# images should be very similar
assert mean_diff < 5e-2
def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
init_image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg"
)
# resize to resolution that is divisible by 8 but not 16 or 32
init_image = init_image.resize((760, 504))
model_id = "CompVis/stable-diffusion-v1-4"
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
model_id,
safety_checker=None,
)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
prompt = "A fantasy landscape, trending on artstation"
generator = torch.manual_seed(0)
output = pipe(
prompt=prompt,
image=init_image,
strength=0.75,
guidance_scale=7.5,
generator=generator,
output_type="np",
)
image = output.images[0]
image_slice = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
def test_img2img_safety_checker_works(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 20
# make sure the safety checker is activated
inputs["prompt"] = "naked, sex, porn"
out = sd_pipe(**inputs)
assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}"
assert np.abs(out.images[0]).sum() < 1e-5 # should be all zeros
@is_torch_compile
@require_torch_2
def test_img2img_compile(self):
seed = 0
inputs = self.get_inputs(torch_device, seed=seed)
# Can't pickle a Generator object
del inputs["generator"]
inputs["torch_device"] = torch_device
inputs["seed"] = seed
run_test_in_subprocess(test_case=self, target_func=_test_img2img_compile, inputs=inputs)
@nightly
@require_torch_gpu
class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
inputs = {
"prompt": "a fantasy landscape, concept art, high resolution",
"image": init_image,
"generator": generator,
"num_inference_steps": 50,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "np",
}
return inputs
def test_img2img_pndm(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_ddim(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_lms(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
def test_img2img_dpm(self):
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config)
sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_inputs(torch_device)
inputs["num_inference_steps"] = 30
image = sd_pipe(**inputs).images[0]
expected_image = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy"
)
max_diff = np.abs(expected_image - image).max()
assert max_diff < 1e-3
| diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py/0 | {
"file_path": "diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py",
"repo_id": "diffusers",
"token_count": 13425
} |
import gc
import random
import unittest
import numpy as np
import pytest
import torch
from transformers import AutoTokenizer, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, T5EncoderModel
from diffusers import (
AutoencoderKL,
FlowMatchEulerDiscreteScheduler,
SD3Transformer2DModel,
StableDiffusion3Img2ImgPipeline,
)
from diffusers.utils import load_image
from diffusers.utils.testing_utils import (
floats_tensor,
numpy_cosine_similarity_distance,
require_big_gpu_with_torch_cuda,
slow,
torch_device,
)
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
class StableDiffusion3Img2ImgPipelineFastTests(PipelineLatentTesterMixin, unittest.TestCase, PipelineTesterMixin):
pipeline_class = StableDiffusion3Img2ImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
required_optional_params = PipelineTesterMixin.required_optional_params
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def get_dummy_components(self):
torch.manual_seed(0)
transformer = SD3Transformer2DModel(
sample_size=32,
patch_size=1,
in_channels=4,
num_layers=1,
attention_head_dim=8,
num_attention_heads=4,
joint_attention_dim=32,
caption_projection_dim=32,
pooled_projection_dim=64,
out_channels=4,
)
clip_text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=32,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
hidden_act="gelu",
projection_dim=32,
)
torch.manual_seed(0)
text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)
torch.manual_seed(0)
text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)
text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")
torch.manual_seed(0)
vae = AutoencoderKL(
sample_size=32,
in_channels=3,
out_channels=3,
block_out_channels=(4,),
layers_per_block=1,
latent_channels=4,
norm_num_groups=1,
use_quant_conv=False,
use_post_quant_conv=False,
shift_factor=0.0609,
scaling_factor=1.5035,
)
scheduler = FlowMatchEulerDiscreteScheduler()
return {
"scheduler": scheduler,
"text_encoder": text_encoder,
"text_encoder_2": text_encoder_2,
"text_encoder_3": text_encoder_3,
"tokenizer": tokenizer,
"tokenizer_2": tokenizer_2,
"tokenizer_3": tokenizer_3,
"transformer": transformer,
"vae": vae,
"image_encoder": None,
"feature_extractor": None,
}
def get_dummy_inputs(self, device, seed=0):
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
inputs = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
"strength": 0.8,
}
return inputs
def test_stable_diffusion_3_img2img_different_prompts(self):
pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
output_same_prompt = pipe(**inputs).images[0]
inputs = self.get_dummy_inputs(torch_device)
inputs["prompt_2"] = "a different prompt"
inputs["prompt_3"] = "another different prompt"
output_different_prompts = pipe(**inputs).images[0]
max_diff = np.abs(output_same_prompt - output_different_prompts).max()
# Outputs should be different here
assert max_diff > 1e-2
def test_stable_diffusion_3_img2img_different_negative_prompts(self):
pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
output_same_prompt = pipe(**inputs).images[0]
inputs = self.get_dummy_inputs(torch_device)
inputs["negative_prompt_2"] = "deformed"
inputs["negative_prompt_3"] = "blurry"
output_different_prompts = pipe(**inputs).images[0]
max_diff = np.abs(output_same_prompt - output_different_prompts).max()
# Outputs should be different here
assert max_diff > 1e-2
def test_stable_diffusion_3_img2img_prompt_embeds(self):
pipe = self.pipeline_class(**self.get_dummy_components()).to(torch_device)
inputs = self.get_dummy_inputs(torch_device)
output_with_prompt = pipe(**inputs).images[0]
inputs = self.get_dummy_inputs(torch_device)
prompt = inputs.pop("prompt")
do_classifier_free_guidance = inputs["guidance_scale"] > 1
(
prompt_embeds,
negative_prompt_embeds,
pooled_prompt_embeds,
negative_pooled_prompt_embeds,
) = pipe.encode_prompt(
prompt,
prompt_2=None,
prompt_3=None,
do_classifier_free_guidance=do_classifier_free_guidance,
device=torch_device,
)
output_with_embeds = pipe(
prompt_embeds=prompt_embeds,
negative_prompt_embeds=negative_prompt_embeds,
pooled_prompt_embeds=pooled_prompt_embeds,
negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
**inputs,
).images[0]
max_diff = np.abs(output_with_prompt - output_with_embeds).max()
assert max_diff < 1e-4
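# The check above relies on `encode_prompt` returning the same four tensors the pipeline
# would compute internally from the string prompt (positive/negative prompt embeddings
# plus their pooled projections), so passing them explicitly must reproduce the image up
# to the 1e-4 tolerance.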
def test_multi_vae(self):
pass
@slow
@require_big_gpu_with_torch_cuda
@pytest.mark.big_gpu_with_torch_cuda
class StableDiffusion3Img2ImgPipelineSlowTests(unittest.TestCase):
pipeline_class = StableDiffusion3Img2ImgPipeline
repo_id = "stabilityai/stable-diffusion-3-medium-diffusers"
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, seed=0):
init_image = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
"/stable_diffusion_img2img/sketch-mountains-input.png"
)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device="cpu").manual_seed(seed)
return {
"prompt": "A photo of a cat",
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "np",
"generator": generator,
"image": init_image,
}
def test_sd3_img2img_inference(self):
pipe = self.pipeline_class.from_pretrained(self.repo_id, torch_dtype=torch.float16)
pipe.enable_model_cpu_offload()
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images[0]
image_slice = image[0, :10, :10]
expected_slice = np.array(
[
0.5435,
0.4673,
0.5732,
0.4438,
0.3557,
0.4912,
0.4331,
0.3491,
0.4915,
0.4287,
0.3477,
0.4849,
0.4355,
0.3469,
0.4871,
0.4431,
0.3538,
0.4912,
0.4521,
0.3643,
0.5059,
0.4587,
0.3730,
0.5166,
0.4685,
0.3845,
0.5264,
0.4746,
0.3914,
0.5342,
]
)
max_diff = numpy_cosine_similarity_distance(expected_slice.flatten(), image_slice.flatten())
assert max_diff < 1e-4, f"Outputs are not close enough, got {max_diff}"
| diffusers/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py/0 | {
"file_path": "diffusers/tests/pipelines/stable_diffusion_3/test_pipeline_stable_diffusion_3_img2img.py",
"repo_id": "diffusers",
"token_count": 4723
} |
import contextlib
import io
import re
import unittest
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AnimateDiffPipeline,
AnimateDiffVideoToVideoPipeline,
AutoencoderKL,
DDIMScheduler,
MotionAdapter,
StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.pipeline_loading_utils import is_safetensors_compatible, variant_compatible_siblings
from diffusers.utils.testing_utils import torch_device
class IsSafetensorsCompatibleTests(unittest.TestCase):
def test_all_is_compatible(self):
filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_model_is_compatible(self):
filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_model_is_not_compatible(self):
filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(filenames))
def test_transformer_model_is_compatible(self):
filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_transformer_model_is_not_compatible(self):
filenames = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(filenames))
def test_all_is_compatible_variant(self):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_model_is_compatible_variant(self):
filenames = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_model_is_compatible_variant_mixed(self):
filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_model_is_not_compatible_variant(self):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
self.assertFalse(is_safetensors_compatible(filenames))
def test_transformer_model_is_compatible_variant(self):
filenames = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_transformer_model_is_not_compatible_variant(self):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertFalse(is_safetensors_compatible(filenames))
def test_transformer_model_is_compatible_variant_extra_folder(self):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames, folder_names={"vae", "unet"}))
def test_transformer_model_is_not_compatible_variant_extra_folder(self):
filenames = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertFalse(is_safetensors_compatible(filenames, folder_names={"text_encoder"}))
def test_transformers_is_compatible_sharded(self):
filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model-00001-of-00002.safetensors",
"text_encoder/model-00002-of-00002.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_transformers_is_compatible_variant_sharded(self):
filenames = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.fp16-00001-of-00002.safetensors",
"text_encoder/model.fp16-00001-of-00002.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_is_compatible_sharded(self):
filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model-00001-of-00002.safetensors",
"unet/diffusion_pytorch_model-00002-of-00002.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_is_compatible_variant_sharded(self):
filenames = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.fp16-00001-of-00002.safetensors",
"unet/diffusion_pytorch_model.fp16-00001-of-00002.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_is_compatible_only_variants(self):
filenames = [
"unet/diffusion_pytorch_model.fp16.safetensors",
]
self.assertTrue(is_safetensors_compatible(filenames))
def test_diffusers_is_compatible_no_components(self):
filenames = [
"diffusion_pytorch_model.bin",
]
self.assertFalse(is_safetensors_compatible(filenames))
def test_diffusers_is_compatible_no_components_only_variants(self):
filenames = [
"diffusion_pytorch_model.fp16.bin",
]
self.assertFalse(is_safetensors_compatible(filenames))
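# Illustrative usage sketch (not part of the upstream suite): the helper below shows how
# a downloader might use `is_safetensors_compatible` to decide whether the .bin weights
# can be skipped entirely. The function name and filtering rule are assumptions for
# illustration, not diffusers API.
def _example_prefer_safetensors(filenames):
    # When every component that ships PyTorch weights also ships safetensors,
    # keep only the safetensors files; otherwise leave the list untouched.
    if is_safetensors_compatible(filenames):
        return [f for f in filenames if f.endswith(".safetensors")]
    return filenames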
class VariantCompatibleSiblingsTest(unittest.TestCase):
def test_only_non_variants_downloaded(self):
variant = "fp16"
filenames = [
f"vae/diffusion_pytorch_model.{variant}.safetensors",
"vae/diffusion_pytorch_model.safetensors",
f"text_encoder/model.{variant}.safetensors",
"text_encoder/model.safetensors",
f"unet/diffusion_pytorch_model.{variant}.safetensors",
"unet/diffusion_pytorch_model.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=None)
assert all(variant not in f for f in model_filenames)
def test_only_variants_downloaded(self):
variant = "fp16"
filenames = [
f"vae/diffusion_pytorch_model.{variant}.safetensors",
"vae/diffusion_pytorch_model.safetensors",
f"text_encoder/model.{variant}.safetensors",
"text_encoder/model.safetensors",
f"unet/diffusion_pytorch_model.{variant}.safetensors",
"unet/diffusion_pytorch_model.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
assert all(variant in f for f in model_filenames)
def test_mixed_variants_downloaded(self):
variant = "fp16"
non_variant_file = "text_encoder/model.safetensors"
filenames = [
f"vae/diffusion_pytorch_model.{variant}.safetensors",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/model.safetensors",
f"unet/diffusion_pytorch_model.{variant}.safetensors",
"unet/diffusion_pytorch_model.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
assert all(variant in f if f != non_variant_file else variant not in f for f in model_filenames)
def test_non_variants_in_main_dir_downloaded(self):
variant = "fp16"
filenames = [
f"diffusion_pytorch_model.{variant}.safetensors",
"diffusion_pytorch_model.safetensors",
"model.safetensors",
f"model.{variant}.safetensors",
f"diffusion_pytorch_model.{variant}.safetensors",
"diffusion_pytorch_model.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=None)
assert all(variant not in f for f in model_filenames)
def test_variants_in_main_dir_downloaded(self):
variant = "fp16"
filenames = [
f"diffusion_pytorch_model.{variant}.safetensors",
"diffusion_pytorch_model.safetensors",
"model.safetensors",
f"model.{variant}.safetensors",
f"diffusion_pytorch_model.{variant}.safetensors",
"diffusion_pytorch_model.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
assert all(variant in f for f in model_filenames)
def test_mixed_variants_in_main_dir_downloaded(self):
variant = "fp16"
non_variant_file = "model.safetensors"
filenames = [
f"diffusion_pytorch_model.{variant}.safetensors",
"diffusion_pytorch_model.safetensors",
"model.safetensors",
f"diffusion_pytorch_model.{variant}.safetensors",
"diffusion_pytorch_model.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
assert all(variant in f if f != non_variant_file else variant not in f for f in model_filenames)
def test_sharded_non_variants_downloaded(self):
variant = "fp16"
filenames = [
f"unet/diffusion_pytorch_model.safetensors.index.{variant}.json",
"unet/diffusion_pytorch_model.safetensors.index.json",
"unet/diffusion_pytorch_model-00001-of-00003.safetensors",
"unet/diffusion_pytorch_model-00002-of-00003.safetensors",
"unet/diffusion_pytorch_model-00003-of-00003.safetensors",
f"unet/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors",
f"unet/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=None)
assert all(variant not in f for f in model_filenames)
def test_sharded_variants_downloaded(self):
variant = "fp16"
filenames = [
f"unet/diffusion_pytorch_model.safetensors.index.{variant}.json",
"unet/diffusion_pytorch_model.safetensors.index.json",
"unet/diffusion_pytorch_model-00001-of-00003.safetensors",
"unet/diffusion_pytorch_model-00002-of-00003.safetensors",
"unet/diffusion_pytorch_model-00003-of-00003.safetensors",
f"unet/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors",
f"unet/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
assert all(variant in f for f in model_filenames)
def test_sharded_mixed_variants_downloaded(self):
variant = "fp16"
allowed_non_variant = "unet"
filenames = [
f"vae/diffusion_pytorch_model.safetensors.index.{variant}.json",
"vae/diffusion_pytorch_model.safetensors.index.json",
"unet/diffusion_pytorch_model.safetensors.index.json",
"unet/diffusion_pytorch_model-00001-of-00003.safetensors",
"unet/diffusion_pytorch_model-00002-of-00003.safetensors",
"unet/diffusion_pytorch_model-00003-of-00003.safetensors",
f"vae/diffusion_pytorch_model.{variant}-00001-of-00002.safetensors",
f"vae/diffusion_pytorch_model.{variant}-00002-of-00002.safetensors",
"vae/diffusion_pytorch_model-00001-of-00003.safetensors",
"vae/diffusion_pytorch_model-00002-of-00003.safetensors",
"vae/diffusion_pytorch_model-00003-of-00003.safetensors",
]
model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)
assert all(variant in f if allowed_non_variant not in f else variant not in f for f in model_filenames)
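# Illustrative usage sketch (assumption-labelled, not upstream code): the tests above pin
# down that `variant_compatible_siblings` resolves the filenames to download for a
# requested variant, falling back to non-variant files per component when needed.
def _example_fp16_download_plan(filenames):
    # Returns the resolved download set and the subset that is variant-specific.
    model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant="fp16")
    return sorted(model_filenames), sorted(variant_filenames)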
class ProgressBarTests(unittest.TestCase):
def get_dummy_components_image_generation(self):
cross_attention_dim = 8
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=(4, 8),
layers_per_block=1,
sample_size=32,
in_channels=4,
out_channels=4,
down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=cross_attention_dim,
norm_num_groups=2,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=[4, 8],
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=cross_attention_dim,
intermediate_size=16,
layer_norm_eps=1e-05,
num_attention_heads=2,
num_hidden_layers=2,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
"image_encoder": None,
}
return components
def get_dummy_components_video_generation(self):
cross_attention_dim = 8
block_out_channels = (8, 8)
torch.manual_seed(0)
unet = UNet2DConditionModel(
block_out_channels=block_out_channels,
layers_per_block=2,
sample_size=8,
in_channels=4,
out_channels=4,
down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
cross_attention_dim=cross_attention_dim,
norm_num_groups=2,
)
scheduler = DDIMScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="linear",
clip_sample=False,
)
torch.manual_seed(0)
vae = AutoencoderKL(
block_out_channels=block_out_channels,
in_channels=3,
out_channels=3,
down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
latent_channels=4,
norm_num_groups=2,
)
torch.manual_seed(0)
text_encoder_config = CLIPTextConfig(
bos_token_id=0,
eos_token_id=2,
hidden_size=cross_attention_dim,
intermediate_size=37,
layer_norm_eps=1e-05,
num_attention_heads=4,
num_hidden_layers=5,
pad_token_id=1,
vocab_size=1000,
)
text_encoder = CLIPTextModel(text_encoder_config)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
torch.manual_seed(0)
motion_adapter = MotionAdapter(
block_out_channels=block_out_channels,
motion_layers_per_block=2,
motion_norm_num_groups=2,
motion_num_attention_heads=4,
)
components = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"motion_adapter": motion_adapter,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"feature_extractor": None,
"image_encoder": None,
}
return components
def test_text_to_image(self):
components = self.get_dummy_components_image_generation()
pipe = StableDiffusionPipeline(**components)
pipe.to(torch_device)
inputs = {"prompt": "a cute cat", "num_inference_steps": 2}
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
stderr = stderr.getvalue()
# we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
# so we just match "5" in "#####| 1/5 [00:01<00:00]"
max_steps = re.search("/(.*?) ", stderr).group(1)
self.assertTrue(max_steps is not None and len(max_steps) > 0)
self.assertTrue(
f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
)
pipe.set_progress_bar_config(disable=True)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
def test_image_to_image(self):
components = self.get_dummy_components_image_generation()
pipe = StableDiffusionImg2ImgPipeline(**components)
pipe.to(torch_device)
image = Image.new("RGB", (32, 32))
inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "strength": 0.5, "image": image}
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
stderr = stderr.getvalue()
# we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
# so we just match "5" in "#####| 1/5 [00:01<00:00]"
max_steps = re.search("/(.*?) ", stderr).group(1)
self.assertTrue(max_steps is not None and len(max_steps) > 0)
self.assertTrue(
f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
)
pipe.set_progress_bar_config(disable=True)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
def test_inpainting(self):
components = self.get_dummy_components_image_generation()
pipe = StableDiffusionInpaintPipeline(**components)
pipe.to(torch_device)
image = Image.new("RGB", (32, 32))
mask = Image.new("RGB", (32, 32))
inputs = {
"prompt": "a cute cat",
"num_inference_steps": 2,
"strength": 0.5,
"image": image,
"mask_image": mask,
}
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
stderr = stderr.getvalue()
# we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
# so we just match "5" in "#####| 1/5 [00:01<00:00]"
max_steps = re.search("/(.*?) ", stderr).group(1)
self.assertTrue(max_steps is not None and len(max_steps) > 0)
self.assertTrue(
f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
)
pipe.set_progress_bar_config(disable=True)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
def test_text_to_video(self):
components = self.get_dummy_components_video_generation()
pipe = AnimateDiffPipeline(**components)
pipe.to(torch_device)
inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "num_frames": 2}
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
stderr = stderr.getvalue()
# we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
# so we just match "5" in "#####| 1/5 [00:01<00:00]"
max_steps = re.search("/(.*?) ", stderr).group(1)
self.assertTrue(max_steps is not None and len(max_steps) > 0)
self.assertTrue(
f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
)
pipe.set_progress_bar_config(disable=True)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
def test_video_to_video(self):
components = self.get_dummy_components_video_generation()
pipe = AnimateDiffVideoToVideoPipeline(**components)
pipe.to(torch_device)
num_frames = 2
video = [Image.new("RGB", (32, 32))] * num_frames
inputs = {"prompt": "a cute cat", "num_inference_steps": 2, "video": video}
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
stderr = stderr.getvalue()
# we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,
# so we just match "5" in "#####| 1/5 [00:01<00:00]"
max_steps = re.search("/(.*?) ", stderr).group(1)
self.assertTrue(max_steps is not None and len(max_steps) > 0)
self.assertTrue(
f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step"
)
pipe.set_progress_bar_config(disable=True)
with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):
_ = pipe(**inputs)
self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled")
| diffusers/tests/pipelines/test_pipeline_utils.py/0 | {
"file_path": "diffusers/tests/pipelines/test_pipeline_utils.py",
"repo_id": "diffusers",
"token_count": 12422
} |
import gc
import random
import traceback
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionModelWithProjection,
GPT2Tokenizer,
)
from diffusers import (
AutoencoderKL,
DPMSolverMultistepScheduler,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
nightly,
require_torch_2,
require_torch_gpu,
run_test_in_subprocess,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
# Will be run via run_test_in_subprocess
def _test_unidiffuser_compile(in_queue, out_queue, timeout):
error = None
try:
inputs = in_queue.get(timeout=timeout)
torch_device = inputs.pop("torch_device")
seed = inputs.pop("seed")
inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed)
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
# pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(torch_device)
pipe.unet.to(memory_format=torch.channels_last)
pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
pipe.set_progress_bar_config(disable=None)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
assert np.abs(image_slice - expected_slice).max() < 1e-1
except Exception:
error = f"{traceback.format_exc()}"
results = {"error": error}
out_queue.put(results, timeout=timeout)
out_queue.join()
class UniDiffuserPipelineFastTests(
PipelineTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
pipeline_class = UniDiffuserPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
# vae_latents, not latents, is the argument that corresponds to VAE latent inputs
image_latents_params = frozenset(["vae_latents"])
supports_dduf = False
def get_dummy_components(self):
unet = UniDiffuserModel.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="unet",
)
scheduler = DPMSolverMultistepScheduler(
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
solver_order=3,
)
vae = AutoencoderKL.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="vae",
)
text_encoder = CLIPTextModel.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="text_encoder",
)
clip_tokenizer = CLIPTokenizer.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="clip_tokenizer",
)
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="image_encoder",
)
# From the Stable Diffusion Image Variation pipeline tests
clip_image_processor = CLIPImageProcessor(crop_size=32, size=32)
# image_processor = CLIPImageProcessor.from_pretrained("hf-internal-testing/tiny-random-clip")
text_tokenizer = GPT2Tokenizer.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="text_tokenizer",
)
text_decoder = UniDiffuserTextDecoder.from_pretrained(
"hf-internal-testing/unidiffuser-diffusers-test",
subfolder="text_decoder",
)
components = {
"vae": vae,
"text_encoder": text_encoder,
"image_encoder": image_encoder,
"clip_image_processor": clip_image_processor,
"clip_tokenizer": clip_tokenizer,
"text_decoder": text_decoder,
"text_tokenizer": text_tokenizer,
"unet": unet,
"scheduler": scheduler,
}
return components
def get_dummy_inputs(self, device, seed=0):
image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
image = image.cpu().permute(0, 2, 3, 1)[0]
image = Image.fromarray(np.uint8(image)).convert("RGB")
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
}
return inputs
def get_fixed_latents(self, device, seed=0):
if isinstance(device, str):
device = torch.device(device)
generator = torch.Generator(device=device).manual_seed(seed)
# Hardcode the shapes for now.
prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32)
vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32)
clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32)
latents = {
"prompt_latents": prompt_latents,
"vae_latents": vae_latents,
"clip_latents": clip_latents,
}
return latents
def get_dummy_inputs_with_latents(self, device, seed=0):
# image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
# image = image.cpu().permute(0, 2, 3, 1)[0]
# image = Image.fromarray(np.uint8(image)).convert("RGB")
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg",
)
image = image.resize((32, 32))
latents = self.get_fixed_latents(device, seed=seed)
if str(device).startswith("mps"):
generator = torch.manual_seed(seed)
else:
generator = torch.Generator(device=device).manual_seed(seed)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "np",
"prompt_latents": latents.get("prompt_latents"),
"vae_latents": latents.get("vae_latents"),
"clip_latents": latents.get("clip_latents"),
}
return inputs
def test_dict_tuple_outputs_equivalent(self):
expected_slice = None
if torch_device == "cpu":
expected_slice = np.array([0.7489, 0.3722, 0.4475, 0.5630, 0.5923, 0.4992, 0.3936, 0.5844, 0.4975])
super().test_dict_tuple_outputs_equivalent(expected_slice=expected_slice)
def test_unidiffuser_default_joint_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
# inputs = self.get_dummy_inputs(device)
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_joint_no_cfg_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
# inputs = self.get_dummy_inputs(device)
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
# Set guidance scale to 1.0 to turn off CFG
inputs["guidance_scale"] = 1.0
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_text2img_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation
del inputs["image"]
image = unidiffuser_pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_unidiffuser_default_image_0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img'
unidiffuser_pipe.set_image_mode()
assert unidiffuser_pipe.mode == "img"
inputs = self.get_dummy_inputs(device)
# Delete prompt and image for unconditional ("marginal") image generation.
del inputs["prompt"]
del inputs["image"]
image = unidiffuser_pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5760, 0.6270, 0.6571, 0.4966, 0.4638, 0.5663, 0.5254, 0.5068, 0.5715])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_unidiffuser_default_text_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text'
unidiffuser_pipe.set_text_mode()
assert unidiffuser_pipe.mode == "text"
inputs = self.get_dummy_inputs(device)
# Delete prompt and image for unconditional ("marginal") text generation.
del inputs["prompt"]
del inputs["image"]
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_img2text_v0(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_joint_v1(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
# inputs = self.get_dummy_inputs(device)
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
inputs["data_type"] = 1
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_default_text2img_v1(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation
del inputs["image"]
image = unidiffuser_pipe(**inputs).images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def test_unidiffuser_default_img2text_v1(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1")
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = " no no no "
assert text[0][:10] == expected_text_prefix
def test_unidiffuser_text2img_multiple_images(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs(device)
# Delete image for text-conditioned image generation
del inputs["image"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
image = unidiffuser_pipe(**inputs).images
assert image.shape == (2, 32, 32, 3)
def test_unidiffuser_img2text_multiple_prompts(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
text = unidiffuser_pipe(**inputs).text
assert len(text) == 3
def test_unidiffuser_text2img_multiple_images_with_latents(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation
del inputs["image"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
image = unidiffuser_pipe(**inputs).images
assert image.shape == (2, 32, 32, 3)
def test_unidiffuser_img2text_multiple_prompts_with_latents(self):
device = "cpu" # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
unidiffuser_pipe = UniDiffuserPipeline(**components)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation
del inputs["prompt"]
inputs["num_images_per_prompt"] = 2
inputs["num_prompts_per_image"] = 3
text = unidiffuser_pipe(**inputs).text
assert len(text) == 3
def test_inference_batch_single_identical(self):
super().test_inference_batch_single_identical(expected_max_diff=2e-4)
@require_torch_gpu
def test_unidiffuser_default_joint_v1_cuda_fp16(self):
device = "cuda"
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
"hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'joint'
unidiffuser_pipe.set_joint_mode()
assert unidiffuser_pipe.mode == "joint"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
inputs["data_type"] = 1
sample = unidiffuser_pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5049, 0.5498, 0.5854, 0.3052, 0.4460, 0.6489, 0.5122, 0.4810, 0.6138])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
expected_text_prefix = '" This This'
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
@require_torch_gpu
def test_unidiffuser_default_text2img_v1_cuda_fp16(self):
device = "cuda"
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
"hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'text2img'
unidiffuser_pipe.set_text_to_image_mode()
assert unidiffuser_pipe.mode == "text2img"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete image for text-conditioned image generation.
del inputs["image"]
inputs["data_type"] = 1
sample = unidiffuser_pipe(**inputs)
image = sample.images
assert image.shape == (1, 32, 32, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.5054, 0.5498, 0.5854, 0.3052, 0.4458, 0.6489, 0.5122, 0.4810, 0.6138])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3
@require_torch_gpu
def test_unidiffuser_default_img2text_v1_cuda_fp16(self):
device = "cuda"
unidiffuser_pipe = UniDiffuserPipeline.from_pretrained(
"hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16
)
unidiffuser_pipe = unidiffuser_pipe.to(device)
unidiffuser_pipe.set_progress_bar_config(disable=None)
# Set mode to 'img2text'
unidiffuser_pipe.set_image_to_text_mode()
assert unidiffuser_pipe.mode == "img2text"
inputs = self.get_dummy_inputs_with_latents(device)
# Delete text for image-conditioned text generation.
del inputs["prompt"]
inputs["data_type"] = 1
text = unidiffuser_pipe(**inputs).text
expected_text_prefix = '" This This'
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
@nightly
@require_torch_gpu
class UniDiffuserPipelineSlowTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, seed=0, generate_latents=False):
generator = torch.manual_seed(seed)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 8.0,
"output_type": "np",
}
if generate_latents:
latents = self.get_fixed_latents(device, seed=seed)
for latent_name, latent_tensor in latents.items():
inputs[latent_name] = latent_tensor
return inputs
def get_fixed_latents(self, device, seed=0):
if isinstance(device, str):
device = torch.device(device)
latent_device = torch.device("cpu")
generator = torch.Generator(device=latent_device).manual_seed(seed)
# Hardcode the shapes for now.
prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)
# Move latents onto desired device.
prompt_latents = prompt_latents.to(device)
vae_latents = vae_latents.to(device)
clip_latents = clip_latents.to(device)
latents = {
"prompt_latents": prompt_latents,
"vae_latents": vae_latents,
"clip_latents": clip_latents,
}
return latents
def test_unidiffuser_default_joint_v1(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
# inputs = self.get_dummy_inputs(device)
inputs = self.get_inputs(device=torch_device, generate_latents=True)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-1
expected_text_prefix = "a living room"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
def test_unidiffuser_default_text2img_v1(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_unidiffuser_default_img2text_v1(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["prompt"]
sample = pipe(**inputs)
text = sample.text
expected_text_prefix = "An astronaut"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
@unittest.skip(reason="Skip torch.compile test to speed up the slow test suite.")
@require_torch_2
def test_unidiffuser_compile(self, seed=0):
inputs = self.get_inputs(torch_device, seed=seed, generate_latents=True)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
# Can't pickle a Generator object
del inputs["generator"]
inputs["torch_device"] = torch_device
inputs["seed"] = seed
run_test_in_subprocess(test_case=self, target_func=_test_unidiffuser_compile, inputs=inputs)
@nightly
@require_torch_gpu
class UniDiffuserPipelineNightlyTests(unittest.TestCase):
def setUp(self):
super().setUp()
gc.collect()
torch.cuda.empty_cache()
def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, seed=0, generate_latents=False):
generator = torch.manual_seed(seed)
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg"
)
inputs = {
"prompt": "an elephant under the sea",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 8.0,
"output_type": "np",
}
if generate_latents:
latents = self.get_fixed_latents(device, seed=seed)
for latent_name, latent_tensor in latents.items():
inputs[latent_name] = latent_tensor
return inputs
def get_fixed_latents(self, device, seed=0):
if isinstance(device, str):
device = torch.device(device)
latent_device = torch.device("cpu")
generator = torch.Generator(device=latent_device).manual_seed(seed)
# Hardcode the shapes for now.
prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32)
vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32)
clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32)
# Move latents onto desired device.
prompt_latents = prompt_latents.to(device)
vae_latents = vae_latents.to(device)
clip_latents = clip_latents.to(device)
latents = {
"prompt_latents": prompt_latents,
"vae_latents": vae_latents,
"clip_latents": clip_latents,
}
return latents
def test_unidiffuser_default_joint_v1_fp16(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
# inputs = self.get_dummy_inputs(device)
inputs = self.get_inputs(device=torch_device, generate_latents=True)
# Delete prompt and image for joint inference.
del inputs["prompt"]
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
text = sample.text
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520])
assert np.abs(image_slice.flatten() - expected_img_slice).max() < 2e-1
expected_text_prefix = "a living room"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
def test_unidiffuser_default_text2img_v1_fp16(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["image"]
sample = pipe(**inputs)
image = sample.images
assert image.shape == (1, 512, 512, 3)
image_slice = image[0, -3:, -3:, -1]
expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def test_unidiffuser_default_img2text_v1_fp16(self):
pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
pipe.to(torch_device)
pipe.set_progress_bar_config(disable=None)
pipe.enable_attention_slicing()
inputs = self.get_inputs(device=torch_device, generate_latents=True)
del inputs["prompt"]
sample = pipe(**inputs)
text = sample.text
expected_text_prefix = "An astronaut"
assert text[0][: len(expected_text_prefix)] == expected_text_prefix
| diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py/0 | {
"file_path": "diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py",
"repo_id": "diffusers",
"token_count": 14637
} |
# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from diffusers import (
MotionAdapter,
)
from diffusers.utils.testing_utils import (
enable_full_determinism,
)
enable_full_determinism()
class MotionAdapterSingleFileTests(unittest.TestCase):
model_class = MotionAdapter
def test_single_file_components_version_v1_5(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_components_version_v1_5_2(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sd_v15_v2.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-2"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_components_version_v1_5_3(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/v3_sd15_mm.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-v1-5-3"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
def test_single_file_components_version_sdxl_beta(self):
ckpt_path = "https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt"
repo_id = "guoyww/animatediff-motion-adapter-sdxl-beta"
model = self.model_class.from_pretrained(repo_id)
model_single_file = self.model_class.from_single_file(ckpt_path)
PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "_use_default_values", "_diffusers_version"]
for param_name, param_value in model_single_file.config.items():
if param_name in PARAMS_TO_IGNORE:
continue
assert (
model.config[param_name] == param_value
), f"{param_name} differs between pretrained loading and single file loading"
| diffusers/tests/single_file/test_model_motion_adapter_single_file.py/0 | {
"file_path": "diffusers/tests/single_file/test_model_motion_adapter_single_file.py",
"repo_id": "diffusers",
"token_count": 1629
} |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import re
import subprocess
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
def _should_continue(line, indent):
return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
"""Find and return the code source code of `object_name`."""
parts = object_name.split(".")
i = 0
# First let's find the module where our object lives.
module = parts[i]
while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
i += 1
if i < len(parts):
module = os.path.join(module, parts[i])
if i >= len(parts):
raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
with open(
os.path.join(DIFFUSERS_PATH, f"{module}.py"),
"r",
encoding="utf-8",
newline="\n",
) as f:
lines = f.readlines()
# Now let's find the class / func in the code!
indent = ""
line_index = 0
for name in parts[i + 1 :]:
while (
line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lines):
raise ValueError(f" {object_name} does not match any function or class in {module}.")
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
start_index = line_index
while line_index < len(lines) and _should_continue(lines[line_index], indent):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
code_lines = lines[start_index:line_index]
return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
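# Illustrative examples (hypothetical object paths) of the comment lines these patterns
# are meant to match:
#   "# Copied from diffusers.models.foo.BarBlock"
#   "# Copied from diffusers.models.foo.BarBlock with BarBlock->BazBlock all-casing"
# `_re_copy_warning` captures the indentation, the dotted object path and the optional
# replacement clause; `_re_replace_pattern` then parses each "BarBlock->BazBlock" pair.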
def get_indent(code):
lines = code.split("\n")
idx = 0
while idx < len(lines) and len(lines[idx]) == 0:
idx += 1
if idx < len(lines):
return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
return ""
def run_ruff(code):
command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"]
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
stdout, _ = process.communicate(input=code.encode())
return stdout.decode()
def stylify(code: str) -> str:
"""
Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`.
As `ruff` does not provide a Python API, this cannot be done on the fly.
Args:
code (`str`): The code to format.
Returns:
`str`: The formatted code.
"""
has_indent = len(get_indent(code)) > 0
if has_indent:
code = f"class Bla:\n{code}"
formatted_code = run_ruff(code)
return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code
def is_copy_consistent(filename, overwrite=False):
"""
Check if the code commented as a copy in `filename` matches the original.
Return the differences or overwrites the content depending on `overwrite`.
"""
with open(filename, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
diffs = []
line_index = 0
# Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(lines):
search = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
indent, object_name, replace_pattern = search.groups()
theoretical_code = find_code_in_diffusers(object_name)
theoretical_indent = get_indent(theoretical_code)
start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
indent = theoretical_indent
line_index = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see an `# End copy` comment.
should_continue = True
while line_index < len(lines) and should_continue:
line_index += 1
if line_index >= len(lines):
break
line = lines[line_index]
should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
observed_code_lines = lines[start_index:line_index]
observed_code = "".join(observed_code_lines)
# Remove any nested `Copied from` comments to avoid circular copies
theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
theoretical_code = "\n".join(theoretical_code)
# Before comparing, use the `replace_pattern` on the original code.
if len(replace_pattern) > 0:
patterns = replace_pattern.replace("with", "").split(",")
patterns = [_re_replace_pattern.search(p) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
obj1, obj2, option = pattern.groups()
theoretical_code = re.sub(obj1, obj2, theoretical_code)
if option.strip() == "all-casing":
theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
# stylify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
theoretical_code = stylify(lines[start_index - 1] + theoretical_code)
theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
line_index = start_index + 1
if overwrite and len(diffs) > 0:
# Warn the user a file has been modified.
print(f"Detected changes, rewriting {filename}.")
with open(filename, "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
return diffs
def check_copies(overwrite: bool = False):
all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
diffs = []
for filename in all_files:
new_diffs = is_copy_consistent(filename, overwrite)
diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(diffs) > 0:
diff = "\n".join(diffs)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--fix_and_overwrite",
action="store_true",
help="Whether to fix inconsistencies.",
)
args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| diffusers/utils/check_copies.py/0 | {
"file_path": "diffusers/utils/check_copies.py",
"repo_id": "diffusers",
"token_count": 3397
} |
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
"init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
"setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
"doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
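# Illustrative example (hypothetical version number): with version "0.30.0", the "examples"
# pattern rewrites `check_min_version("0.29.0.dev0")` to `check_min_version("0.30.0")`, and
# the "init" pattern rewrites `__version__ = "0.29.0.dev0"` to `__version__ = "0.30.0"`.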
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
"""Update the version in one file using a specific pattern."""
with open(fname, "r", encoding="utf-8", newline="\n") as f:
code = f.read()
re_pattern, replace = REPLACE_PATTERNS[pattern]
replace = replace.replace("VERSION", version)
code = re_pattern.sub(replace, code)
with open(fname, "w", encoding="utf-8", newline="\n") as f:
f.write(code)
def update_version_in_examples(version):
"""Update the version in all examples files."""
for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects")
if "legacy" in directories:
directories.remove("legacy")
for fname in fnames:
if fname.endswith(".py"):
update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
"""Update the version in all needed files."""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(fname, version, pattern)
if not patch:
update_version_in_examples(version)
def clean_main_ref_in_model_list():
"""Replace the links from main doc tp stable doc in the model list of the README."""
# If the introduction or the conclusion of the list change, the prompts may need to be updated.
_start_prompt = "🤗 Transformers currently provides the following architectures"
_end_prompt = "1. Want to contribute a new model?"
with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
lines = f.readlines()
# Find the start of the list.
start_index = 0
while not lines[start_index].startswith(_start_prompt):
start_index += 1
start_index += 1
index = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt):
if lines[index].startswith("1."):
lines[index] = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc",
"https://huggingface.co/docs/diffusers/model_doc",
)
index += 1
with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
f.writelines(lines)
def get_version():
"""Reads the current version in the __init__."""
with open(REPLACE_FILES["init"], "r") as f:
code = f.read()
default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
return packaging.version.parse(default_version)
def pre_release_work(patch=False):
"""Do all the necessary pre-release steps."""
# First let's get the default version: base version if we are in dev, bump minor otherwise.
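# For example (hypothetical numbers): "0.30.0.dev0" becomes "0.30.0" for a regular release,
# while starting from "0.30.0" a patch release gives "0.30.1" and a minor release gives "0.31.0".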
default_version = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
if default_version.is_devrelease:
default_version = default_version.base_version
elif patch:
default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
default_version = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
version = input(f"Which version are you releasing? [{default_version}]")
if len(version) == 0:
version = default_version
print(f"Updating version to {version}.")
global_version_update(version, patch=patch)
# if not patch:
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
def post_release_work():
"""Do all the necessary post-release steps."""
# First let's get the current version
current_version = get_version()
dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
current_version = current_version.base_version
# Check with the user we got that right.
version = input(f"Which version are we developing now? [{dev_version}]")
if len(version) == 0:
version = dev_version
print(f"Updating version to {version}.")
global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| diffusers/utils/release.py/0 | {
"file_path": "diffusers/utils/release.py",
"repo_id": "diffusers",
"token_count": 2306
} |
# How to contribute to 🤗 LeRobot?
Everyone is welcome to contribute, and we value everybody's contribution. Code
is thus not the only way to help the community. Answering questions, helping
others, reaching out and improving the documentation are immensely valuable to
the community.
It also helps us if you spread the word: reference the library from blog posts
on the awesome projects it made possible, shout out on Twitter when it has
helped you, or simply ⭐️ the repo to say "thank you".
Whichever way you choose to contribute, please be mindful to respect our
[code of conduct](https://github.com/huggingface/lerobot/blob/main/CODE_OF_CONDUCT.md).
## You can contribute in so many ways!
Some of the ways you can contribute to 🤗 LeRobot:
* Fixing outstanding issues with the existing code.
* Implementing new models, datasets or simulation environments.
* Contributing to the examples or to the documentation.
* Submitting issues related to bugs or desired new features.
Following the guides below, feel free to open issues and PRs and to coordinate your efforts with the community on our [Discord Channel](https://discord.gg/VjFz58wn3R). For specific inquiries, reach out to [Remi Cadene](mailto:[email protected]).
If you are not sure how to contribute or want to know which features we are working on next, take a look at this project page: [LeRobot TODO](https://github.com/orgs/huggingface/projects/46)
## Submitting a new issue or feature request
Do your best to follow these guidelines when submitting an issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.
### Did you find a bug?
The 🤗 LeRobot library is robust and reliable thanks to the users who notify us of
the problems they encounter. So thank you for reporting an issue.
First, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on GitHub under Issues).
Did not find it? :( So we can act quickly on it, please follow these steps:
* Include your **OS type and version**, the versions of **Python** and **PyTorch**.
* A short, self-contained code snippet that allows us to reproduce the bug in
less than 30s.
* The full traceback if an exception is raised.
* Attach any other additional information, like screenshots, you think may help.
### Do you want a new feature?
A good feature request addresses the following points:
1. Motivation first:
* Is it related to a problem/frustration with the library? If so, please explain
why. Providing a code snippet that demonstrates the problem is best.
* Is it related to something you would need for a project? We'd love to hear
about it!
* Is it something you worked on and think could benefit the community?
Awesome! Tell us what problem it solved for you.
2. Write a *paragraph* describing the feature.
3. Provide a **code snippet** that demonstrates its future use.
4. In case this is related to a paper, please attach a link.
5. Attach any additional information (drawings, screenshots, etc.) you think may help.
If your issue is well written we're already 80% of the way there by the time you
post it.
## Adding new policies, datasets or environments
Look at our implementations for [datasets](./lerobot/common/datasets/), [policies](./lerobot/common/policies/),
environments ([aloha](https://github.com/huggingface/gym-aloha),
[xarm](https://github.com/huggingface/gym-xarm),
[pusht](https://github.com/huggingface/gym-pusht))
and follow the same API design.
When implementing a new dataset loadable with LeRobotDataset follow these steps:
- Update `available_datasets_per_env` in `lerobot/__init__.py`
When implementing a new environment (e.g. `gym_aloha`), follow these steps:
- Update `available_tasks_per_env` and `available_datasets_per_env` in `lerobot/__init__.py`
When implementing a new policy class (e.g. `DiffusionPolicy`) follow these steps:
- Update `available_policies` and `available_policies_per_env`, in `lerobot/__init__.py`
- Set the required `name` class attribute (see the sketch after this list).
- Update variables in `tests/test_available.py` by importing your new Policy class
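As a rough illustration of the `name` requirement above, here is a minimal sketch; the class name, the `nn.Module` base class and the attribute value are assumptions for illustration only, not the actual LeRobot API:
```python
from torch import nn


class MyNewPolicy(nn.Module):
    # Hypothetical example: the required `name` class attribute identifies the policy,
    # e.g. when listing it in `available_policies` in `lerobot/__init__.py`.
    name = "my_new_policy"

    def __init__(self):
        super().__init__()
        self.backbone = nn.Identity()  # placeholder network for the sketch

    def forward(self, batch):
        return self.backbone(batch)
```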
## Submitting a pull request (PR)
Before writing code, we strongly advise you to search through the existing PRs or
issues to make sure that nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.
You will need basic `git` proficiency to be able to contribute to
🤗 LeRobot. `git` is not the easiest tool to use but it has the greatest
manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.
Follow these steps to start contributing:
1. Fork the [repository](https://github.com/huggingface/lerobot) by
clicking on the 'Fork' button on the repository's page. This creates a copy of the code
under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote. The following command
assumes you have your public SSH key uploaded to GitHub. See the following guide for more
[information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository).
```bash
git clone [email protected]:<your Github handle>/lerobot.git
cd lerobot
git remote add upstream https://github.com/huggingface/lerobot.git
```
3. Create a new branch to hold your development changes, and do this for every new PR you work on.
Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)):
```bash
git checkout main
git fetch upstream
git rebase upstream/main
```
Once your `main` branch is synchronized, create a new branch from it:
```bash
git checkout -b a-descriptive-name-for-my-changes
```
🚨 **Do not** work on the `main` branch.
4. For development, we use `poetry` instead of just `pip` to easily track our dependencies.
If you don't have it already, follow the [instructions](https://python-poetry.org/docs/#installation) to install it.
Set up a development environment with conda or miniconda:
```bash
conda create -y -n lerobot-dev python=3.10 && conda activate lerobot-dev
```
To develop on 🤗 LeRobot, you will at least need to install the `dev` and `test` extras dependencies along with the core library:
```bash
poetry install --sync --extras "dev test"
```
You can also install the project with all its dependencies (including environments):
```bash
poetry install --sync --all-extras
```
> **Note:** If you don't install simulation environments with `--all-extras`, the tests that require them will be skipped when running the pytest suite locally. However, they *will* be tested in the CI. In general, we advise you to install everything and test locally before pushing.
Whichever command you chose to install the project (e.g. `poetry install --sync --all-extras`), you should run it again when pulling code with an updated version of `pyproject.toml` and `poetry.lock` in order to synchronize your virtual environment with the new dependencies.
The equivalent of `pip install some-package`, would just be:
```bash
poetry add some-package
```
When making changes to the poetry sections of the `pyproject.toml`, you should run the following command to lock dependencies.
```bash
poetry lock --no-update
```
5. Develop the features on your branch.
As you work on the features, you should make sure that the test suite
passes. You should run the tests impacted by your changes like this (see
below an explanation regarding the environment variable):
```bash
pytest tests/<TEST_TO_RUN>.py
```
6. Follow our style.
`lerobot` relies on `ruff` to format its source code
consistently. Set up [`pre-commit`](https://pre-commit.com/) to run these checks
automatically as Git commit hooks.
Install `pre-commit` hooks:
```bash
pre-commit install
```
You can run these hooks whenever you need on staged files with:
```bash
pre-commit
```
Once you're happy with your changes, add changed files using `git add` and
make a commit with `git commit` to record your changes locally:
```bash
git add modified_file.py
git commit
```
Note: if you already committed some changes that have incorrect formatting, you can use:
```bash
pre-commit run --all-files
```
Please write [good commit messages](https://chris.beams.io/posts/git-commit/).
It is a good idea to sync your copy of the code with the original
repository regularly. This way you can quickly account for changes:
```bash
git fetch upstream
git rebase upstream/main
```
Push the changes to your account using:
```bash
git push -u origin a-descriptive-name-for-my-changes
```
7. Once you are satisfied (**and the checklist below is happy too**), go to the
webpage of your fork on GitHub. Click on 'Pull request' to send your changes
to the project maintainers for review.
8. It's ok if maintainers ask you for changes. It happens to core contributors
too! So everyone can see the changes in the Pull request, work in your local
branch and push the changes to your fork. They will automatically appear in
the pull request.
### Checklist
1. The title of your pull request should be a summary of its contribution;
2. If your pull request addresses an issue, please mention the issue number in
the pull request description to make sure they are linked (and people
consulting the issue know you are working on it);
3. To indicate a work in progress please prefix the title with `[WIP]`, or preferably mark
the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate
it from PRs ready to be merged;
4. Make sure existing tests pass;
<!-- 5. Add high-coverage tests. No quality testing = no merge.
See an example of a good PR here: https://github.com/huggingface/lerobot/pull/ -->
### Tests
An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/lerobot/tree/main/tests).
Install [git lfs](https://git-lfs.com/) to retrieve test artifacts (if you don't have it already).
On Mac:
```bash
brew install git-lfs
git lfs install
```
On Ubuntu:
```bash
sudo apt-get install git-lfs
git lfs install
```
Pull artifacts if they're not in [tests/data](tests/data)
```bash
git lfs pull
```
We use `pytest` in order to run the tests. From the root of the
repository, here's how to run tests with `pytest` for the library:
```bash
python -m pytest -sv ./tests
```
You can specify a smaller set of tests in order to test only the feature
you're working on.
| lerobot/CONTRIBUTING.md/0 | {
"file_path": "lerobot/CONTRIBUTING.md",
"repo_id": "lerobot",
"token_count": 3135
} |
# Getting Started with Real-World Robots
This tutorial will guide you through the process of setting up and training a neural network to autonomously control a real robot.
**What You'll Learn:**
1. How to order and assemble your robot.
2. How to connect, configure, and calibrate your robot.
3. How to record and visualize your dataset.
4. How to train a policy using your data and prepare it for evaluation.
5. How to evaluate your policy and visualize the results.
By following these steps, you'll be able to replicate tasks like picking up a Lego block and placing it in a bin with a high success rate, as demonstrated in [this video](https://x.com/RemiCadene/status/1814680760592572934).
This tutorial is specifically made for the affordable [Koch v1.1](https://github.com/jess-moss/koch-v1-1) robot, but it contains additional information to be easily adapted to various types of robots like [Aloha bimanual robot](https://aloha-2.github.io) by changing some configurations. The Koch v1.1 consists of a leader arm and a follower arm, each with 6 motors. It can work with one or several cameras to record the scene, which serve as visual sensors for the robot.
During the data collection phase, you will control the follower arm by moving the leader arm. This process is known as "teleoperation." This technique is used to collect robot trajectories. Afterward, you'll train a neural network to imitate these trajectories and deploy the network to enable your robot to operate autonomously.
If you encounter any issues at any step of the tutorial, feel free to seek help on [Discord](https://discord.com/invite/s3KuuzsPFb) or don't hesitate to iterate with us on the tutorial by creating issues or pull requests. Thanks!
## 1. Order and Assemble your Koch v1.1
Follow the sourcing and assembling instructions provided on the [Koch v1.1 Github page](https://github.com/jess-moss/koch-v1-1). This will guide you through setting up both the follower and leader arms, as shown in the image below.
<div style="text-align:center;">
<img src="../media/tutorial/koch_v1_1_leader_follower.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="50%">
</div>
For a visual walkthrough of the assembly process, you can refer to [this video tutorial](https://youtu.be/8nQIg9BwwTk).
## 2. Configure motors, calibrate arms, teleoperate your Koch v1.1
First, install the additional dependencies required for robots built with dynamixel motors like Koch v1.1 by running one of the following commands (make sure gcc is installed).
Using `pip`:
```bash
pip install -e ".[dynamixel]"
```
Or using `poetry`:
```bash
poetry install --sync --extras "dynamixel"
```
/!\ For Linux only: ffmpeg and opencv require a conda install for now. Run this exact sequence of commands:
```bash
conda install -c conda-forge ffmpeg
pip uninstall opencv-python
conda install -c conda-forge "opencv>=4.10.0"
```
You are now ready to plug the 5V power supply to the motor bus of the leader arm (the smaller one) since all its motors only require 5V.
Then plug the 12V power supply to the motor bus of the follower arm. It has two motors that need 12V, and the rest will be powered with 5V through the voltage converter.
Finally, connect both arms to your computer via USB. Note that the USB doesn't provide any power, and both arms need to be plugged in with their associated power supply to be detected by your computer.
Now you are ready to configure your motors for the first time, as detailed in the sections below. In the upcoming sections, you'll learn about our classes and functions by running some python code in an interactive session, or by copy-pasting it in a python file.
If you have already configured your motors the first time, you can streamline the process by directly running the teleoperate script (which is detailed further in the tutorial):
```bash
python lerobot/scripts/control_robot.py \
--robot.type=koch \
--control.type=teleoperate
```
It will automatically:
1. Identify any missing calibrations and initiate the calibration procedure.
2. Connect the robot and start teleoperation.
### a. Control your motors with DynamixelMotorsBus
You can use the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) to communicate with the motors connected as a chain to the corresponding USB bus. This class leverages the Python [Dynamixel SDK](https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_sdk/sample_code/python_read_write_protocol_2_0/#python-read-write-protocol-20) to facilitate reading from and writing to the motors.
**First Configuration of your motors**
You will need to unplug each motor in turn and run a command to identify it. The motor will save its own identification, so you only need to do this once. Start by unplugging all of the motors.
Do the Leader arm first, as all of its motors are of the same type. Plug in your first motor on your leader arm and run this script to set its ID to 1.
```bash
python lerobot/scripts/configure_motor.py \
--port /dev/tty.usbmodem58760432961 \
--brand dynamixel \
--model xl330-m288 \
--baudrate 1000000 \
--ID 1
```
Then unplug your first motor and plug the second motor and set its ID to 2.
```bash
python lerobot/scripts/configure_motor.py \
--port /dev/tty.usbmodem58760432961 \
--brand dynamixel \
--model xl330-m288 \
--baudrate 1000000 \
--ID 2
```
Redo the process for all your motors until ID 6.
The process for the follower arm is almost the same, but the follower arm has two types of motors. For the first two motors, make sure you set the model to `xl430-w250`. _Important: configuring follower motors requires plugging and unplugging power. Make sure you use the 5V power for the XL330s and the 12V power for the XL430s!_
After all of your motors are configured properly, you're ready to plug them all together in a daisy-chain as shown in the original video.
**Instantiate the DynamixelMotorsBus**
To begin, create two instances of the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py), one for each arm, using their corresponding USB ports (e.g. `DynamixelMotorsBus(port="/dev/tty.usbmodem575E0031751")`).
To find the correct ports for each arm, run the utility script twice:
```bash
python lerobot/scripts/find_motors_bus_port.py
```
Example output when identifying the leader arm's port (e.g., `/dev/tty.usbmodem575E0031751` on Mac, or possibly `/dev/ttyACM0` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
[...Disconnect leader arm and press Enter...]
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0031751
Reconnect the usb cable.
```
Example output when identifying the follower arm's port (e.g., `/dev/tty.usbmodem575E0032081`, or possibly `/dev/ttyACM1` on Linux):
```
Finding all available ports for the MotorBus.
['/dev/tty.usbmodem575E0032081', '/dev/tty.usbmodem575E0031751']
Remove the usb cable from your DynamixelMotorsBus and press Enter when done.
[...Disconnect follower arm and press Enter...]
The port of this DynamixelMotorsBus is /dev/tty.usbmodem575E0032081
Reconnect the usb cable.
```
Troubleshooting: On Linux, you might need to give access to the USB ports by running this command with your ports:
```bash
sudo chmod 666 /dev/tty.usbmodem575E0032081
sudo chmod 666 /dev/tty.usbmodem575E0031751
```
*Listing and Configuring Motors*
Next, you'll need to list the motors for each arm, including their name, index, and model. Initially, each motor is assigned the factory default index `1`. Since each motor requires a unique index to function correctly when connected in a chain on a common bus, you'll need to assign different indices. It's recommended to use an ascending index order, starting from `1` (e.g., `1, 2, 3, 4, 5, 6`). These indices will be saved in the persistent memory of each motor during the first connection.
To assign indices to the motors, run this code in an interactive Python session. Replace the `port` values with the ones you identified earlier:
```python
from lerobot.common.robot_devices.motors.configs import DynamixelMotorsBusConfig
from lerobot.common.robot_devices.motors.dynamixel import DynamixelMotorsBus
leader_config = DynamixelMotorsBusConfig(
port="/dev/tty.usbmodem575E0031751",
motors={
# name: (index, model)
"shoulder_pan": (1, "xl330-m077"),
"shoulder_lift": (2, "xl330-m077"),
"elbow_flex": (3, "xl330-m077"),
"wrist_flex": (4, "xl330-m077"),
"wrist_roll": (5, "xl330-m077"),
"gripper": (6, "xl330-m077"),
},
)
follower_config = DynamixelMotorsBusConfig(
port="/dev/tty.usbmodem575E0032081",
motors={
# name: (index, model)
"shoulder_pan": (1, "xl430-w250"),
"shoulder_lift": (2, "xl430-w250"),
"elbow_flex": (3, "xl330-m288"),
"wrist_flex": (4, "xl330-m288"),
"wrist_roll": (5, "xl330-m288"),
"gripper": (6, "xl330-m288"),
},
)
leader_arm = DynamixelMotorsBus(leader_config)
follower_arm = DynamixelMotorsBus(follower_config)
```
IMPORTANTLY: Now that you have your ports, update [`KochRobotConfig`](../lerobot/common/robot_devices/robots/configs.py). You will find something like:
```python
@RobotConfig.register_subclass("koch")
@dataclass
class KochRobotConfig(ManipulatorRobotConfig):
calibration_dir: str = ".cache/calibration/koch"
# `max_relative_target` limits the magnitude of the relative positional target vector for safety purposes.
# Set this to a positive scalar to have the same value for all motors, or a list that is the same length as
# the number of motors in your follower arms.
max_relative_target: int | None = None
leader_arms: dict[str, MotorsBusConfig] = field(
default_factory=lambda: {
"main": DynamixelMotorsBusConfig(
port="/dev/tty.usbmodem585A0085511", <-- UPDATE HERE
motors={
# name: (index, model)
"shoulder_pan": [1, "xl330-m077"],
"shoulder_lift": [2, "xl330-m077"],
"elbow_flex": [3, "xl330-m077"],
"wrist_flex": [4, "xl330-m077"],
"wrist_roll": [5, "xl330-m077"],
"gripper": [6, "xl330-m077"],
},
),
}
)
follower_arms: dict[str, MotorsBusConfig] = field(
default_factory=lambda: {
"main": DynamixelMotorsBusConfig(
port="/dev/tty.usbmodem585A0076891", <-- UPDATE HERE
motors={
# name: (index, model)
"shoulder_pan": [1, "xl430-w250"],
"shoulder_lift": [2, "xl430-w250"],
"elbow_flex": [3, "xl330-m288"],
"wrist_flex": [4, "xl330-m288"],
"wrist_roll": [5, "xl330-m288"],
"gripper": [6, "xl330-m288"],
},
),
}
)
```
**Connect and Configure your Motors**
Before you can start using your motors, you'll need to configure them to ensure proper communication. When you first connect the motors, the [`DynamixelMotorsBus`](../lerobot/common/robot_devices/motors/dynamixel.py) automatically detects any mismatch between the current motor indices (factory set to `1`) and the specified indices (e.g., `1, 2, 3, 4, 5, 6`). This triggers a configuration procedure that requires you to unplug the power cord and motors, then reconnect each motor sequentially, starting from the one closest to the bus.
For a visual guide, refer to the [video tutorial of the configuration procedure](https://youtu.be/U78QQ9wCdpY).
To connect and configure the leader arm, run the following code in the same Python interactive session as earlier in the tutorial:
```python
leader_arm.connect()
```
When you connect the leader arm for the first time, you might see an output similar to this:
```
Read failed due to communication error on port /dev/tty.usbmodem575E0032081 for group_key ID_shoulder_pan_shoulder_lift_elbow_flex_wrist_flex_wrist_roll_gripper: [TxRxResult] There is no status packet!
/!\ A configuration issue has been detected with your motors:
If this is the first time you are using these motors, press enter to configure your motors... but before verify that all the cables are connected the proper way. If you find an issue, before making a modification, kill the python process, unplug the power cord to not damage the motors, rewire correctly, then plug the power again and relaunch the script.
Motor indices detected: {9600: [1]}
1. Unplug the power cord
2. Plug/unplug minimal number of cables to only have the first 1 motor(s) (['shoulder_pan']) connected.
3. Re-plug the power cord
Press Enter to continue...
*Follow the procedure*
Setting expected motor indices: [1, 2, 3, 4, 5, 6]
```
Once the leader arm is configured, repeat the process for the follower arm by running:
```python
follower_arm.connect()
```
Congratulations! Both arms are now properly configured and connected. You won't need to go through the configuration procedure again in the future.
**Troubleshooting**:
If the configuration process fails, you may need to do the configuration process via the Dynamixel Wizard.
Known failure modes:
- Calling `arm.connect()` raises `OSError: No motor found, but one new motor expected. Verify power cord is plugged in and retry` on Ubuntu 22.
Steps:
1. Visit https://emanual.robotis.com/docs/en/software/dynamixel/dynamixel_wizard2/#connect-dynamixel.
2. Follow the software installation instructions in section 3 of the web page.
3. Launch the software.
4. Configure the device scanning options in the menu under `Tools` > `Options` > `Scan`. Check only Protocol 2.0, select only the USB port identifier of interest, select all baudrates, set the ID range to `[0, 10]`. _While this step was not strictly necessary, it greatly speeds up scanning_.
5. For each motor in turn:
- Disconnect the power to the driver board.
- Connect **only** the motor of interest to the driver board, making sure to disconnect it from any other motors.
- Reconnect the power to the driver board.
- From the software menu select `Device` > `Scan` and let the scan run. A device should appear.
- If the device has an asterisk (*) near it, it means the firmware is indeed outdated. From the software menu, select `Tools` > `Firmware Update`. Follow the prompts.
- The main panel should have a table with the various parameters of the device (refer to the web page, section 5). Select the row with `ID`, and then set the desired ID in the bottom right panel by selecting it and clicking `Save`.
- Just like you did with the ID, also set the `Baud Rate` to 1 Mbps.
6. Check everything has been done right:
- Rewire the arms in their final configuration and power both of them.
- Scan for devices. All 12 motors should appear.
- Select the motors one by one and move the arm. Check that the graphical indicator near the top right shows the movement.
**Read and Write with DynamixelMotorsBus**
To get familiar with how `DynamixelMotorsBus` communicates with the motors, you can start by reading data from them. Copy-paste this code into the same interactive Python session:
```python
leader_pos = leader_arm.read("Present_Position")
follower_pos = follower_arm.read("Present_Position")
print(leader_pos)
print(follower_pos)
```
Expected output might look like:
```
array([2054, 523, 3071, 1831, 3049, 2441], dtype=int32)
array([2003, 1601, 56, 2152, 3101, 2283], dtype=int32)
```
Try moving the arms to various positions and observe how the values change.
Now let's try to enable torque in the follower arm by copy-pasting this code:
```python
from lerobot.common.robot_devices.motors.dynamixel import TorqueMode
follower_arm.write("Torque_Enable", TorqueMode.ENABLED.value)
```
With torque enabled, the follower arm will be locked in its current position. Do not attempt to manually move the arm while torque is enabled, as this could damage the motors.
Now, to get more familiar with reading and writing, let's move the arm programmatically by copy-pasting the following example code:
```python
# Get the current position
position = follower_arm.read("Present_Position")
# Update first motor (shoulder_pan) position by +10 steps
position[0] += 10
follower_arm.write("Goal_Position", position)
# Update all motors position by -30 steps
position -= 30
follower_arm.write("Goal_Position", position)
# Update gripper by +30 steps
position[-1] += 30
follower_arm.write("Goal_Position", position[-1], "gripper")
```
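You can also target a single motor by name. Assuming `read` mirrors `write` and accepts a motor name (check the method signature in [`dynamixel.py`](../lerobot/common/robot_devices/motors/dynamixel.py)), a small sketch:
```python
# Read only the gripper position (1-element array), then nudge it by +10 steps.
gripper_pos = follower_arm.read("Present_Position", "gripper")
follower_arm.write("Goal_Position", gripper_pos + 10, "gripper")
```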
When you're done playing, you can try to disable the torque, but make sure you hold your robot so that it doesn't fall:
```python
follower_arm.write("Torque_Enable", TorqueMode.DISABLED.value)
```
Finally, disconnect the arms:
```python
leader_arm.disconnect()
follower_arm.disconnect()
```
Alternatively, you can unplug the power cord, which will automatically disable torque and disconnect the motors.
*/!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.
### b. Teleoperate your Koch v1.1 with ManipulatorRobot
**Instantiate the ManipulatorRobot**
Before you can teleoperate your robot, you need to instantiate the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) using the previously defined `leader_config` and `follower_config`.
For the Koch v1.1 robot, we only have one leader, so we refer to it as `"main"` and define it as `leader_arms={"main": leader_config}`. We do the same for the follower arm. For other robots (like the Aloha), which may have two pairs of leader and follower arms, you would define them like this: `leader_arms={"left": left_leader_config, "right": right_leader_config},`. Same thing for the follower arms.
Run the following code to instantiate your manipulator robot:
```python
from lerobot.common.robot_devices.robots.configs import KochRobotConfig
from lerobot.common.robot_devices.robots.manipulator import ManipulatorRobot
robot_config = KochRobotConfig(
leader_arms={"main": leader_config},
follower_arms={"main": follower_config},
cameras={}, # We don't use any camera for now
)
robot = ManipulatorRobot(robot_config)
```
The `KochRobotConfig` is used to set the associated settings and calibration process. For instance, we activate the torque of the gripper of the leader Koch v1.1 arm and position it at a 40 degree angle to use it as a trigger.
For the [Aloha bimanual robot](https://aloha-2.github.io), we would use `AlohaRobotConfig` to set different settings such as a secondary ID for shadow joints (shoulder, elbow). Specific to Aloha, LeRobot comes with default calibration files stored in `.cache/calibration/aloha_default`. Assuming the motors have been properly assembled, no manual calibration step is expected for Aloha.
**Calibrate and Connect the ManipulatorRobot**
Next, you'll need to calibrate your Koch robot to ensure that the leader and follower arms have the same position values when they are in the same physical position. This calibration is essential because it allows a neural network trained on one Koch robot to work on another.
When you connect your robot for the first time, the [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) will detect if the calibration file is missing and trigger the calibration procedure. During this process, you will be guided to move each arm to three different positions.
Here are the positions you'll move the follower arm to:
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/koch/follower_zero.webp?raw=true" alt="Koch v1.1 follower arm zero position" title="Koch v1.1 follower arm zero position" style="width:100%;"> | <img src="../media/koch/follower_rotated.webp?raw=true" alt="Koch v1.1 follower arm rotated position" title="Koch v1.1 follower arm rotated position" style="width:100%;"> | <img src="../media/koch/follower_rest.webp?raw=true" alt="Koch v1.1 follower arm rest position" title="Koch v1.1 follower arm rest position" style="width:100%;"> |
And here are the corresponding positions for the leader arm:
| 1. Zero position | 2. Rotated position | 3. Rest position |
|---|---|---|
| <img src="../media/koch/leader_zero.webp?raw=true" alt="Koch v1.1 leader arm zero position" title="Koch v1.1 leader arm zero position" style="width:100%;"> | <img src="../media/koch/leader_rotated.webp?raw=true" alt="Koch v1.1 leader arm rotated position" title="Koch v1.1 leader arm rotated position" style="width:100%;"> | <img src="../media/koch/leader_rest.webp?raw=true" alt="Koch v1.1 leader arm rest position" title="Koch v1.1 leader arm rest position" style="width:100%;"> |
You can watch a [video tutorial of the calibration procedure](https://youtu.be/8drnU9uRY24) for more details.
During calibration, we count the number of full 360-degree rotations your motors have made since they were first used. That's why we ask you to move to this arbitrary "zero" position. We don't actually "set" the zero position, so you don't need to be accurate. After calculating these "offsets" to shift the motor values around 0, we need to assess the rotation direction of each motor, which might differ. That's why we ask you to rotate all motors to roughly 90 degrees, to measure whether the values change negatively or positively.
Finally, the rest position ensures that the follower and leader arms are roughly aligned after calibration, preventing sudden movements that could damage the motors when starting teleoperation.
Importantly, once calibrated, all Koch robots will move to the same positions (e.g. zero and rotated position) when commanded.
Run the following code to calibrate and connect your robot:
```python
robot.connect()
```
The output will look like this:
```
Connecting main follower arm
Connecting main leader arm
Missing calibration file '.cache/calibration/koch/main_follower.json'
Running calibration of koch main follower...
Move arm to zero position
[...]
Move arm to rotated position
[...]
Move arm to rest position
[...]
Calibration is done! Saving calibration file '.cache/calibration/koch/main_follower.json'
Missing calibration file '.cache/calibration/koch/main_leader.json'
Running calibration of koch main leader...
Move arm to zero position
[...]
Move arm to rotated position
[...]
Move arm to rest position
[...]
Calibration is done! Saving calibration file '.cache/calibration/koch/main_leader.json'
```
*Verifying Calibration*
Once calibration is complete, you can check the positions of the leader and follower arms to ensure they match. If the calibration was successful, the positions should be very similar.
Run this code to get the positions in degrees:
```python
leader_pos = robot.leader_arms["main"].read("Present_Position")
follower_pos = robot.follower_arms["main"].read("Present_Position")
print(leader_pos)
print(follower_pos)
```
Example output:
```
array([-0.43945312, 133.94531, 179.82422, -18.984375, -1.9335938, 34.541016], dtype=float32)
array([-0.58723712, 131.72314, 174.98743, -16.872612, 0.786213, 35.271973], dtype=float32)
```
These values are in degrees, which makes them easier to interpret and debug. The zero position used during calibration should roughly correspond to 0 degrees for each motor, and the rotated position should roughly correspond to 90 degrees for each motor.
**Teleoperate your Koch v1.1**
You can easily teleoperate your robot by reading the positions from the leader arm and sending them as goal positions to the follower arm.
To teleoperate your robot for 30 seconds at a frequency of approximately 200Hz, run the following code:
```python
import tqdm
seconds = 30
frequency = 200
for _ in tqdm.tqdm(range(seconds*frequency)):
leader_pos = robot.leader_arms["main"].read("Present_Position")
robot.follower_arms["main"].write("Goal_Position", leader_pos)
```
*Using `teleop_step` for Teleoperation*
Alternatively, you can teleoperate the robot using the `teleop_step` method from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py).
Run this code to teleoperate:
```python
for _ in tqdm.tqdm(range(seconds*frequency)):
robot.teleop_step()
```
*Recording data during Teleoperation*
Teleoperation is particularly useful for recording data. You can use `teleop_step(record_data=True)` to return both the follower arm's position as `"observation.state"` and the leader arm's position as `"action"`. This function also converts the numpy arrays into PyTorch tensors. If you're working with a robot that has two leader and two follower arms (like the Aloha), the positions are concatenated.
Run the following code to see how slowly moving the leader arm affects the observation and action:
```python
leader_pos = robot.leader_arms["main"].read("Present_Position")
follower_pos = robot.follower_arms["main"].read("Present_Position")
observation, action = robot.teleop_step(record_data=True)
print(follower_pos)
print(observation)
print(leader_pos)
print(action)
```
Expected output:
```
array([7.8223, 131.1328, 165.5859, -23.4668, -0.9668, 32.4316], dtype=float32)
{'observation.state': tensor([7.8223, 131.1328, 165.5859, -23.4668, -0.9668, 32.4316])}
array([3.4277, 134.1211, 179.8242, -18.5449, -1.5820, 34.7168], dtype=float32)
{'action': tensor([3.4277, 134.1211, 179.8242, -18.5449, -1.5820, 34.7168])}
```
*Asynchronous Frame Recording*
Additionally, `teleop_step` can asynchronously record frames from multiple cameras and include them in the observation dictionary as `"observation.images.CAMERA_NAME"`. This feature will be covered in more detail in the next section.
*Disconnecting the Robot*
When you're finished, make sure to disconnect your robot by running:
```python
robot.disconnect()
```
Alternatively, you can unplug the power cord, which will also disable torque.
*/!\ Warning*: These motors tend to overheat, especially under torque or if left plugged in for too long. Unplug after use.
### c. Add your cameras with OpenCVCamera
**(Optional) Use your phone as camera on Linux**
If you want to use your phone as a camera on Linux, follow these steps to set up a virtual camera
1. *Install `v4l2loopback-dkms` and `v4l-utils`*. Those packages are required to create virtual camera devices (`v4l2loopback`) and verify their settings with the `v4l2-ctl` utility from `v4l-utils`. Install them using:
```bash
sudo apt install v4l2loopback-dkms v4l-utils
```
2. *Install [DroidCam](https://droidcam.app) on your phone*. This app is available for both iOS and Android.
3. *Install [OBS Studio](https://obsproject.com)*. This software will help you manage the camera feed. Install it using [Flatpak](https://flatpak.org):
```bash
flatpak install flathub com.obsproject.Studio
```
4. *Install the DroidCam OBS plugin*. This plugin integrates DroidCam with OBS Studio. Install it with:
```bash
flatpak install flathub com.obsproject.Studio.Plugin.DroidCam
```
5. *Start OBS Studio*. Launch with:
```bash
flatpak run com.obsproject.Studio
```
6. *Add your phone as a source*. Follow the instructions [here](https://droidcam.app/obs/usage). Be sure to set the resolution to `640x480`.
7. *Adjust resolution settings*. In OBS Studio, go to `File > Settings > Video`. Change the `Base(Canvas) Resolution` and the `Output(Scaled) Resolution` to `640x480` by manually typing it in.
8. *Start virtual camera*. In OBS Studio, follow the instructions [here](https://obsproject.com/kb/virtual-camera-guide).
9. *Verify the virtual camera setup*. Use `v4l2-ctl` to list the devices:
```bash
v4l2-ctl --list-devices
```
You should see an entry like:
```
VirtualCam (platform:v4l2loopback-000):
/dev/video1
```
10. *Check the camera resolution*. Use `v4l2-ctl` to ensure that the virtual camera output resolution is `640x480`. Change `/dev/video1` to the port of your virtual camera from the output of `v4l2-ctl --list-devices`.
```bash
v4l2-ctl -d /dev/video1 --get-fmt-video
```
You should see an entry like:
```
>>> Format Video Capture:
>>> Width/Height : 640/480
>>> Pixel Format : 'YUYV' (YUYV 4:2:2)
```
Troubleshooting: If the resolution is not correct, you will have to delete the virtual camera port and try again, as it cannot be changed.
If everything is set up correctly, you can proceed with the rest of the tutorial.
**(Optional) Use your iPhone as a camera on MacOS**
To use your iPhone as a camera on macOS, enable the Continuity Camera feature:
- Ensure your Mac is running macOS 13 or later, and your iPhone is on iOS 16 or later.
- Sign in both devices with the same Apple ID.
- Connect your devices with a USB cable or turn on Wi-Fi and Bluetooth for a wireless connection.
For more details, visit [Apple support](https://support.apple.com/en-gb/guide/mac-help/mchl77879b8a/mac).
Your iPhone should be detected automatically when running the camera setup script in the next section.
**Instantiate an OpenCVCamera**
The [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py) class allows you to efficiently record frames from most cameras using the [`opencv2`](https://docs.opencv.org) library. For more details on compatibility, see [Video I/O with OpenCV Overview](https://docs.opencv.org/4.x/d0/da7/videoio_overview.html).
To instantiate an [`OpenCVCamera`](../lerobot/common/robot_devices/cameras/opencv.py), you need a camera index (e.g. `OpenCVCamera(camera_index=0)`). When you only have one camera, like a laptop webcam, the camera index is usually `0`, but it might differ, and it can change if you reboot your computer or re-plug your camera. This behavior depends on your operating system.
To find the camera indices, run the following utility script, which will save a few frames from each detected camera:
```bash
python lerobot/common/robot_devices/cameras/opencv.py \
--images-dir outputs/images_from_opencv_cameras
```
The output will look something like this if you have two cameras connected:
```
Mac or Windows detected. Finding available camera indices through scanning all indices from 0 to 60
[...]
Camera found at index 0
Camera found at index 1
[...]
Connecting cameras
OpenCVCamera(0, fps=30.0, width=1920.0, height=1080.0, color_mode=rgb)
OpenCVCamera(1, fps=24.0, width=1920.0, height=1080.0, color_mode=rgb)
Saving images to outputs/images_from_opencv_cameras
Frame: 0000 Latency (ms): 39.52
[...]
Frame: 0046 Latency (ms): 40.07
Images have been saved to outputs/images_from_opencv_cameras
```
Check the saved images in `outputs/images_from_opencv_cameras` to identify which camera index corresponds to which physical camera (e.g. `0` for `camera_00` or `1` for `camera_01`):
```
camera_00_frame_000000.png
[...]
camera_00_frame_000047.png
camera_01_frame_000000.png
[...]
camera_01_frame_000047.png
```
Note: Some cameras may take a few seconds to warm up, and the first frame might be black or green.
Finally, run this code to instantiate and connect your camera:
```python
from lerobot.common.robot_devices.cameras.configs import OpenCVCameraConfig
from lerobot.common.robot_devices.cameras.opencv import OpenCVCamera
camera_config = OpenCVCameraConfig(camera_index=0)
camera = OpenCVCamera(camera_config)
camera.connect()
color_image = camera.read()
print(color_image.shape)
print(color_image.dtype)
```
Expected output for a laptop camera on MacBookPro:
```
(1080, 1920, 3)
uint8
```
Or like this if you followed our tutorial to set a virtual camera:
```
(480, 640, 3)
uint8
```
With certain cameras, you can also specify additional parameters such as frame rate, resolution, and color mode during instantiation. For instance:
```python
config = OpenCVCameraConfig(camera_index=0, fps=30, width=640, height=480)
```
If the provided arguments are not compatible with the camera, an exception will be raised.
*Disconnecting the camera*
When you're done using the camera, disconnect it by running:
```python
camera.disconnect()
```
**Instantiate your robot with cameras**
Additionally, you can set up your robot to work with your cameras.
Modify the following Python code with the appropriate camera names and configurations:
```python
robot_config = KochRobotConfig(
    leader_arms={"main": leader_config},
    follower_arms={"main": follower_config},
    calibration_dir=".cache/calibration/koch",
    cameras={
        "laptop": OpenCVCameraConfig(0, fps=30, width=640, height=480),
        "phone": OpenCVCameraConfig(1, fps=30, width=640, height=480),
    },
)
robot = ManipulatorRobot(robot_config)
robot.connect()
```
As a result, `teleop_step(record_data=True)` will return a frame for each camera following the PyTorch "channel first" convention, but we keep images in `uint8` with pixel values in the range [0, 255] to easily save them.
Modify this code with the names of your cameras and run it:
```python
observation, action = robot.teleop_step(record_data=True)
print(observation["observation.images.laptop"].shape)
print(observation["observation.images.phone"].shape)
print(observation["observation.images.laptop"].min().item())
print(observation["observation.images.laptop"].max().item())
```
The output should look like this:
```
torch.Size([3, 480, 640])
torch.Size([3, 480, 640])
0
255
```
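Since frames are `uint8` channel-first tensors, saving one to disk for quick visual inspection is straightforward (a small sketch using PIL; the file name is arbitrary):
```python
from PIL import Image

frame = observation["observation.images.laptop"]  # torch.uint8 tensor, shape (3, 480, 640)
# Convert to channel-last numpy for PIL and save it.
Image.fromarray(frame.permute(1, 2, 0).numpy()).save("laptop_frame.png")
```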
### d. Use `control_robot.py` and our `teleoperate` function
Instead of manually running the python code in a terminal window, you can use [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to instantiate your robot by providing the robot configurations via command line and control your robot with various modes as explained next.
Try running this code to teleoperate your robot (if you don't have a camera, keep reading):
```bash
python lerobot/scripts/control_robot.py \
--robot.type=koch \
--control.type=teleoperate
```
You will see a lot of lines appearing like this one:
```
INFO 2024-08-10 11:15:03 ol_robot.py:209 dt: 5.12 (195.1hz) dtRlead: 4.93 (203.0hz) dtRfoll: 0.19 (5239.0hz)
```
It contains:
- `2024-08-10 11:15:03` which is the date and time of the call to the print function.
- `ol_robot.py:209` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `209`).
- `dt: 5.12 (195.1hz)` which is the "delta time" or the number of milliseconds spent between the previous call to `robot.teleop_step()` and the current one, associated with the frequency (5.12 ms equals 195.1 Hz); note that you can control the maximum frequency by passing an fps argument such as `--control.fps=30`.
- `dtRlead: 4.93 (203.0hz)` which is the number of milliseconds it took to read the position of the leader arm using `leader_arm.read("Present_Position")`.
- `dtWfoll: 0.22 (4446.9hz)` which is the number of milliseconds it took to set a new goal position for the follower arm using `follower_arm.write("Goal_position", leader_pos)` ; note that writing is done asynchronously so it takes less time than reading.
Importantly: If you don't have any camera, you can remove them dynamically with this [draccus](https://github.com/dlwh/draccus) syntax `--robot.cameras='{}'`:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=koch \
--robot.cameras='{}' \
--control.type=teleoperate
```
We advise creating a new YAML file when the command becomes too long.
## 3. Record your Dataset and Visualize it
Using what you've learned previously, you can now easily record a dataset of states and actions for one episode. You can use `busy_wait` to control the speed of teleoperation and record at a fixed `fps` (frames per second).
Try this code to record 30 seconds at 60 fps:
```python
import time
from lerobot.scripts.control_robot import busy_wait
record_time_s = 30
fps = 60
states = []
actions = []
for _ in range(record_time_s * fps):
start_time = time.perf_counter()
observation, action = robot.teleop_step(record_data=True)
states.append(observation["observation.state"])
actions.append(action["action"])
dt_s = time.perf_counter() - start_time
busy_wait(1 / fps - dt_s)
# Note that observation and action are available in RAM, but
# you could potentially store them on disk with pickle/hdf5 or
# our optimized format `LeRobotDataset`. More on this next.
```
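As a bare-bones illustration (not the optimized `LeRobotDataset` path discussed below), you could stack the buffers collected above and save them with `torch.save`:
```python
import torch

# `states` and `actions` are lists of 1D tensors collected in the loop above.
states_t = torch.stack(states)    # shape: (record_time_s * fps, num_motors)
actions_t = torch.stack(actions)  # shape: (record_time_s * fps, num_motors)

# Quick-and-dirty persistence for experiments only; the file name is arbitrary.
torch.save({"observation.state": states_t, "action": actions_t}, "episode_000.pth")
```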
Importantly, many utilities are still missing. For instance, if you have cameras, you will need to save the images to disk so you don't run out of RAM, and to do so in threads so as not to slow down communication with your robot. Also, you will need to store your data in a format optimized for training and web sharing like [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py). More on this in the next section.
### a. Use the `record` function
You can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) to achieve efficient data recording. It encompasses many recording utilities:
1. Frames from cameras are saved on disk in threads, and encoded into videos at the end of each episode recording.
2. Video streams from cameras are displayed in a window so that you can verify them.
3. Data is stored with [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py) format which is pushed to your Hugging Face page (unless `--control.push_to_hub=false` is provided).
4. Checkpoints are done during recording, so if any issue occurs, you can resume recording by re-running the same command again with `--control.resume=true`. You might need to add `--control.local_files_only=true` if your dataset was not uploaded to hugging face hub. Also you will need to manually delete the dataset directory to start recording from scratch.
5. Set the flow of data recording using command line arguments:
- `--control.warmup_time_s=10` defines the number of seconds before starting data collection. It allows the robot devices to warmup and synchronize (10 seconds by default).
- `--control.episode_time_s=60` defines the number of seconds for data recording for each episode (60 seconds by default).
- `--control.reset_time_s=60` defines the number of seconds for resetting the environment after each episode (60 seconds by default).
- `--control.num_episodes=50` defines the number of episodes to record (50 by default).
6. Control the flow during data recording using keyboard keys:
- Press right arrow `->` at any time during episode recording to early stop and go to resetting. Same during resetting, to early stop and to go to the next episode recording.
- Press left arrow `<-` at any time during episode recording or resetting to early stop, cancel the current episode, and re-record it.
- Press escape `ESC` at any time during episode recording to end the session early and go straight to video encoding and dataset uploading.
7. Similarly to `teleoperate`, you can also use the command line to override anything.
Before trying `record`, if you want to push your dataset to the hub, make sure you've logged in using a write-access token, which can be generated from the [Hugging Face settings](https://huggingface.co/settings/tokens):
```bash
huggingface-cli login --token ${HUGGINGFACE_TOKEN} --add-to-git-credential
```
Also, store your Hugging Face repository name in a variable (e.g. `cadene` or `lerobot`). For instance, run this to use your Hugging Face user name as repository:
```bash
HF_USER=$(huggingface-cli whoami | head -n 1)
echo $HF_USER
```
If you don't want to push to hub, use `--control.push_to_hub=false`.
Now run this to record 2 episodes:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=koch \
--control.type=record \
--control.single_task="Grasp a lego block and put it in the bin." \
--control.fps=30 \
--control.repo_id=${HF_USER}/koch_test \
--control.tags='["tutorial"]' \
--control.warmup_time_s=5 \
--control.episode_time_s=30 \
--control.reset_time_s=30 \
--control.num_episodes=2 \
--control.push_to_hub=true
```
This will write your dataset locally to `~/.cache/huggingface/lerobot/{repo-id}` (e.g. `~/.cache/huggingface/lerobot/cadene/koch_test`) and push it to the hub at `https://huggingface.co/datasets/{HF_USER}/{repo-id}`. Your dataset will be automatically tagged with `LeRobot` for the community to find it easily, and you can also add custom tags (in this case `tutorial` for example).
You can look for other LeRobot datasets on the hub by searching for `LeRobot` tags: https://huggingface.co/datasets?other=LeRobot
You will see a lot of lines appearing like this one:
```
INFO 2024-08-10 15:02:58 ol_robot.py:219 dt:33.34 (30.0hz) dtRlead: 5.06 (197.5hz) dtWfoll: 0.25 (3963.7hz) dtRfoll: 6.22 (160.7hz) dtRlaptop: 32.57 (30.7hz) dtRphone: 33.84 (29.5hz)
```
It contains:
- `2024-08-10 15:02:58` which is the date and time of the call to the print function,
- `ol_robot.py:219` which is the end of the file name and the line number where the print function is called (`lerobot/scripts/control_robot.py` line `219`).
- `dt:33.34 (30.0hz)` which is the "delta time" or the number of milliseconds spent between the previous call to `robot.teleop_step(record_data=True)` and the current one, associated with the frequency (33.34 ms equals 30.0 Hz); note that we use `--control.fps=30` so we expect 30.0 Hz; when a step takes more time, the line appears in yellow.
- `dtRlead: 5.06 (197.5hz)` which is the delta time of reading the present position of the leader arm.
- `dtWfoll: 0.25 (3963.7hz)` which is the delta time of writing the goal position on the follower arm ; writing is asynchronous so it takes less time than reading.
- `dtRfoll: 6.22 (160.7hz)` which is the delta time of reading the present position on the follower arm.
- `dtRlaptop:32.57 (30.7hz)` which is the delta time of capturing an image from the laptop camera in the thread running asynchronously.
- `dtRphone:33.84 (29.5hz)` which is the delta time of capturing an image from the phone camera in the thread running asynchronously.
Troubleshooting:
- On Linux, if you encounter a hanging issue when using cameras, uninstall opencv and re-install it with conda:
```bash
pip uninstall opencv-python
conda install -c conda-forge opencv=4.10.0
```
- On Linux, if you encounter any issue during video encoding with `ffmpeg: unknown encoder libsvtav1`, you can:
- install with conda-forge by running `conda install -c conda-forge ffmpeg` (it should be compiled with `libsvtav1`),
- or, install [Homebrew](https://brew.sh) and run `brew install ffmpeg` (it should be compiled with `libsvtav1`),
- or, install [ffmpeg build dependencies](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#GettheDependencies) and [compile ffmpeg from source with libsvtav1](https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu#libsvtav1),
- and, make sure you use the corresponding ffmpeg binary to your install with `which ffmpeg`.
- On Linux, if the left and right arrow keys and escape key don't have any effect during data recording, make sure you've set the `$DISPLAY` environment variable. See [pynput limitations](https://pynput.readthedocs.io/en/latest/limitations.html#linux).
At the end of data recording, your dataset will be uploaded on your Hugging Face page (e.g. https://huggingface.co/datasets/cadene/koch_test) that you can obtain by running:
```bash
echo https://huggingface.co/datasets/${HF_USER}/koch_test
```
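If you want to inspect the recorded data programmatically, you can load it back with [`LeRobotDataset`](../lerobot/common/datasets/lerobot_dataset.py). A short sketch (constructor arguments and attribute names may differ slightly across versions):
```python
from lerobot.common.datasets.lerobot_dataset import LeRobotDataset

# Use your own repo id, e.g. f"{HF_USER}/koch_test".
dataset = LeRobotDataset("cadene/koch_test")

print(dataset.num_episodes)  # number of recorded episodes
print(len(dataset))          # total number of frames
print(dataset[0].keys())     # "observation.state", "action", camera keys, ...
```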
### b. Advice for recording a dataset
Once you're comfortable with data recording, it's time to create a larger dataset for training. A good starting task is grasping an object at different locations and placing it in a bin. We suggest recording at least 50 episodes, with 10 episodes per location. Keep the cameras fixed and maintain consistent grasping behavior throughout the recordings.
In the following sections, you’ll train your neural network. After achieving reliable grasping performance, you can start introducing more variations during data collection, such as additional grasp locations, different grasping techniques, and altering camera positions.
Avoid adding too much variation too quickly, as it may hinder your results.
In the coming months, we plan to release a foundational model for robotics. We anticipate that fine-tuning this model will enhance generalization, reducing the need for strict consistency during data collection.
### c. Visualize all episodes
You can visualize your dataset by running:
```bash
python lerobot/scripts/visualize_dataset_html.py \
--repo-id ${HF_USER}/koch_test
```
Note: You might need to add `--local-files-only 1` if your dataset was not uploaded to hugging face hub.
This will launch a local web server that looks like this:
<div style="text-align:center;">
<img src="../media/tutorial/visualize_dataset_html.webp?raw=true" alt="Koch v1.1 leader and follower arms" title="Koch v1.1 leader and follower arms" width="100%">
</div>
### d. Replay episode on your robot with the `replay` function
A useful feature of [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) is the `replay` function, which allows you to replay on your robot any episode that you've recorded, or episodes from any dataset out there. This function helps you test the repeatability of your robot's actions and assess transferability across robots of the same model.
To replay the first episode of the dataset you just recorded, run the following command:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=koch \
--control.type=replay \
--control.fps=30 \
--control.repo_id=${HF_USER}/koch_test \
--control.episode=0
```
Note: You might need to add `--control.local_files_only=true` if your dataset was not uploaded to hugging face hub.
Your robot should replicate movements similar to those you recorded. For example, check out [this video](https://x.com/RemiCadene/status/1793654950905680090) where we use `replay` on an Aloha robot from [Trossen Robotics](https://www.trossenrobotics.com).
## 4. Train a policy on your data
### a. Use the `train` script
To train a policy to control your robot, use the [`python lerobot/scripts/train.py`](../lerobot/scripts/train.py) script. A few arguments are required. Here is an example command:
```bash
python lerobot/scripts/train.py \
--dataset.repo_id=${HF_USER}/koch_test \
--policy.type=act \
--output_dir=outputs/train/act_koch_test \
--job_name=act_koch_test \
--device=cuda \
--wandb.enable=true
```
Note: You might need to add `--dataset.local_files_only=true` if your dataset was not uploaded to hugging face hub.
Let's explain it:
1. We provided the dataset as argument with `--dataset.repo_id=${HF_USER}/koch_test`.
2. We provided the policy with `policy.type=act`. This loads configurations from [`configuration_act.py`](../lerobot/common/policies/act/configuration_act.py). Importantly, this policy will automatically adapt to the number of motor states, motor actions and cameras of your robot (e.g. `laptop` and `phone`) which have been saved in your dataset.
3. We provided `device=cuda` since we are training on an Nvidia GPU, but you could use `device=mps` to train on Apple silicon.
4. We provided `wandb.enable=true` to use [Weights and Biases](https://docs.wandb.ai/quickstart) for visualizing training plots. This is optional but if you use it, make sure you are logged in by running `wandb login`.
For more information on the `train` script see the previous tutorial: [`examples/4_train_policy_with_script.md`](../examples/4_train_policy_with_script.md)
### b. (Optional) Upload policy checkpoints to the hub
Once training is done, upload the latest checkpoint with:
```bash
huggingface-cli upload ${HF_USER}/act_koch_test \
outputs/train/act_koch_test/checkpoints/last/pretrained_model
```
You can also upload intermediate checkpoints with:
```bash
CKPT=010000
huggingface-cli upload ${HF_USER}/act_koch_test_${CKPT} \
outputs/train/act_koch_test/checkpoints/${CKPT}/pretrained_model
```
## 5. Evaluate your policy
Now that you have a policy checkpoint, you can easily control your robot with it using methods from [`ManipulatorRobot`](../lerobot/common/robot_devices/robots/manipulator.py) and the policy.
Try this code for running inference for 60 seconds at 30 fps:
```python
import time

import torch

from lerobot.common.policies.act.modeling_act import ACTPolicy
from lerobot.scripts.control_robot import busy_wait
inference_time_s = 60
fps = 30
device = "cuda" # TODO: On Mac, use "mps" or "cpu"
ckpt_path = "outputs/train/act_koch_test/checkpoints/last/pretrained_model"
policy = ACTPolicy.from_pretrained(ckpt_path)
policy.to(device)
for _ in range(inference_time_s * fps):
start_time = time.perf_counter()
# Read the follower state and access the frames from the cameras
observation = robot.capture_observation()
# Convert to pytorch format: channel first and float32 in [0,1]
# with batch dimension
for name in observation:
if "image" in name:
observation[name] = observation[name].type(torch.float32) / 255
observation[name] = observation[name].permute(2, 0, 1).contiguous()
observation[name] = observation[name].unsqueeze(0)
observation[name] = observation[name].to(device)
# Compute the next action with the policy
# based on the current observation
action = policy.select_action(observation)
# Remove batch dimension
action = action.squeeze(0)
# Move to cpu, if not already the case
action = action.to("cpu")
# Order the robot to move
robot.send_action(action)
dt_s = time.perf_counter() - start_time
busy_wait(1 / fps - dt_s)
```
### a. Use our `record` function
Ideally, when controlling your robot with your neural network, you would want to record evaluation episodes and to be able to visualize them later on, or even train on them like in Reinforcement Learning. This pretty much corresponds to recording a new dataset but with a neural network providing the actions instead of teleoperation.
To this end, you can use the `record` function from [`lerobot/scripts/control_robot.py`](../lerobot/scripts/control_robot.py) but with a policy checkpoint as input. For instance, run this command to record 10 evaluation episodes:
```bash
python lerobot/scripts/control_robot.py \
--robot.type=koch \
--control.type=record \
--control.fps=30 \
--control.repo_id=${HF_USER}/eval_act_koch_test \
--control.tags='["tutorial"]' \
--control.warmup_time_s=5 \
--control.episode_time_s=30 \
--control.reset_time_s=30 \
--control.num_episodes=10 \
--control.push_to_hub=true \
--control.policy.path=outputs/train/act_koch_test/checkpoints/last/pretrained_model
```
As you can see, it's almost the same command as previously used to record your training dataset. Two things changed:
1. There is an additional `--control.policy.path` argument which indicates the path to your policy checkpoint (e.g. `outputs/train/act_koch_test/checkpoints/last/pretrained_model`). You can also use the model repository if you uploaded a model checkpoint to the hub (e.g. `${HF_USER}/act_koch_test`).
2. The name of the dataset begins with `eval` to reflect that you are running inference (e.g. `${HF_USER}/eval_act_koch_test`).
### b. Visualize evaluation afterwards
You can then visualize your evaluation dataset by running the same command as before but with the new inference dataset as argument:
```bash
python lerobot/scripts/visualize_dataset.py \
--repo-id ${HF_USER}/eval_act_koch_test
```
## 6. Next step
Join our [Discord](https://discord.com/invite/s3KuuzsPFb) to collaborate on data collection and help us train a fully open-source foundational model for robotics!
| lerobot/examples/7_get_started_with_real_robot.md/0 | {
"file_path": "lerobot/examples/7_get_started_with_real_robot.md",
"repo_id": "lerobot",
"token_count": 15959
} |
#!/usr/bin/env python
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
For all datasets in the RLDS format.
For https://github.com/google-deepmind/open_x_embodiment (OPENX) datasets.
NOTE: You need to install tensorflow and tensorflow_datasets before running this script.
Example:
python lerobot/scripts/push_dataset_to_hub.py \
--raw-dir /path/to/data/bridge_dataset/1.0.0/ \
--repo-id your_hub/sampled_bridge_data_v2 \
--raw-format rlds \
--episodes 3 4 5 8 9
Exact dataset fps defined in openx/config.py, obtained from:
https://docs.google.com/spreadsheets/d/1rPBD77tk60AEIGZrGSODwyyzs5FgCU9Uz3h-3_t2A9g/edit?gid=0#gid=0&range=R:R
"""
import shutil
from pathlib import Path
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import torch
import tqdm
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.lerobot_dataset import CODEBASE_VERSION
from lerobot.common.datasets.push_dataset_to_hub.utils import (
calculate_episode_data_index,
concatenate_episodes,
get_default_encoding,
save_images_concurrently,
)
from lerobot.common.datasets.utils import (
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
np.set_printoptions(precision=2)
def tf_to_torch(data):
return torch.from_numpy(data.numpy())
def tf_img_convert(img):
if img.dtype == tf.string:
img = tf.io.decode_image(img, expand_animations=False, dtype=tf.uint8)
elif img.dtype != tf.uint8:
raise ValueError(f"Unsupported image dtype: found with dtype {img.dtype}")
return img.numpy()
def _broadcast_metadata_rlds(i: tf.Tensor, traj: dict) -> dict:
"""
In the RLDS format, each trajectory has some top-level metadata that is explicitly separated out, and a "steps"
entry. This function moves the "steps" entry to the top level, broadcasting any metadata to the length of the
trajectory. This function also adds the extra metadata fields `_len`, `_traj_index`, and `_frame_index`.
NOTE: adapted from DLimp library https://github.com/kvablack/dlimp/
"""
steps = traj.pop("steps")
traj_len = tf.shape(tf.nest.flatten(steps)[0])[0]
# broadcast metadata to the length of the trajectory
metadata = tf.nest.map_structure(lambda x: tf.repeat(x, traj_len), traj)
# put steps back in
assert "traj_metadata" not in steps
traj = {**steps, "traj_metadata": metadata}
assert "_len" not in traj
assert "_traj_index" not in traj
assert "_frame_index" not in traj
traj["_len"] = tf.repeat(traj_len, traj_len)
traj["_traj_index"] = tf.repeat(i, traj_len)
traj["_frame_index"] = tf.range(traj_len)
return traj
def load_from_raw(
raw_dir: Path,
videos_dir: Path,
fps: int,
video: bool,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
"""
Args:
raw_dir (Path): _description_
videos_dir (Path): _description_
fps (int): _description_
video (bool): _description_
episodes (list[int] | None, optional): _description_. Defaults to None.
"""
ds_builder = tfds.builder_from_directory(str(raw_dir))
dataset = ds_builder.as_dataset(
split="all",
decoders={"steps": tfds.decode.SkipDecoding()},
)
dataset_info = ds_builder.info
print("dataset_info: ", dataset_info)
ds_length = len(dataset)
dataset = dataset.take(ds_length)
# "flatten" the dataset as such we can apply trajectory level map() easily
# each [obs][key] has a shape of (frame_size, ...)
dataset = dataset.enumerate().map(_broadcast_metadata_rlds)
    # No dataset-specific standardization transform is applied here: to support any RLDS-formatted
    # dataset, we simply inspect the feature spec and search for 'image' keys in the observations.
image_keys = []
state_keys = []
observation_info = dataset_info.features["steps"]["observation"]
for key in observation_info:
# check whether the key is for an image or a vector observation
if len(observation_info[key].shape) == 3:
            # only add uint8 images; non-uint8 (e.g. depth) images are discarded
if observation_info[key].dtype == tf.uint8:
image_keys.append(key)
else:
state_keys.append(key)
lang_key = "language_instruction" if "language_instruction" in dataset.element_spec else None
print(" - image_keys: ", image_keys)
print(" - lang_key: ", lang_key)
it = iter(dataset)
ep_dicts = []
# Init temp path to save ep_dicts in case of crash
tmp_ep_dicts_dir = videos_dir.parent.joinpath("ep_dicts")
tmp_ep_dicts_dir.mkdir(parents=True, exist_ok=True)
    # check if ep_dicts have already been saved from a previous (possibly interrupted) run
starting_ep_idx = 0
    saved_ep_dicts = [str(ep) for ep in tmp_ep_dicts_dir.iterdir()]
if len(saved_ep_dicts) > 0:
saved_ep_dicts.sort()
        # recover the index of the last saved episode from the zero-padded filename (ep_dict_XXXXXXXXXX.pt)
starting_ep_idx = int(saved_ep_dicts[-1][-13:-3]) + 1
for i in range(starting_ep_idx):
episode = next(it)
ep_dicts.append(torch.load(saved_ep_dicts[i]))
    # if the user specified episodes, validate and sort the requested indices
if episodes is not None:
if ds_length == 0:
raise ValueError("No episodes found.")
# convert episodes index to sorted list
episodes = sorted(episodes)
for ep_idx in tqdm.tqdm(range(starting_ep_idx, ds_length)):
episode = next(it)
# if user specified episodes, skip the ones not in the list
if episodes is not None:
if len(episodes) == 0:
break
if ep_idx == episodes[0]:
# process this episode
print(" selecting episode idx: ", ep_idx)
episodes.pop(0)
else:
continue # skip
num_frames = episode["action"].shape[0]
ep_dict = {}
for key in state_keys:
ep_dict[f"observation.{key}"] = tf_to_torch(episode["observation"][key])
ep_dict["action"] = tf_to_torch(episode["action"])
ep_dict["next.reward"] = tf_to_torch(episode["reward"]).float()
ep_dict["next.done"] = tf_to_torch(episode["is_last"])
ep_dict["is_terminal"] = tf_to_torch(episode["is_terminal"])
ep_dict["is_first"] = tf_to_torch(episode["is_first"])
ep_dict["discount"] = tf_to_torch(episode["discount"])
# If lang_key is present, convert the entire tensor at once
if lang_key is not None:
ep_dict["language_instruction"] = [x.numpy().decode("utf-8") for x in episode[lang_key]]
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
image_array_dict = {key: [] for key in image_keys}
for im_key in image_keys:
imgs = episode["observation"][im_key]
image_array_dict[im_key] = [tf_img_convert(img) for img in imgs]
# loop through all cameras
for im_key in image_keys:
img_key = f"observation.images.{im_key}"
imgs_array = image_array_dict[im_key]
imgs_array = np.array(imgs_array)
if video:
# save png images in temporary directory
tmp_imgs_dir = videos_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = videos_dir / fname
encode_video_frames(tmp_imgs_dir, video_path, fps, **(encoding or {}))
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# store the reference to the video frame
ep_dict[img_key] = [
{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
]
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
        path_ep_dict = tmp_ep_dicts_dir.joinpath(f"ep_dict_{ep_idx:010d}.pt")
torch.save(ep_dict, path_ep_dict)
ep_dicts.append(ep_dict)
data_dict = concatenate_episodes(ep_dicts)
total_frames = data_dict["frame_index"].shape[0]
data_dict["index"] = torch.arange(0, total_frames, 1)
return data_dict
def to_hf_dataset(data_dict, video) -> Dataset:
features = {}
for key in data_dict:
# check if vector state obs
if key.startswith("observation.") and "observation.images." not in key:
features[key] = Sequence(length=data_dict[key].shape[1], feature=Value(dtype="float32", id=None))
# check if image obs
elif "observation.images." in key:
if video:
features[key] = VideoFrame()
else:
features[key] = Image()
if "language_instruction" in data_dict:
features["language_instruction"] = Value(dtype="string", id=None)
features["action"] = Sequence(
length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
)
features["is_terminal"] = Value(dtype="bool", id=None)
features["is_first"] = Value(dtype="bool", id=None)
features["discount"] = Value(dtype="float32", id=None)
features["episode_index"] = Value(dtype="int64", id=None)
features["frame_index"] = Value(dtype="int64", id=None)
features["timestamp"] = Value(dtype="float32", id=None)
features["next.reward"] = Value(dtype="float32", id=None)
features["next.done"] = Value(dtype="bool", id=None)
features["index"] = Value(dtype="int64", id=None)
hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
hf_dataset.set_transform(hf_transform_to_torch)
return hf_dataset
def from_raw_to_lerobot_format(
raw_dir: Path,
videos_dir: Path,
fps: int | None = None,
video: bool = True,
episodes: list[int] | None = None,
encoding: dict | None = None,
):
data_dict = load_from_raw(raw_dir, videos_dir, fps, video, episodes, encoding)
hf_dataset = to_hf_dataset(data_dict, video)
episode_data_index = calculate_episode_data_index(hf_dataset)
info = {
"codebase_version": CODEBASE_VERSION,
"fps": fps,
"video": video,
}
if video:
info["encoding"] = get_default_encoding()
return hf_dataset, episode_data_index, info
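# Usage sketch (hypothetical values): this module is normally invoked through
# lerobot/scripts/push_dataset_to_hub.py (see the module docstring), but the conversion can also be
# called directly, e.g.:
#
#   hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
#       raw_dir=Path("/path/to/data/bridge_dataset/1.0.0/"),
#       videos_dir=Path("/tmp/bridge_videos"),
#       fps=5,  # dataset-specific; see the fps spreadsheet referenced in the module docstring
#       video=True,
#       episodes=[3, 4, 5],
#   )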
| lerobot/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py/0 | {
"file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/openx_rlds_format.py",
"repo_id": "lerobot",
"token_count": 4796
} |
import torch
import torch.nn.functional as F # noqa: N812
from packaging.version import Version
if Version(torch.__version__) > Version("2.5.0"):
    # Flex attention is only available from torch 2.5 onwards
from torch.nn.attention.flex_attention import (
_mask_mod_signature,
_round_up_to_multiple,
create_block_mask,
create_mask,
flex_attention,
)
# @torch.compile(dynamic=False)
def flex_attention_forward(
attention_mask: torch.Tensor,
batch_size: int,
head_dim: int,
query_states: torch.Tensor,
key_states: torch.Tensor,
value_states: torch.Tensor,
scaling=None,
):
"""
    This is defined outside of any class to keep torch.compile happy.
"""
original_dtype = query_states.dtype
num_att_heads = 8
num_key_value_heads = 1
num_key_value_groups = num_att_heads // num_key_value_heads
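    # GQA: repeat the single key/value head so it matches the number of query heads.
    # [B, S, n_kv, D] -> [B, S, n_kv, 1, D] -> [B, S, n_kv, groups, D] -> [B, S, n_kv * groups, D]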
key_states = key_states[:, :, :, None, :]
key_states = key_states.expand(
batch_size, key_states.shape[1], num_key_value_heads, num_key_value_groups, head_dim
)
key_states = key_states.reshape(
batch_size, key_states.shape[1], num_key_value_heads * num_key_value_groups, head_dim
)
value_states = value_states[:, :, :, None, :]
value_states = value_states.expand(
batch_size, value_states.shape[1], num_key_value_heads, num_key_value_groups, head_dim
)
value_states = value_states.reshape(
batch_size, value_states.shape[1], num_key_value_heads * num_key_value_groups, head_dim
)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
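    # q/k/v are now [B, H, S, D], the layout flex_attention expects (heads before sequence)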
query_states = query_states.to(torch.float32)
key_states = key_states.to(torch.float32)
value_states = value_states.to(torch.float32)
causal_mask = attention_mask
if causal_mask is not None:
causal_mask = causal_mask[:, None, :, : key_states.shape[2]]
if causal_mask.shape[1] == 1 and query_states.shape[1] > 1:
causal_mask = causal_mask.expand(-1, query_states.shape[1], -1, -1)
def precomputed_mask_factory(precomputed_mask: torch.Tensor) -> _mask_mod_signature:
def mask_mod(b, h, q_idx, kv_idx):
# Danger zone: if b,h,q_idx,kv_idx exceed the shape, device-side assert occurs.
return precomputed_mask[b][h][q_idx][kv_idx]
return mask_mod
b_mask, h_mask, q_len, kv_len = causal_mask.shape # The shape of your mask
block_size = 128
q_len_rounded = _round_up_to_multiple(q_len, block_size)
kv_len_rounded = _round_up_to_multiple(kv_len, block_size)
    # *CRITICAL* we need to pad the mask to the rounded lengths, else we get a CUDA index error
pad_q = q_len_rounded - q_len
pad_k = kv_len_rounded - kv_len
padded_causal_mask = F.pad(causal_mask, (0, pad_k, 0, pad_q), value=0.0)
mask_mod_fn_orig = precomputed_mask_factory(padded_causal_mask)
mask_4d = create_mask(
mod_fn=mask_mod_fn_orig,
B=b_mask,
H=h_mask,
Q_LEN=q_len_rounded,
KV_LEN=kv_len_rounded,
device=causal_mask.device,
_compile=False,
)
mask_mod_fn_padded = precomputed_mask_factory(mask_4d)
block_mask = create_block_mask(
mask_mod=mask_mod_fn_padded,
B=b_mask,
H=h_mask,
Q_LEN=q_len_rounded,
KV_LEN=kv_len_rounded,
BLOCK_SIZE=block_size,
device=causal_mask.device,
_compile=False,
)
# mask is applied inside the kernel, ideally more efficiently than score_mod.
attn_output, attention_weights = flex_attention(
query_states,
key_states,
value_states,
block_mask=block_mask,
enable_gqa=True, # because we shaped query/key states for GQA
scale=head_dim**-0.5 if scaling is None else scaling,
return_lse=True,
)
attn_output = attn_output.to(dtype=original_dtype)
attn_output = attn_output.transpose(1, 2).contiguous() # [B, Q_LEN, H, head_dim]
attn_output = attn_output.reshape(
batch_size,
-1,
attn_output.shape[2] * attn_output.shape[3], # merges [H, head_dim]
)
return attn_output
| lerobot/lerobot/common/policies/pi0/flex_attention.py/0 | {
"file_path": "lerobot/lerobot/common/policies/pi0/flex_attention.py",
"repo_id": "lerobot",
"token_count": 1891
} |